changeset 1:193df1943809 trunk

[svn] Load openjdk/jdk7/b13 into jdk/trunk.
author xiomara
date Fri, 25 May 2007 00:49:14 +0000
parents a4ed3fb96592
children 16f2b6c91171
files control/make/motif-rules.gmk control/make/templates/bsd-header control/make/templates/gpl-cp-header control/make/templates/gpl-header hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/OopHandle.java hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JVMPIDaemonThread.java hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaVFrame.java hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/MonitorCacheDumpPanel.java hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js hotspot/build/linux/makefiles/top.make hotspot/build/linux/makefiles/vm.make hotspot/build/solaris/makefiles/reorder_COMPILER1_i486 hotspot/build/solaris/makefiles/reorder_COMPILER1_sparc hotspot/build/solaris/makefiles/reorder_COMPILER2_amd64 hotspot/build/solaris/makefiles/reorder_COMPILER2_i486 hotspot/build/solaris/makefiles/reorder_COMPILER2_sparc hotspot/build/solaris/makefiles/reorder_COMPILER2_sparcv9 hotspot/build/solaris/makefiles/reorder_TIERED_amd64 hotspot/build/solaris/makefiles/reorder_TIERED_i486 hotspot/build/solaris/makefiles/reorder_TIERED_sparc hotspot/build/solaris/makefiles/top.make hotspot/build/windows/makefiles/adlc.make hotspot/build/windows/makefiles/generated.make hotspot/build/windows/makefiles/makedeps.make hotspot/build/windows/makefiles/vm.make hotspot/build/windows/projectfiles/common/Makefile hotspot/make/templates/bsd-header hotspot/make/templates/gpl-cp-header hotspot/make/templates/gpl-header hotspot/src/cpu/amd64/vm/amd64.ad hotspot/src/cpu/amd64/vm/assembler_amd64.cpp hotspot/src/cpu/amd64/vm/assembler_amd64.hpp hotspot/src/cpu/amd64/vm/disassembler_amd64.cpp hotspot/src/cpu/amd64/vm/icache_amd64.cpp hotspot/src/cpu/amd64/vm/interp_masm_amd64.cpp hotspot/src/cpu/amd64/vm/interp_masm_amd64.hpp 
hotspot/src/cpu/amd64/vm/interpreter_amd64.cpp hotspot/src/cpu/amd64/vm/sharedRuntime_amd64.cpp hotspot/src/cpu/amd64/vm/stubGenerator_amd64.cpp hotspot/src/cpu/amd64/vm/templateTable_amd64.cpp hotspot/src/cpu/amd64/vm/vm_version_amd64.cpp hotspot/src/cpu/amd64/vm/vtableStubs_amd64.cpp hotspot/src/cpu/i486/vm/assembler_i486.cpp hotspot/src/cpu/i486/vm/assembler_i486.hpp hotspot/src/cpu/i486/vm/c1_CodeStubs_i486.cpp hotspot/src/cpu/i486/vm/c1_LIRAssembler_i486.cpp hotspot/src/cpu/i486/vm/c1_MacroAssembler_i486.cpp hotspot/src/cpu/i486/vm/c1_Runtime1_i486.cpp hotspot/src/cpu/i486/vm/i486.ad hotspot/src/cpu/i486/vm/interp_masm_i486.cpp hotspot/src/cpu/i486/vm/interp_masm_i486.hpp hotspot/src/cpu/i486/vm/interpreter_i486.cpp hotspot/src/cpu/i486/vm/sharedRuntime_i486.cpp hotspot/src/cpu/i486/vm/stubGenerator_i486.cpp hotspot/src/cpu/i486/vm/templateTable_i486.cpp hotspot/src/cpu/i486/vm/vm_version_i486.cpp hotspot/src/cpu/i486/vm/vtableStubs_i486.cpp hotspot/src/cpu/sparc/vm/assembler_sparc.cpp hotspot/src/cpu/sparc/vm/assembler_sparc.hpp hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp hotspot/src/cpu/sparc/vm/disassembler_sparc.cpp hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp hotspot/src/cpu/sparc/vm/sparc.ad hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp hotspot/src/os/linux/vm/os_linux.cpp hotspot/src/os/win32/vm/os_win32.cpp hotspot/src/os_cpu/linux_amd64/vm/atomic_linux_amd64.inline.hpp hotspot/src/os_cpu/linux_i486/vm/atomic_linux_i486.inline.hpp hotspot/src/os_cpu/linux_i486/vm/copy_linux_i486.inline.hpp hotspot/src/os_cpu/linux_i486/vm/linux_i486.s hotspot/src/os_cpu/solaris_amd64/vm/os_solaris_amd64.cpp hotspot/src/os_cpu/solaris_i486/vm/os_solaris_i486.cpp hotspot/src/os_cpu/solaris_i486/vm/solaris_i486.s 
hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp hotspot/src/os_cpu/win32_i486/vm/atomic_win32_i486.inline.hpp hotspot/src/os_cpu/win32_i486/vm/copy_win32_i486.inline.hpp hotspot/src/share/vm/adlc/formssel.cpp hotspot/src/share/vm/adlc/formssel.hpp hotspot/src/share/vm/adlc/output_c.cpp hotspot/src/share/vm/asm/codeBuffer.hpp hotspot/src/share/vm/c1/c1_Compilation.cpp hotspot/src/share/vm/c1/c1_Compilation.hpp hotspot/src/share/vm/c1/c1_GraphBuilder.cpp hotspot/src/share/vm/c1/c1_GraphBuilder.hpp hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp hotspot/src/share/vm/c1/c1_LIRGenerator.cpp hotspot/src/share/vm/c1/c1_Runtime1.cpp hotspot/src/share/vm/c1/c1_Runtime1.hpp hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp hotspot/src/share/vm/ci/ciEnv.cpp hotspot/src/share/vm/ci/ciInstanceKlass.cpp hotspot/src/share/vm/ci/ciObject.cpp hotspot/src/share/vm/ci/ciObjectFactory.cpp hotspot/src/share/vm/ci/ciObjectFactory.hpp hotspot/src/share/vm/ci/ciSymbol.hpp hotspot/src/share/vm/classfile/classFileError.cpp hotspot/src/share/vm/classfile/classFileParser.cpp hotspot/src/share/vm/classfile/classFileParser.hpp hotspot/src/share/vm/classfile/classFileStream.cpp hotspot/src/share/vm/classfile/classFileStream.hpp hotspot/src/share/vm/classfile/classLoader.cpp hotspot/src/share/vm/classfile/classLoader.hpp hotspot/src/share/vm/classfile/dictionary.cpp hotspot/src/share/vm/classfile/dictionary.hpp hotspot/src/share/vm/classfile/javaAssertions.cpp hotspot/src/share/vm/classfile/javaAssertions.hpp hotspot/src/share/vm/classfile/javaClasses.cpp hotspot/src/share/vm/classfile/javaClasses.hpp hotspot/src/share/vm/classfile/loaderConstraints.cpp hotspot/src/share/vm/classfile/loaderConstraints.hpp hotspot/src/share/vm/classfile/placeholders.cpp hotspot/src/share/vm/classfile/placeholders.hpp hotspot/src/share/vm/classfile/resolutionErrors.cpp hotspot/src/share/vm/classfile/resolutionErrors.hpp hotspot/src/share/vm/classfile/stackMapFrame.cpp 
hotspot/src/share/vm/classfile/stackMapFrame.hpp hotspot/src/share/vm/classfile/stackMapTable.cpp hotspot/src/share/vm/classfile/stackMapTable.hpp hotspot/src/share/vm/classfile/symbolTable.cpp hotspot/src/share/vm/classfile/symbolTable.hpp hotspot/src/share/vm/classfile/systemDictionary.cpp hotspot/src/share/vm/classfile/systemDictionary.hpp hotspot/src/share/vm/classfile/verificationType.cpp hotspot/src/share/vm/classfile/verificationType.hpp hotspot/src/share/vm/classfile/verifier.cpp hotspot/src/share/vm/classfile/verifier.hpp hotspot/src/share/vm/classfile/vmSymbols.cpp hotspot/src/share/vm/classfile/vmSymbols.hpp hotspot/src/share/vm/code/nmethod.cpp hotspot/src/share/vm/code/nmethod.hpp hotspot/src/share/vm/compiler/compileBroker.cpp hotspot/src/share/vm/compiler/compilerOracle.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/asParNewGeneration.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/asParNewGeneration.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp 
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge hotspot/src/share/vm/gc_implementation/includeDB_gc_shared hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp hotspot/src/share/vm/gc_interface/collectedHeap.hpp hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp hotspot/src/share/vm/includeDB_compiler1 hotspot/src/share/vm/includeDB_compiler2 hotspot/src/share/vm/includeDB_core hotspot/src/share/vm/interpreter/cInterpretMethod.hpp hotspot/src/share/vm/interpreter/cInterpreter.cpp hotspot/src/share/vm/interpreter/cInterpreter.hpp 
hotspot/src/share/vm/interpreter/interpreter.cpp hotspot/src/share/vm/interpreter/interpreter.hpp hotspot/src/share/vm/memory/binaryTreeDictionary.cpp hotspot/src/share/vm/memory/binaryTreeDictionary.hpp hotspot/src/share/vm/memory/cardTableModRefBS.cpp hotspot/src/share/vm/memory/cardTableModRefBS.hpp hotspot/src/share/vm/memory/cmsLockVerifier.cpp hotspot/src/share/vm/memory/cmsLockVerifier.hpp hotspot/src/share/vm/memory/collectorPolicy.cpp hotspot/src/share/vm/memory/collectorPolicy.hpp hotspot/src/share/vm/memory/compactibleFreeListSpace.cpp hotspot/src/share/vm/memory/compactibleFreeListSpace.hpp hotspot/src/share/vm/memory/concurrentMarkSweepGeneration.cpp hotspot/src/share/vm/memory/concurrentMarkSweepGeneration.hpp hotspot/src/share/vm/memory/concurrentMarkSweepGeneration.inline.hpp hotspot/src/share/vm/memory/defNewGeneration.cpp hotspot/src/share/vm/memory/defNewGeneration.hpp hotspot/src/share/vm/memory/dictionary.cpp hotspot/src/share/vm/memory/dictionary.hpp hotspot/src/share/vm/memory/freeBlockDictionary.cpp hotspot/src/share/vm/memory/freeBlockDictionary.hpp hotspot/src/share/vm/memory/freeChunk.cpp hotspot/src/share/vm/memory/freeList.cpp hotspot/src/share/vm/memory/freeList.hpp hotspot/src/share/vm/memory/gcLocker.cpp hotspot/src/share/vm/memory/gcLocker.hpp hotspot/src/share/vm/memory/genCollectedHeap.cpp hotspot/src/share/vm/memory/genCollectedHeap.hpp hotspot/src/share/vm/memory/genMarkSweep.cpp hotspot/src/share/vm/memory/genOopClosures.hpp hotspot/src/share/vm/memory/genOopClosures.inline.hpp hotspot/src/share/vm/memory/generation.hpp hotspot/src/share/vm/memory/javaClasses.cpp hotspot/src/share/vm/memory/javaClasses.hpp hotspot/src/share/vm/memory/loaderConstraints.cpp hotspot/src/share/vm/memory/loaderConstraints.hpp hotspot/src/share/vm/memory/parGCAllocBuffer.cpp hotspot/src/share/vm/memory/parGCAllocBuffer.hpp hotspot/src/share/vm/memory/parNewGeneration.cpp hotspot/src/share/vm/memory/parNewGeneration.hpp 
hotspot/src/share/vm/memory/permGen.cpp hotspot/src/share/vm/memory/placeholders.cpp hotspot/src/share/vm/memory/placeholders.hpp hotspot/src/share/vm/memory/referenceProcessor.cpp hotspot/src/share/vm/memory/resolutionErrors.cpp hotspot/src/share/vm/memory/resolutionErrors.hpp hotspot/src/share/vm/memory/sharedHeap.cpp hotspot/src/share/vm/memory/space.hpp hotspot/src/share/vm/memory/symbolTable.cpp hotspot/src/share/vm/memory/symbolTable.hpp hotspot/src/share/vm/memory/systemDictionary.cpp hotspot/src/share/vm/memory/systemDictionary.hpp hotspot/src/share/vm/memory/tenuredGeneration.cpp hotspot/src/share/vm/memory/tenuredGeneration.hpp hotspot/src/share/vm/memory/universe.cpp hotspot/src/share/vm/memory/universe.hpp hotspot/src/share/vm/memory/vmSymbols.cpp hotspot/src/share/vm/memory/vmSymbols.hpp hotspot/src/share/vm/oops/constantPoolOop.hpp hotspot/src/share/vm/oops/instanceKlass.cpp hotspot/src/share/vm/oops/klass.hpp hotspot/src/share/vm/oops/methodKlass.cpp hotspot/src/share/vm/oops/methodOop.cpp hotspot/src/share/vm/oops/methodOop.hpp hotspot/src/share/vm/oops/objArrayKlass.cpp hotspot/src/share/vm/oops/oopsHierarchy.hpp hotspot/src/share/vm/oops/typeArrayKlass.cpp hotspot/src/share/vm/opto/block.cpp hotspot/src/share/vm/opto/block.hpp hotspot/src/share/vm/opto/c2_globals.cpp hotspot/src/share/vm/opto/c2_globals.hpp hotspot/src/share/vm/opto/callnode.hpp hotspot/src/share/vm/opto/cfgnode.cpp hotspot/src/share/vm/opto/coalesce.cpp hotspot/src/share/vm/opto/compile.cpp hotspot/src/share/vm/opto/compile.hpp hotspot/src/share/vm/opto/connode.cpp hotspot/src/share/vm/opto/doCall.cpp hotspot/src/share/vm/opto/escape.cpp hotspot/src/share/vm/opto/escape.hpp hotspot/src/share/vm/opto/gcm.cpp hotspot/src/share/vm/opto/graphKit.cpp hotspot/src/share/vm/opto/graphKit.hpp hotspot/src/share/vm/opto/ifnode.cpp hotspot/src/share/vm/opto/lcm.cpp hotspot/src/share/vm/opto/library_call.cpp hotspot/src/share/vm/opto/live.cpp hotspot/src/share/vm/opto/locknode.cpp 
hotspot/src/share/vm/opto/locknode.hpp hotspot/src/share/vm/opto/loopnode.cpp hotspot/src/share/vm/opto/loopnode.hpp hotspot/src/share/vm/opto/machnode.hpp hotspot/src/share/vm/opto/macro.cpp hotspot/src/share/vm/opto/memnode.cpp hotspot/src/share/vm/opto/node.cpp hotspot/src/share/vm/opto/node.hpp hotspot/src/share/vm/opto/output.cpp hotspot/src/share/vm/opto/parse1.cpp hotspot/src/share/vm/opto/parseHelper.cpp hotspot/src/share/vm/opto/phase.cpp hotspot/src/share/vm/opto/phase.hpp hotspot/src/share/vm/opto/runtime.cpp hotspot/src/share/vm/opto/runtime.hpp hotspot/src/share/vm/opto/superword.cpp hotspot/src/share/vm/opto/superword.hpp hotspot/src/share/vm/opto/type.cpp hotspot/src/share/vm/opto/type.hpp hotspot/src/share/vm/opto/vectornode.cpp hotspot/src/share/vm/opto/vectornode.hpp hotspot/src/share/vm/prims/forte.cpp hotspot/src/share/vm/prims/jni.cpp hotspot/src/share/vm/prims/jvm.cpp hotspot/src/share/vm/prims/jvmpi.cpp hotspot/src/share/vm/prims/jvmpi.h hotspot/src/share/vm/prims/jvmpi.hpp hotspot/src/share/vm/prims/jvmpi.inline.hpp hotspot/src/share/vm/prims/jvmtiEnv.cpp hotspot/src/share/vm/prims/jvmtiEnvBase.cpp hotspot/src/share/vm/prims/jvmtiEnvBase.hpp hotspot/src/share/vm/prims/jvmtiImpl.cpp hotspot/src/share/vm/prims/jvmtiImpl.hpp hotspot/src/share/vm/prims/rawMonitor.cpp hotspot/src/share/vm/prims/rawMonitor.hpp hotspot/src/share/vm/prims/unsafe.cpp hotspot/src/share/vm/runtime/arguments.cpp hotspot/src/share/vm/runtime/arguments.hpp hotspot/src/share/vm/runtime/biasedLocking.cpp hotspot/src/share/vm/runtime/biasedLocking.hpp hotspot/src/share/vm/runtime/classFileError.cpp hotspot/src/share/vm/runtime/classFileParser.cpp hotspot/src/share/vm/runtime/classFileParser.hpp hotspot/src/share/vm/runtime/classFileStream.cpp hotspot/src/share/vm/runtime/classFileStream.hpp hotspot/src/share/vm/runtime/classLoader.cpp hotspot/src/share/vm/runtime/classLoader.hpp hotspot/src/share/vm/runtime/concurrentGCThread.cpp 
hotspot/src/share/vm/runtime/concurrentGCThread.hpp hotspot/src/share/vm/runtime/concurrentMarkSweepThread.cpp hotspot/src/share/vm/runtime/concurrentMarkSweepThread.hpp hotspot/src/share/vm/runtime/deoptimization.cpp hotspot/src/share/vm/runtime/globals.cpp hotspot/src/share/vm/runtime/globals.hpp hotspot/src/share/vm/runtime/globals_extension.hpp hotspot/src/share/vm/runtime/hpi.cpp hotspot/src/share/vm/runtime/init.cpp hotspot/src/share/vm/runtime/interfaceSupport.hpp hotspot/src/share/vm/runtime/java.cpp hotspot/src/share/vm/runtime/javaAssertions.cpp hotspot/src/share/vm/runtime/javaAssertions.hpp hotspot/src/share/vm/runtime/jniHandles.cpp hotspot/src/share/vm/runtime/jniHandles.hpp hotspot/src/share/vm/runtime/mutexLocker.hpp hotspot/src/share/vm/runtime/objectMonitor.hpp hotspot/src/share/vm/runtime/os.cpp hotspot/src/share/vm/runtime/os.hpp hotspot/src/share/vm/runtime/sharedRuntime.cpp hotspot/src/share/vm/runtime/sharedRuntime.hpp hotspot/src/share/vm/runtime/stackMapFrame.cpp hotspot/src/share/vm/runtime/stackMapFrame.hpp hotspot/src/share/vm/runtime/stackMapTable.cpp hotspot/src/share/vm/runtime/stackMapTable.hpp hotspot/src/share/vm/runtime/stubCodeGenerator.cpp hotspot/src/share/vm/runtime/stubCodeGenerator.hpp hotspot/src/share/vm/runtime/stubRoutines.cpp hotspot/src/share/vm/runtime/stubRoutines.hpp hotspot/src/share/vm/runtime/synchronizer.cpp hotspot/src/share/vm/runtime/synchronizer.hpp hotspot/src/share/vm/runtime/thread.cpp hotspot/src/share/vm/runtime/thread.hpp hotspot/src/share/vm/runtime/verificationType.cpp hotspot/src/share/vm/runtime/verificationType.hpp hotspot/src/share/vm/runtime/verifier.cpp hotspot/src/share/vm/runtime/verifier.hpp hotspot/src/share/vm/runtime/vframe.cpp hotspot/src/share/vm/runtime/vframe.hpp hotspot/src/share/vm/runtime/vmStructs.cpp hotspot/src/share/vm/services/threadService.cpp hotspot/src/share/vm/utilities/copy.cpp hotspot/src/share/vm/utilities/copy.hpp hotspot/src/share/vm/utilities/globalDefinitions.hpp 
j2se/make/common/Defs-linux.gmk j2se/make/common/shared/Compiler-gcc.gmk j2se/make/common/shared/Platform.gmk j2se/make/sun/awt/Makefile j2se/make/sun/awt/mawt.gmk j2se/make/sun/jdbc/Makefile j2se/make/templates/bsd-header j2se/make/templates/gpl-cp-header j2se/make/templates/gpl-header j2se/src/share/back/ThreadReferenceImpl.c j2se/src/share/back/util.h j2se/src/share/classes/com/sun/corba/se/impl/activation/CommandHandler.java j2se/src/share/classes/com/sun/jdi/connect/IllegalConnectorArgumentsException.java j2se/src/share/classes/com/sun/script/javascript/ExternalScriptable.java j2se/src/share/classes/com/sun/script/javascript/JavaAdapter.java j2se/src/share/classes/com/sun/script/javascript/RhinoClassShutter.java j2se/src/share/classes/com/sun/script/javascript/RhinoScriptEngine.java j2se/src/share/classes/com/sun/script/util/InterfaceImplementor.java j2se/src/share/classes/com/sun/tools/corba/se/idl/first.set j2se/src/share/classes/com/sun/tools/corba/se/idl/follow.set j2se/src/share/classes/com/sun/tools/example/debug/bdi/ChildSession.java j2se/src/share/classes/com/sun/tools/example/debug/bdi/EventRequestSpecList.java j2se/src/share/classes/com/sun/tools/example/debug/bdi/ExecutionManager.java j2se/src/share/classes/com/sun/tools/example/debug/bdi/MethodBreakpointSpec.java j2se/src/share/classes/com/sun/tools/example/debug/bdi/ThreadGroupIterator.java j2se/src/share/classes/com/sun/tools/example/debug/bdi/ThreadIterator.java j2se/src/share/classes/com/sun/tools/example/debug/expr/ExpressionParser.java j2se/src/share/classes/com/sun/tools/example/debug/expr/LValue.java j2se/src/share/classes/com/sun/tools/example/debug/gui/CommandInterpreter.java j2se/src/share/classes/com/sun/tools/example/debug/gui/ContextManager.java j2se/src/share/classes/com/sun/tools/example/debug/gui/JDBFileFilter.java j2se/src/share/classes/com/sun/tools/example/debug/gui/JDBMenuBar.java j2se/src/share/classes/com/sun/tools/example/debug/gui/LaunchTool.java 
j2se/src/share/classes/com/sun/tools/example/debug/gui/MonitorListModel.java j2se/src/share/classes/com/sun/tools/example/debug/gui/SearchPath.java j2se/src/share/classes/com/sun/tools/example/debug/gui/SourceManager.java j2se/src/share/classes/com/sun/tools/example/debug/gui/SourceModel.java j2se/src/share/classes/com/sun/tools/example/debug/gui/ThreadTreeTool.java j2se/src/share/classes/com/sun/tools/example/debug/tty/BreakpointSpec.java j2se/src/share/classes/com/sun/tools/example/debug/tty/Commands.java j2se/src/share/classes/com/sun/tools/example/debug/tty/Env.java j2se/src/share/classes/com/sun/tools/example/debug/tty/EventRequestSpec.java j2se/src/share/classes/com/sun/tools/example/debug/tty/EventRequestSpecList.java j2se/src/share/classes/com/sun/tools/example/debug/tty/MessageOutput.java j2se/src/share/classes/com/sun/tools/example/debug/tty/SourceMapper.java j2se/src/share/classes/com/sun/tools/example/debug/tty/TTY.java j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadGroupIterator.java j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadInfo.java j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadIterator.java j2se/src/share/classes/com/sun/tools/example/debug/tty/VMConnection.java j2se/src/share/classes/com/sun/tools/example/doc/index.html j2se/src/share/classes/com/sun/tools/example/doc/javadt.html j2se/src/share/classes/com/sun/tools/example/doc/jdb.html j2se/src/share/classes/com/sun/tools/example/doc/trace.html j2se/src/share/classes/com/sun/tools/example/trace/EventThread.java j2se/src/share/classes/com/sun/tools/example/trace/StreamRedirectThread.java j2se/src/share/classes/com/sun/tools/example/trace/Trace.java j2se/src/share/classes/com/sun/tools/hat/internal/model/JavaClass.java j2se/src/share/classes/com/sun/tools/hat/internal/model/ReachableExcludesImpl.java j2se/src/share/classes/com/sun/tools/hat/internal/model/ReachableObjects.java j2se/src/share/classes/com/sun/tools/hat/internal/model/Snapshot.java 
j2se/src/share/classes/com/sun/tools/hat/internal/oql/OQLEngine.java j2se/src/share/classes/com/sun/tools/hat/internal/parser/HprofReader.java j2se/src/share/classes/com/sun/tools/hat/internal/server/FinalizerSummaryQuery.java j2se/src/share/classes/com/sun/tools/hat/internal/server/HistogramQuery.java j2se/src/share/classes/com/sun/tools/hat/internal/server/PlatformClasses.java j2se/src/share/classes/com/sun/tools/hat/internal/server/RefsByTypeQuery.java j2se/src/share/classes/com/sun/tools/hat/internal/util/VectorSorter.java j2se/src/share/classes/com/sun/tools/javac/jvm/ClassWriter.java j2se/src/share/classes/com/sun/tools/jdi/AbstractLauncher.java j2se/src/share/classes/com/sun/tools/jdi/ArrayReferenceImpl.java j2se/src/share/classes/com/sun/tools/jdi/ArrayTypeImpl.java j2se/src/share/classes/com/sun/tools/jdi/ClassLoaderReferenceImpl.java j2se/src/share/classes/com/sun/tools/jdi/ClassTypeImpl.java j2se/src/share/classes/com/sun/tools/jdi/ConcreteMethodImpl.java j2se/src/share/classes/com/sun/tools/jdi/ConnectorImpl.java j2se/src/share/classes/com/sun/tools/jdi/EventQueueImpl.java j2se/src/share/classes/com/sun/tools/jdi/EventRequestManagerImpl.java j2se/src/share/classes/com/sun/tools/jdi/FieldImpl.java j2se/src/share/classes/com/sun/tools/jdi/GenericListeningConnector.java j2se/src/share/classes/com/sun/tools/jdi/InterfaceTypeImpl.java j2se/src/share/classes/com/sun/tools/jdi/JNITypeParser.java j2se/src/share/classes/com/sun/tools/jdi/MethodImpl.java j2se/src/share/classes/com/sun/tools/jdi/NonConcreteMethodImpl.java j2se/src/share/classes/com/sun/tools/jdi/ObjectReferenceImpl.java j2se/src/share/classes/com/sun/tools/jdi/ObsoleteMethodImpl.java j2se/src/share/classes/com/sun/tools/jdi/PacketStream.java j2se/src/share/classes/com/sun/tools/jdi/ReferenceTypeImpl.java j2se/src/share/classes/com/sun/tools/jdi/SDE.java j2se/src/share/classes/com/sun/tools/jdi/StackFrameImpl.java j2se/src/share/classes/com/sun/tools/jdi/TargetVM.java 
j2se/src/share/classes/com/sun/tools/jdi/ThreadGroupReferenceImpl.java j2se/src/share/classes/com/sun/tools/jdi/ThreadReferenceImpl.java j2se/src/share/classes/com/sun/tools/jdi/VMState.java j2se/src/share/classes/com/sun/tools/jdi/VirtualMachineImpl.java j2se/src/share/classes/com/sun/tools/jdi/VirtualMachineManagerImpl.java j2se/src/share/classes/com/sun/tools/jdwpgen/AbstractSimpleNode.java j2se/src/share/classes/com/sun/tools/jdwpgen/ConstantNode.java j2se/src/share/classes/com/sun/tools/jdwpgen/ConstantSetNode.java j2se/src/share/classes/com/sun/tools/jdwpgen/ErrorNode.java j2se/src/share/classes/com/sun/tools/jdwpgen/Node.java j2se/src/share/classes/com/sun/tools/jdwpgen/OutNode.java j2se/src/share/classes/com/sun/tools/jdwpgen/Parse.java j2se/src/share/classes/com/sun/tools/jdwpgen/ReplyNode.java j2se/src/share/classes/java/lang/management/ManagementFactory.java j2se/src/share/classes/java/nio/channels/SocketChannel.java j2se/src/share/classes/java/security/AccessController.java j2se/src/share/classes/java/util/Arrays.java j2se/src/share/classes/java/util/WeakHashMap.java j2se/src/share/classes/java/util/logging/FileHandler.java j2se/src/share/classes/java/util/logging/Level.java j2se/src/share/classes/java/util/logging/LogRecord.java j2se/src/share/classes/java/util/logging/Logger.java j2se/src/share/classes/javax/script/ScriptEngineManager.java j2se/src/share/classes/sun/instrument/InstrumentationImpl.java j2se/src/share/classes/sun/jvmstat/monitor/MonitoredHost.java j2se/src/share/classes/sun/jvmstat/monitor/MonitoredVm.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AbstractMonitoredVm.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AbstractPerfDataBuffer.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AliasFileParser.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/PerfDataBufferImpl.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/file/MonitoredHostProvider.java 
j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/LocalMonitoredVm.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/LocalVmManager.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/MonitoredHostProvider.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/PerfDataBuffer.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/MonitoredHostProvider.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/RemoteMonitoredVm.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/RemoteVmManager.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/v1_0/PerfDataBuffer.java j2se/src/share/classes/sun/jvmstat/perfdata/monitor/v2_0/PerfDataBuffer.java j2se/src/share/classes/sun/management/Agent.java j2se/src/share/classes/sun/management/GcInfoCompositeData.java j2se/src/share/classes/sun/management/HotspotClassLoading.java j2se/src/share/classes/sun/management/HotspotCompilation.java j2se/src/share/classes/sun/management/HotspotMemory.java j2se/src/share/classes/sun/management/HotspotRuntime.java j2se/src/share/classes/sun/management/HotspotThread.java j2se/src/share/classes/sun/management/ManagementFactory.java j2se/src/share/classes/sun/management/MappedMXBeanType.java j2se/src/share/classes/sun/management/MemoryPoolImpl.java j2se/src/share/classes/sun/management/MonitorInfoCompositeData.java j2se/src/share/classes/sun/management/NotificationEmitterSupport.java j2se/src/share/classes/sun/management/RuntimeImpl.java j2se/src/share/classes/sun/management/VMManagement.java j2se/src/share/classes/sun/management/VMManagementImpl.java j2se/src/share/classes/sun/management/counter/perf/PerfInstrumentation.java j2se/src/share/classes/sun/management/jmxremote/ConnectorBootstrap.java j2se/src/share/classes/sun/management/snmp/jvminstr/README j2se/src/share/classes/sun/misc/Perf.java j2se/src/share/classes/sun/nio/ch/ChannelInputStream.java 
j2se/src/share/classes/sun/nio/ch/NativeThreadSet.java j2se/src/share/classes/sun/security/provider/certpath/CrlRevocationChecker.java j2se/src/share/classes/sun/tools/jinfo/JInfo.java j2se/src/share/classes/sun/tools/jmap/JMap.java j2se/src/share/classes/sun/tools/jstack/JStack.java j2se/src/share/classes/sun/tools/jstat/Alignment.java j2se/src/share/classes/sun/tools/jstat/Arguments.java j2se/src/share/classes/sun/tools/jstat/AscendingMonitorComparator.java j2se/src/share/classes/sun/tools/jstat/DescendingMonitorComparator.java j2se/src/share/classes/sun/tools/jstat/ExpressionExecuter.java j2se/src/share/classes/sun/tools/jstat/JStatLogger.java j2se/src/share/classes/sun/tools/jstat/Jstat.java j2se/src/share/classes/sun/tools/jstat/Operator.java j2se/src/share/classes/sun/tools/jstat/OptionFormat.java j2se/src/share/classes/sun/tools/jstat/OptionLister.java j2se/src/share/classes/sun/tools/jstat/Parser.java j2se/src/share/classes/sun/tools/jstat/Scale.java j2se/src/share/classes/sun/tools/jstatd/RemoteHostImpl.java j2se/src/share/demo/jvmti/java_crw_demo/java_crw_demo.c j2se/src/share/instrument/InstrumentationImplNativeMethods.c j2se/src/share/instrument/InvocationAdapter.c j2se/src/share/instrument/JPLISAgent.c j2se/src/share/native/common/check_code.c j2se/src/share/native/java/net/Inet6Address.c j2se/src/share/native/java/net/net_util.h j2se/src/solaris/bin/java_md.c j2se/src/solaris/hpi/native_threads/src/sys_api_td.c j2se/src/solaris/native/java/net/PlainDatagramSocketImpl.c j2se/src/solaris/native/java/net/PlainSocketImpl.c j2se/src/solaris/native/java/net/net_util_md.c j2se/src/solaris/native/java/net/net_util_md.h j2se/src/solaris/native/sun/nio/ch/DatagramChannelImpl.c j2se/src/solaris/native/sun/nio/ch/Net.c j2se/src/windows/native/java/net/PlainDatagramSocketImpl.c j2se/src/windows/native/java/net/PlainSocketImpl.c j2se/src/windows/native/java/net/net_util_md.c j2se/src/windows/native/sun/nio/ch/DatagramChannelImpl.c 
j2se/src/windows/native/sun/nio/ch/Net.c j2se/test/com/sun/jdi/JdbReadTwiceTest.sh j2se/test/com/sun/jdi/ShellScaffold.sh j2se/test/com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh j2se/test/com/sun/management/UnixOperatingSystemMXBean/GetOpenFileDescriptorCount.sh j2se/test/java/lang/management/MemoryPoolMXBean/ThresholdTest.java j2se/test/java/lang/management/OperatingSystemMXBean/GetSystemLoadAverage.java j2se/test/java/lang/management/OperatingSystemMXBean/TestSystemLoadAvg.sh j2se/test/java/lang/ref/SoftReference/Pin.java j2se/test/java/net/ipv6tests/B6521014.java j2se/test/java/nio/channels/Channels/ReadOffset.java j2se/test/java/nio/channels/SocketChannel/OpenLeak.java j2se/test/java/security/Security/signedfirst/Dyn.sh j2se/test/java/security/Security/signedfirst/Static.sh j2se/test/java/util/Arrays/FloatDoubleOrder.java j2se/test/java/util/PriorityQueue/PriorityQueueSort.java j2se/test/java/util/Random/DistinctSeeds.java j2se/test/java/util/concurrent/BlockingQueue/CancelledProducerConsumerLoops.java j2se/test/java/util/concurrent/BlockingQueue/LoopHelpers.java j2se/test/java/util/concurrent/BlockingQueue/MultipleProducersSingleConsumerLoops.java j2se/test/java/util/concurrent/BlockingQueue/PollMemoryLeak.java j2se/test/java/util/concurrent/BlockingQueue/ProducerConsumerLoops.java j2se/test/java/util/concurrent/BlockingQueue/SingleProducerMultipleConsumerLoops.java j2se/test/java/util/concurrent/ConcurrentHashMap/LoopHelpers.java j2se/test/java/util/concurrent/ConcurrentHashMap/MapCheck.java j2se/test/java/util/concurrent/ConcurrentHashMap/MapLoops.java j2se/test/java/util/concurrent/ConcurrentLinkedQueue/ConcurrentQueueLoops.java j2se/test/java/util/concurrent/ConcurrentLinkedQueue/LoopHelpers.java j2se/test/java/util/concurrent/Exchanger/ExchangeLoops.java j2se/test/java/util/concurrent/Exchanger/LoopHelpers.java j2se/test/java/util/concurrent/ExecutorCompletionService/ExecutorCompletionServiceLoops.java 
j2se/test/java/util/concurrent/ExecutorCompletionService/LoopHelpers.java j2se/test/java/util/concurrent/FutureTask/CancelledFutureLoops.java j2se/test/java/util/concurrent/FutureTask/LoopHelpers.java j2se/test/java/util/concurrent/locks/ReentrantLock/CancelledLockLoops.java j2se/test/java/util/concurrent/locks/ReentrantLock/LockOncePerThreadLoops.java j2se/test/java/util/concurrent/locks/ReentrantLock/LoopHelpers.java j2se/test/java/util/concurrent/locks/ReentrantLock/SimpleReentrantLockLoops.java j2se/test/java/util/concurrent/locks/ReentrantLock/TimeoutLockLoops.java j2se/test/java/util/concurrent/locks/ReentrantReadWriteLock/LoopHelpers.java j2se/test/java/util/concurrent/locks/ReentrantReadWriteLock/MapLoops.java j2se/test/java/util/concurrent/locks/ReentrantReadWriteLock/RWMap.java j2se/test/javax/management/ImplementationVersion/ImplVersionCommand.java j2se/test/javax/management/ImplementationVersion/ImplVersionReader.java j2se/test/javax/management/monitor/MBeanServerBuilderImpl.java j2se/test/javax/management/monitor/MBeanServerForwarderInvocationHandler.java j2se/test/javax/management/remote/mandatory/version/ImplVersionCommand.java j2se/test/javax/management/remote/mandatory/version/ImplVersionReader.java j2se/test/sun/net/www/http/ChunkedInputStream/test.txt j2se/test/sun/security/provider/PolicyFile/getinstance/getinstance.sh j2se/test/sun/security/x509/AVA/AVAEqualsHashCode.java j2se/test/sun/security/x509/AVA/EmptyValue.java j2se/test/sun/security/x509/X500Name/AllAttribs.java j2se/test/sun/security/x509/X500Name/DerValueConstructor.java j2se/test/sun/security/x509/X500Name/NullX500Name.java j2se/test/tools/javac/6547131/T.java j2se/test/tools/javac/6547131/p/Outer$I.class j2se/test/tools/javac/6547131/p/Outer$I.jasm j2se/test/tools/javac/6547131/p/Outer.class j2se/test/tools/javac/6547131/p/Outer.jasm j2se/test/vm/verifier/VerifyProtectedConstructor.java j2se/test/vm/verifier/VerifyStackForExceptionHandlers.java
diffstat 613 files changed, 56272 insertions(+), 54507 deletions(-) [+]
line wrap: on
line diff
--- a/control/make/motif-rules.gmk	Tue May 08 19:38:19 2007 +0000
+++ b/control/make/motif-rules.gmk	Fri May 25 00:49:14 2007 +0000
@@ -48,8 +48,8 @@
 
 motif-build:
 	$(CD) $(MOTIF_TOPDIR)/lib/Xm ; \
-	$(MAKE) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL) ARCH=$(ARCH); \
-	$(MAKE) includes ARCH_DATA_MODEL=$(ARCH_DATA_MODEL) ARCH=$(ARCH)
+	$(MAKE) PLATFORM=$(PLATFORM) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL) ARCH=$(ARCH) ARCH_FAMILY=$(ARCH_FAMILY) all; \
+	$(MAKE) PLATFORM=$(PLATFORM) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL) ARCH=$(ARCH) ARCH_FAMILY=$(ARCH_FAMILY) includes
 
 motif-install: motif-install-lib motif-install-include
 
@@ -69,7 +69,7 @@
 #
 motif-clobber:
 	( $(CD) $(MOTIF_TOPDIR)/lib/Xm ; \
-		$(MAKE) clean ARCH_DATA_MODEL=$(ARCH_DATA_MODEL) ARCH=$(ARCH); ); \
+		$(MAKE) PLATFORM=$(PLATFORM) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL) ARCH=$(ARCH) ARCH_FAMILY=$(ARCH_FAMILY) clean; ); \
 	$(RM) $(MOTIF_TOPDIR)/lib/libXm.a \
 	      $(MOTIF_TOPDIR)/lib/Xm/Xm.msg 
 	$(RM) -r $(MOTIF_TOPDIR)/lib/Xm/exports
--- a/control/make/templates/bsd-header	Tue May 08 19:38:19 2007 +0000
+++ b/control/make/templates/bsd-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All rights reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
--- a/control/make/templates/gpl-cp-header	Tue May 08 19:38:19 2007 +0000
+++ b/control/make/templates/gpl-cp-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All Rights Reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/control/make/templates/gpl-header	Tue May 08 19:38:19 2007 +0000
+++ b/control/make/templates/gpl-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All Rights Reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/OopHandle.java	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/OopHandle.java	Fri May 25 00:49:14 2007 +0000
@@ -36,9 +36,6 @@
 
     <P> Note that in the case of debugging a remote VM, it is not
     workable to handle the automatic updating of these handles.
-// #ifdef JVMPI_SUPPORT
-//     JVMPI attempts to do this but it is too intrusive.
-// #endif // JVMPI_SUPPORT
     If the debugger allows the VM to resume running, it will have to
     look up once again any object references via the path they were
     found (i.e., the activation on the stack as the root, etc.) </P>
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JVMPIDaemonThread.java	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// #ifdef JVMPI_SUPPORT
-// package sun.jvm.hotspot.runtime;
-// 
-// 
-// import sun.jvm.hotspot.debugger.*;
-// import sun.jvm.hotspot.types.*;
-// 
-// public class JVMPIDaemonThread extends JavaThread {
-//   public JVMPIDaemonThread(Address addr) {
-//     super(addr);
-//   }
-// 
-//   public boolean isJavaThread() { return false; }
-// 
-//   public boolean isJVMPIDaemonThread() { return true; }
-// }
-// #endif // JVMPI_SUPPORT
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaVFrame.java	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaVFrame.java	Fri May 25 00:49:14 2007 +0000
@@ -45,12 +45,6 @@
     super(fr, regMap, thread);
   }
 
-// #ifdef JVMPI_SUPPORT
-//  /** Fabricate heavyweight monitors for lightweight monitors */
-//  // FIXME: not yet implemented 
-//  //  void jvmpi_fab_heavy_monitors(bool query, int* index, int frame_count, GrowableArray<ObjectMonitor*>* fab_list);
-// #endif // JVMPI_SUPPORT
-
   /** Get monitor (if any) that this JavaVFrame is trying to enter */
   // FIXME: not yet implemented 
   //  public Address getPendingMonitor(int frameCount);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java	Fri May 25 00:49:14 2007 +0000
@@ -85,11 +85,7 @@
 
   private static class ObjectMonitorIterator implements Iterator {
 
-// #ifdef JVMPI_SUPPORT
-//     // JVMTI/JVMPI raw monitors are not pointed by gBlockList
-// #else // !JVMPI_SUPPORT
     // JVMTI raw monitors are not pointed by gBlockList
-// #endif // JVMPI_SUPPORT
     // and are not included by this Iterator. May add them later.
 
     ObjectMonitorIterator() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java	Fri May 25 00:49:14 2007 +0000
@@ -110,9 +110,6 @@
   public boolean   isJavaThread()              { return false; }
   public boolean   isCompilerThread()          { return false; }
   public boolean   isHiddenFromExternalView()  { return false; } 
-// #ifdef JVMPI_SUPPORT
-//   public boolean   isJVMPIDaemonThread()       { return false; }
-// #endif // JVMPI_SUPPORT
   public boolean   isJvmtiAgentThread()        { return false; }
   public boolean   isWatcherThread()           { return false; }
   public boolean   isConcurrentMarkSweepThread() { return false; }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java	Fri May 25 00:49:14 2007 +0000
@@ -106,9 +106,6 @@
         }
         // for now, use JavaThread itself. fix it later with appropriate class if needed
         virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class);
-// #ifdef JVMPI_SUPPORT
-//         virtualConstructor.addMapping("JVMPIDaemonThread", JVMPIDaemonThread.class);
-// #endif // JVMPI_SUPPORT
         virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
         virtualConstructor.addMapping("LowMemoryDetectorThread", LowMemoryDetectorThread.class);
     }
@@ -117,11 +114,7 @@
     }
     
     /** NOTE: this returns objects of type JavaThread, CompilerThread,
-// #ifdef JVMPI_SUPPORT
-//       JvmtiAgentThread, LowMemoryDetectorThread and JVMPIDaemonThread.
-// #else // !JVMPI_SUPPORT
       JvmtiAgentThread, and LowMemoryDetectorThread.
-// #endif // JVMPI_SUPPORT
       The latter four are subclasses of the former. Most operations
       (fetching the top frame, etc.) are only allowed to be performed on
       a "pure" JavaThread. For this reason, {@link
@@ -150,12 +143,7 @@
             return thread;
         } catch (Exception e) {
             throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
-// #ifdef JVMPI_SUPPORT
-//             " (expected type JavaThread, CompilerThread, LowMemoryDetectorThread, JvmtiAgentThread, JVMPIDaemonThread or SurrogateLockerThread)", e);
-// #else // !JVMPI_SUPPORT
             " (expected type JavaThread, CompilerThread, LowMemoryDetectorThread, JvmtiAgentThread, or SurrogateLockerThread)", e);
-// #endif // JVMPI_SUPPORT
-            
         }
     }
     
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/MonitorCacheDumpPanel.java	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/MonitorCacheDumpPanel.java	Fri May 25 00:49:14 2007 +0000
@@ -54,11 +54,7 @@
 
     ByteArrayOutputStream bos = new ByteArrayOutputStream();
     PrintStream tty = new PrintStream(bos);
-// #ifdef JVMPI_SUPPORT
-//     tty.println("Monitor Cache Dump (not including JVMPI/JVMTI raw monitors):");
-// #else // !JVMPI_SUPPORT
     tty.println("Monitor Cache Dump (not including JVMTI raw monitors):");
-// #endif // JVMPI_SUPPORT
     tty.println();
     dumpOn(tty);
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri May 25 00:49:14 2007 +0000
@@ -1090,9 +1090,6 @@
 vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
 vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
 vmType2Class["SurrogateLockerThread"] = sapkg.runtime.JavaThread;
-// #ifdef JVMPI_SUPPORT
-// vmType2Class["JVMPIDaemonThread"] = sapkg.runtime.JVMPIDaemonThread;
-// #endif // JVMPI_SUPPORT
 vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;
 
 // gc
--- a/hotspot/build/linux/makefiles/top.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/linux/makefiles/top.make	Fri May 25 00:49:14 2007 +0000
@@ -62,6 +62,7 @@
 Include_DBs/GC          = $(VM)/includeDB_gc \
                           $(VM)/gc_implementation/includeDB_gc_parallelScavenge \
                           $(VM)/gc_implementation/includeDB_gc_concurrentMarkSweep \
+                          $(VM)/gc_implementation/includeDB_gc_parNew \
                           $(VM)/gc_implementation/includeDB_gc_shared
 
 Include_DBs/CORE        = $(VM)/includeDB_core   $(Include_DBs/GC)
--- a/hotspot/build/linux/makefiles/vm.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/linux/makefiles/vm.make	Fri May 25 00:49:14 2007 +0000
@@ -183,6 +183,11 @@
 LD_SCRIPT_FLAG = -Wl,-T,$(LD_SCRIPT)
 endif
 
+# With more recent Redhat releases (or the cutting edge version Fedora), if
+# SELinux is configured to be enabled, the runtime linker will fail to apply
+# the text relocation to libjvm.so considering that it is built as a non-PIC
+# DSO. To workaround that, we run chcon to libjvm.so after it is built. See 
+# details in bug 6538311.
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
 	$(QUIETLY) {                                                    \
 	    echo Linking vm...;                                         \
@@ -191,6 +196,15 @@
 		       $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);       \
 	    $(LINK_LIB.CC/POST_HOOK)                                    \
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
+	    if [ -x /usr/sbin/selinuxenabled ] ; then                   \
+	      /usr/sbin/selinuxenabled;                                 \
+              if [ $$? = 0 ] ; then					\
+		/usr/bin/chcon -t textrel_shlib_t $@;                   \
+		if [ $$? != 0 ]; then                                   \
+		  echo "ERROR: Cannot chcon $@"; exit 1;                \
+		fi							\
+	      fi							\
+	    fi                                                          \
 	}
 
 DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
--- a/hotspot/build/solaris/makefiles/reorder_COMPILER1_i486	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_COMPILER1_i486	Fri May 25 00:49:14 2007 +0000
@@ -756,9 +756,6 @@
 text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_;
 text: .text%__1cJAssemblerElock6M_v_;
 text: .text%__1cJAssemblerHcmpxchg6MpnMRegisterImpl_nHAddress__v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MnITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cHAddress2t6MinJrelocInfoJrelocType__v_;
 text: .text%__1cOMacroAssemblerFleave6M_v_;
 text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_;
@@ -2302,10 +2299,6 @@
 text: .text%__1cLJvmtiExportQenter_live_phase6F_v_;
 text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_;
 text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-# text: .text%__1cUVM_JVMPIPostObjAllocRclear_restriction6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cHMonitorKnotify_all6M_i_;
 text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_;
 text: .text%__1cMPeriodicTask2t6MI_v_;
@@ -4687,9 +4680,6 @@
 text: .text%__1cNMemoryServiceGgc_end6Fi_v_;
 text: .text%__1cPGCMemoryManagerGgc_end6M_v_;
 text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpiYpost_class_unload_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cNJvmtiGCMarker2T6M_v_;
 text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_;
 text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_;
--- a/hotspot/build/solaris/makefiles/reorder_COMPILER1_sparc	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_COMPILER1_sparc	Fri May 25 00:49:14 2007 +0000
@@ -565,9 +565,6 @@
 text: .text%__1cOMacroAssemblerDset6MipnMRegisterImpl_rknQRelocationHolder__v_: interpreter_sparc.o;
 text: .text%__1cZInterpreterMacroAssemblerHpop_ptr6MpnMRegisterImpl__v_;
 text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MinITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cOMacroAssemblerNload_contents6MrnHAddress_pnMRegisterImpl_i_v_: interp_masm_sparc.o;
 text: .text%__1cOMacroAssemblerCbr6MnJAssemblerJCondition_in0BHPredict_rnFLabel__v_: interp_masm_sparc.o;
 text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_;
@@ -1710,9 +1707,6 @@
 text: .text%__1cLJvmtiExportQenter_live_phase6F_v_;
 text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_;
 text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cHMonitorKnotify_all6M_i_;
 text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_;
 text: .text%__1cMPeriodicTask2t6MI_v_;
@@ -4034,9 +4028,6 @@
 text: .text%__1cXTraceMemoryManagerStats2T6M_v_;
 text: .text%__1cPGCMemoryManagerGgc_end6M_v_;
 text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpiYpost_class_unload_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cNJvmtiGCMarker2T6M_v_;
 text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_;
 text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_;
--- a/hotspot/build/solaris/makefiles/reorder_COMPILER2_amd64	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_COMPILER2_amd64	Fri May 25 00:49:14 2007 +0000
@@ -6547,9 +6547,6 @@
 text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_;
 text: .text%__1cQmulI_mem_immNodePoper_input_base6kM_I_;
 text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MnITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
 text: .text%__1cNdecL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
 text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_;
@@ -7833,9 +7830,6 @@
 text: .text%__1cbEinitialize_converter_functions6F_v_;
 text: .text%JVM_SupportsCX8;
 text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cUVM_JVMPIPostObjAllocRclear_restriction6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cTcompilerOracle_init6F_v_;
 text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_;
 text: .text%__1cOCompilerOraclePparse_from_file6F_v_;
@@ -7844,9 +7838,6 @@
 text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__;
 text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_;
 text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_;
 text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
@@ -8065,15 +8056,9 @@
 text: .text%__1cICarSpaceEinit6F_v_;
 text: .text%__1cNcarSpace_init6F_v_;
 text: .text%__1cORuntimeServiceEinit6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
 text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o;
 text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__;
 text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_;
--- a/hotspot/build/solaris/makefiles/reorder_COMPILER2_i486	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_COMPILER2_i486	Fri May 25 00:49:14 2007 +0000
@@ -6873,9 +6873,6 @@
 text: .text%__1cRaddL_eReg_memNodeErule6kM_I_: ad_i486_misc.o;
 text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__;
 text: .text%__1cOMacroAssemblerFenter6M_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MnITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%Unsafe_GetNativeByte;
 text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_;
 text: .text%__1cTsarL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
@@ -7895,10 +7892,6 @@
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
 text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o;
 text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-# text: .text%__1cUVM_JVMPIPostObjAllocRclear_restriction6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%JVM_SupportsCX8;
 text: .text%__1cbEinitialize_converter_functions6F_v_;
 text: .text%JVM_Socket;
@@ -8098,10 +8091,6 @@
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
 text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
 text: .text%__1cORuntimeServiceEinit6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-# text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__;
--- a/hotspot/build/solaris/makefiles/reorder_COMPILER2_sparc	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_COMPILER2_sparc	Fri May 25 00:49:14 2007 +0000
@@ -5845,9 +5845,6 @@
 text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
 text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_;
 text: .text%__1cLcastP2INodeErule6kM_I_: ad_sparc_misc.o;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MinITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__;
 text: .text%__1cLCastP2INodeUdepends_only_on_test6kM_i_: classes.o;
 text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
@@ -6742,9 +6739,6 @@
 text: .text%__1cbEinitialize_converter_functions6F_v_;
 text: .text%JVM_SupportsCX8;
 text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
 text: .text%__1cUJvmtiEventControllerIvm_start6F_v_;
 text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
@@ -6910,10 +6904,6 @@
 text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
 text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_;
 text: .text%__1cORuntimeServiceEinit6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-# text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__;
 text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_;
--- a/hotspot/build/solaris/makefiles/reorder_COMPILER2_sparcv9	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_COMPILER2_sparcv9	Fri May 25 00:49:14 2007 +0000
@@ -5834,9 +5834,6 @@
 text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__;
 text: .text%__1cKCodeBufferQalloc_relocation6MI_v_;
 text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MinITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_;
 text: .text%__1cNloadConPCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
 text: .text%__1cNloadConL0NodeEsize6kMpnNPhaseRegAlloc__I_;
@@ -6688,9 +6685,6 @@
 text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_;
 text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_;
 text: .text%__1cKvtune_init6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%JVM_SupportsCX8;
 text: .text%__1cLOptoRuntimebSgenerate_polling_page_safepoint_handler_blob6F_v_;
 text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_;
@@ -6865,10 +6859,6 @@
 text: .text%__1cNTemplateTableGdaload6F_v_;
 text: .text%__1cNTemplateTableGaaload6F_v_;
 text: .text%__1cWInlineCacheBuffer_init6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-# text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_;
 text: .text%__1cLOptoRuntimeNfetch_monitor6FipnJBasicLock_pC_pnHoopDesc__;
--- a/hotspot/build/solaris/makefiles/reorder_TIERED_amd64	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_TIERED_amd64	Fri May 25 00:49:14 2007 +0000
@@ -6547,9 +6547,6 @@
 text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_;
 text: .text%__1cQmulI_mem_immNodePoper_input_base6kM_I_;
 text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MnITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
 text: .text%__1cNdecL_rRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
 text: .text%__1cOLibraryCallKitXinline_string_compareTo6M_i_;
@@ -7833,9 +7830,6 @@
 text: .text%__1cbEinitialize_converter_functions6F_v_;
 text: .text%JVM_SupportsCX8;
 text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cUVM_JVMPIPostObjAllocRclear_restriction6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cTcompilerOracle_init6F_v_;
 text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_;
 text: .text%__1cOCompilerOraclePparse_from_file6F_v_;
@@ -7844,9 +7838,6 @@
 text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__;
 text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_;
 text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_;
 text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
@@ -8065,15 +8056,9 @@
 text: .text%__1cICarSpaceEinit6F_v_;
 text: .text%__1cNcarSpace_init6F_v_;
 text: .text%__1cORuntimeServiceEinit6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
 text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o;
 text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__;
 text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_;
--- a/hotspot/build/solaris/makefiles/reorder_TIERED_i486	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_TIERED_i486	Fri May 25 00:49:14 2007 +0000
@@ -6873,9 +6873,6 @@
 text: .text%__1cRaddL_eReg_memNodeErule6kM_I_: ad_i486_misc.o;
 text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__;
 text: .text%__1cOMacroAssemblerFenter6M_v_;
-#ifdef JVMPI_SUPPORT
-text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MnITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%Unsafe_GetNativeByte;
 text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_;
 text: .text%__1cTsarL_eReg_32_63NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
@@ -7895,10 +7892,6 @@
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
 text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o;
 text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o;
-#ifdef JVMPI_SUPPORT
-text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-text: .text%__1cUVM_JVMPIPostObjAllocRclear_restriction6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%JVM_SupportsCX8;
 text: .text%__1cbEinitialize_converter_functions6F_v_;
 text: .text%JVM_Socket;
@@ -8098,10 +8091,6 @@
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
 text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
 text: .text%__1cORuntimeServiceEinit6F_v_;
-#ifdef JVMPI_SUPPORT
-text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__;
--- a/hotspot/build/solaris/makefiles/reorder_TIERED_sparc	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/reorder_TIERED_sparc	Fri May 25 00:49:14 2007 +0000
@@ -5845,9 +5845,6 @@
 text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
 text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_;
 text: .text%__1cLcastP2INodeErule6kM_I_: ad_sparc_misc.o;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cZInterpreterMacroAssemblerYnotify_jvmpi_method_exit6MinITosState__v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__;
 text: .text%__1cLCastP2INodeUdepends_only_on_test6kM_i_: classes.o;
 text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
@@ -6742,9 +6739,6 @@
 text: .text%__1cbEinitialize_converter_functions6F_v_;
 text: .text%JVM_SupportsCX8;
 text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cFjvmpibDpost_vm_initialization_events6F_v_;
-#endif // JVMPI_SUPPORT
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
 text: .text%__1cUJvmtiEventControllerIvm_start6F_v_;
 text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
@@ -6910,10 +6904,6 @@
 text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
 text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_;
 text: .text%__1cORuntimeServiceEinit6F_v_;
-#ifdef JVMPI_SUPPORT
-# text: .text%__1cLOptoRuntimeWjvmpi_method_exit_Type6F_pknITypeFunc__;
-# text: .text%__1cLOptoRuntimeXjvmpi_method_entry_Type6F_pknITypeFunc__;
-#endif // JVMPI_SUPPORT
 text: .text%__1cLOptoRuntimeVhandle_exception_Type6F_pknITypeFunc__;
 text: .text%__1cLOptoRuntimeSfetch_monitor_Type6F_pknITypeFunc__;
 text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_;
--- a/hotspot/build/solaris/makefiles/top.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/solaris/makefiles/top.make	Fri May 25 00:49:14 2007 +0000
@@ -52,6 +52,7 @@
 Include_DBs/GC          = $(VM)/includeDB_gc \
                           $(VM)/gc_implementation/includeDB_gc_parallelScavenge \
                           $(VM)/gc_implementation/includeDB_gc_concurrentMarkSweep \
+                          $(VM)/gc_implementation/includeDB_gc_parNew \
                           $(VM)/gc_implementation/includeDB_gc_shared
 
 Include_DBs/CORE        = $(VM)/includeDB_core   $(Include_DBs/GC)   
--- a/hotspot/build/windows/makefiles/adlc.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/windows/makefiles/adlc.make	Fri May 25 00:49:14 2007 +0000
@@ -52,7 +52,7 @@
   /I "$(WorkSpace)\src\share\vm\compiler"    \
   /I "$(WorkSpace)\src\share\vm\code"        \
   /I "$(WorkSpace)\src\share\vm\interpreter" \
-  /I "$(WorkSpace)\src\share\vm\lookup"      \
+  /I "$(WorkSpace)\src\share\vm\classfile"   \
   /I "$(WorkSpace)\src\share\vm\asm"         \
   /I "$(WorkSpace)\src\share\vm\memory"      \
   /I "$(WorkSpace)\src\share\vm\oops"        \
--- a/hotspot/build/windows/makefiles/generated.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/windows/makefiles/generated.make	Fri May 25 00:49:14 2007 +0000
@@ -39,7 +39,7 @@
 default:: includeDB.current Dependencies $(JvmtiGeneratedFiles)
 !endif
 
-IncludeDBs=$(WorkSpace)/src/share/vm/includeDB_core $(WorkSpace)/src/share/vm/includeDB_gc $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_shared $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
+IncludeDBs=$(WorkSpace)/src/share/vm/includeDB_core $(WorkSpace)/src/share/vm/includeDB_gc $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_shared $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parNew $(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
 
 !if "$(Variant)" == "core"
 IncludeDBs=$(IncludeDBs) 
--- a/hotspot/build/windows/makefiles/makedeps.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/windows/makefiles/makedeps.make	Fri May 25 00:49:14 2007 +0000
@@ -59,10 +59,10 @@
         -relativeInclude src\share\vm\code \
         -relativeInclude src\share\vm\interpreter \
         -relativeInclude src\share\vm\ci \
-        -relativeInclude src\share\vm\jvmci \
-        -relativeInclude src\share\vm\lookup \
+        -relativeInclude src\share\vm\classfile \
         -relativeInclude src\share\vm\gc_implementation\parallelScavenge \
         -relativeInclude src\share\vm\gc_implementation\shared \
+        -relativeInclude src\share\vm\gc_implementation\parNew \
         -relativeInclude src\share\vm\gc_implementation\concurrentMarkSweep \
         -relativeInclude src\share\vm\gc_interface \
         -relativeInclude src\share\vm\asm \
@@ -111,6 +111,7 @@
         -additionalFile includeDB_gc \
         -additionalFile includeDB_gc_parallelScavenge \
         -additionalFile includeDB_gc_concurrentMarkSweep \
+        -additionalFile includeDB_gc_parNew \
         -additionalFile includeDB_gc_shared \
         -additionalGeneratedFile $(HOTSPOTBUILDSPACE)\%f\%b vm.def \
         -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\build\windows\build_vm_def.sh" \
--- a/hotspot/build/windows/makefiles/vm.make	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/windows/makefiles/vm.make	Fri May 25 00:49:14 2007 +0000
@@ -109,10 +109,10 @@
   /I "$(WorkSpace)\src\share\vm\code"        \
   /I "$(WorkSpace)\src\share\vm\interpreter" \
   /I "$(WorkSpace)\src\share\vm\ci"          \
-  /I "$(WorkSpace)\src\share\vm\jvmci"       \
-  /I "$(WorkSpace)\src\share\vm\lookup"      \
+  /I "$(WorkSpace)\src\share\vm\classfile"   \
   /I "$(WorkSpace)\src\share\vm\gc_implementation\parallelScavenge"\
   /I "$(WorkSpace)\src\share\vm\gc_implementation\shared"\
+  /I "$(WorkSpace)\src\share\vm\gc_implementation\parNew"\
   /I "$(WorkSpace)\src\share\vm\gc_implementation\concurrentMarkSweep"\
   /I "$(WorkSpace)\src\share\vm\gc_interface"\
   /I "$(WorkSpace)\src\share\vm\asm"         \
@@ -138,10 +138,10 @@
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/interpreter
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/ci
-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/jvmci
-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/lookup
+VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/classfile
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parallelScavenge
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shared
+VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parNew
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/concurrentMarkSweep
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_interface
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/asm
@@ -198,10 +198,7 @@
 {$(WorkSpace)\src\share\vm\ci}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
-{$(WorkSpace)\src\share\vm\jvmci}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
-
-{$(WorkSpace)\src\share\vm\lookup}.cpp.obj::
+{$(WorkSpace)\src\share\vm\classfile}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
 {$(WorkSpace)\src\share\vm\gc_implementation\parallelScavenge}.cpp.obj::
@@ -210,6 +207,9 @@
 {$(WorkSpace)\src\share\vm\gc_implementation\shared}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
+{$(WorkSpace)\src\share\vm\gc_implementation\parNew}.cpp.obj::
+        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+
 {$(WorkSpace)\src\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj::
         $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
 
--- a/hotspot/build/windows/projectfiles/common/Makefile	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/build/windows/projectfiles/common/Makefile	Fri May 25 00:49:14 2007 +0000
@@ -50,6 +50,7 @@
 IncludeDBs_base=$(IncludeDBs_base) $(HOTSPOTWORKSPACE)/src/share/vm/includeDB_gc
 IncludeDBs_base=$(IncludeDBs_base) $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
 IncludeDBs_base=$(IncludeDBs_base) $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_shared
+IncludeDBs_base=$(IncludeDBs_base) $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parNew
 IncludeDBs_base=$(IncludeDBs_base) $(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
 
 IncludeDBs_core =$(IncludeDBs_base) 
--- a/hotspot/make/templates/bsd-header	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/make/templates/bsd-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All rights reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
--- a/hotspot/make/templates/gpl-cp-header	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/make/templates/gpl-cp-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All Rights Reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/hotspot/make/templates/gpl-header	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/make/templates/gpl-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All Rights Reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/cpu/amd64/vm/amd64.ad	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/amd64.ad	Fri May 25 00:49:14 2007 +0000
@@ -2517,6 +2517,10 @@
     // to allow platform-specific tweaking on sparc.
     __ cmpq(Rrax, Rrsi);
     __ jcc(Assembler::equal, hit);
+#ifndef PRODUCT
+    __ movq(Rrcx, (intptr_t) &SharedRuntime::_partial_subtype_ctr);
+    __ incrementl(Address(Rrcx));
+#endif //PRODUCT
     __ movq(Rrdi, Address(Rrsi, 
                           sizeof(oopDesc) + 
                           Klass::secondary_supers_offset_in_bytes()));
@@ -3333,6 +3337,9 @@
     assert (objReg != boxReg && objReg != tmpReg && 
             objReg != scrReg && tmpReg != scrReg, "invariant") ; 
 
+    if (_counters != NULL) {
+      masm.atomic_incl(Address((address) _counters->total_entry_count_addr(), relocInfo::none));
+    }
     if (EmitSync & 1) { 
         masm.movq (Address(boxReg), intptr_t(markOopDesc::unused_mark())) ; 
         masm.cmpq (rsp, 0) ; 
@@ -3341,7 +3348,7 @@
         Label DONE_LABEL;
         if (UseBiasedLocking) {
            // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
-          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
+          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
         }
         masm.movl(tmpReg, 0x1);
         masm.orq(tmpReg, Address(objReg));
@@ -3374,7 +3381,7 @@
         // If this invariant is not held we'll suffer exclusion (safety) failure.
           
         if (UseBiasedLocking) {
-          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL);
+          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
           masm.movq  (tmpReg, Address(objReg)) ;        // [FETCH]
         }
 
@@ -3382,9 +3389,9 @@
         masm.movq  (Address(boxReg), tmpReg) ;  
         if (os::is_MP()) { masm.lock(); } 
         masm.cmpxchgq(boxReg, Address(objReg)); // Updates tmpReg
-        if (PrintBiasedLockingStatistics) {
+        if (_counters != NULL) {
            masm.cond_incl(Assembler::equal, 
-             Address((address) BiasedLocking::fast_path_entry_count_addr(), relocInfo::none));
+             Address((address) _counters->fast_path_entry_count_addr(), relocInfo::none));
         }
         masm.jcc   (Assembler::equal, DONE_LABEL);
     
@@ -3392,9 +3399,9 @@
         masm.subq  (tmpReg, rsp);
         masm.andq  (tmpReg, 7 - os::vm_page_size());
         masm.movq  (Address(boxReg), tmpReg);
-        if (PrintBiasedLockingStatistics) {
+        if (_counters != NULL) {
            masm.cond_incl(Assembler::equal, 
-             Address((address) BiasedLocking::fast_path_entry_count_addr(), relocInfo::none));
+             Address((address) _counters->fast_path_entry_count_addr(), relocInfo::none));
         }
         masm.jmp   (DONE_LABEL) ; 
 
@@ -3570,7 +3577,7 @@
     // Compare first characters
     masm.subl(rcx, rdi);
     masm.jcc(Assembler::notZero,  POP_LABEL);
-    masm.decl(rsi);
+    masm.decrementl(rsi);
     masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
 
     {
@@ -3599,7 +3606,7 @@
     masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0));
     masm.subl(rcx, rdi);
     masm.jcc(Assembler::notZero, POP_LABEL);
-    masm.incq(rsi);
+    masm.incrementq(rsi);
     masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
     
     // Strings are equal up to min length.  Return the length difference.
@@ -6801,6 +6808,7 @@
 
 instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
 %{
+  predicate(UseIncDec);
   match(Set dst (AddI dst src));
   effect(KILL cr);
 
@@ -6812,6 +6820,7 @@
 
 instruct incI_mem(memory dst, immI1 src, rFlagsReg cr)
 %{
+  predicate(UseIncDec);
   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
   effect(KILL cr);
 
@@ -6825,6 +6834,7 @@
 // XXX why does that use AddI
 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr) 
 %{
+  predicate(UseIncDec);
   match(Set dst (AddI dst src));
   effect(KILL cr);
 
@@ -6837,6 +6847,7 @@
 // XXX why does that use AddI
 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr)
 %{
+  predicate(UseIncDec);
   match(Set dst (StoreI dst (AddI (LoadI dst) src)));
   effect(KILL cr);
 
@@ -6919,6 +6930,7 @@
 
 instruct incL_rReg(rRegI dst, immL1 src, rFlagsReg cr)
 %{
+  predicate(UseIncDec);
   match(Set dst (AddL dst src));
   effect(KILL cr);
 
@@ -6930,6 +6942,7 @@
 
 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr)
 %{
+  predicate(UseIncDec);
   match(Set dst (StoreL dst (AddL (LoadL dst) src)));
   effect(KILL cr);
 
@@ -6943,6 +6956,7 @@
 // XXX why does that use AddL
 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr) 
 %{
+  predicate(UseIncDec);
   match(Set dst (AddL dst src));
   effect(KILL cr);
 
@@ -6955,6 +6969,7 @@
 // XXX why does that use AddL
 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr)
 %{
+  predicate(UseIncDec);
   match(Set dst (StoreL dst (AddL (LoadL dst) src)));
   effect(KILL cr);
 
@@ -10026,6 +10041,42 @@
   ins_pipe(ialu_mem_reg);
 %}
 
+instruct MoveF2I_reg_reg(rRegI dst, regF src) %{
+  match(Set dst (MoveF2I src));
+  effect(DEF dst, USE src);
+  ins_cost(85);
+  format %{ "movd    $dst,$src\t# MoveF2I" %}
+  ins_encode %{ __ movdl($dst$$Register, $src$$FloatRegister); %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
+  match(Set dst (MoveD2L src));
+  effect(DEF dst, USE src);
+  ins_cost(85);
+  format %{ "movd    $dst,$src\t# MoveD2L" %}
+  ins_encode %{ __ movdq($dst$$Register, $src$$FloatRegister); %}
+  ins_pipe( pipe_slow );
+%}
+
+// The next instructions have long latency and use Int unit. Set high cost.
+instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
+  match(Set dst (MoveI2F src));
+  effect(DEF dst, USE src);
+  ins_cost(300);
+  format %{ "movd    $dst,$src\t# MoveI2F" %}
+  ins_encode %{ __ movdl($dst$$FloatRegister, $src$$Register); %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
+  match(Set dst (MoveL2D src));
+  effect(DEF dst, USE src);
+  ins_cost(300);
+  format %{ "movd    $dst,$src\t# MoveL2D" %}
+  ins_encode %{ __ movdq($dst$$FloatRegister, $src$$Register); %}
+  ins_pipe( pipe_slow );
+%}
 
 // =======================================================================
 // fast clearing of an array
--- a/hotspot/src/cpu/amd64/vm/assembler_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/assembler_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)assembler_amd64.cpp	1.55 07/05/05 17:04:10 JVM"
+#pragma ident "@(#)assembler_amd64.cpp	1.56 07/05/17 15:39:38 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -2878,21 +2878,9 @@
   ucomiss(dst, src);
 }
 
-void Assembler::decb(Register dst) 
-{
-  int dstenc = dst->encoding();
-  if (dstenc >= 8) {
-    prefix(REX_B);
-    dstenc -= 8;
-  } else if (dstenc >= 4) {
-    prefix(REX);
-  }
-  emit_byte(0xFE);
-  emit_byte(0xC8 | dstenc);
-}
-
 void Assembler::decl(Register dst)
 {
+  // Don't use it directly. Use MacroAssembler::decrementl() instead.
   // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
   int dstenc = dst->encoding();
   if (dstenc >= 8) {
@@ -2905,6 +2893,7 @@
 
 void Assembler::decl(Address dst)
 {
+  // Don't use it directly. Use MacroAssembler::decrementl() instead.
   InstructionMark im(this);
   if (dst.base_needs_rex()) {
     if (dst.index_needs_rex()) {
@@ -2923,6 +2912,7 @@
 
 void Assembler::decq(Register dst) 
 {
+  // Don't use it directly. Use MacroAssembler::decrementq() instead.
   // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
   int dstenc = dst->encoding();
   if (dstenc < 8) {
@@ -2937,6 +2927,7 @@
 
 void Assembler::decq(Address dst)
 {
+  // Don't use it directly. Use MacroAssembler::decrementq() instead.
   InstructionMark im(this);
   if (dst.base_needs_rex()) {
     if (dst.index_needs_rex()) {
@@ -3100,6 +3091,7 @@
 
 void Assembler::incl(Register dst)
 {
+  // Don't use it directly. Use MacroAssembler::incrementl() instead.
   // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
   int dstenc = dst->encoding();
   if (dstenc >= 8) {
@@ -3112,6 +3104,7 @@
 
 void Assembler::incl(Address dst)
 {
+  // Don't use it directly. Use MacroAssembler::incrementl() instead.
   InstructionMark im(this);
   if (dst.base_needs_rex()) {
     if (dst.index_needs_rex()) {
@@ -3130,6 +3123,7 @@
 
 void Assembler::incq(Register dst)
 {
+  // Don't use it directly. Use MacroAssembler::incrementq() instead.
   // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
   int dstenc = dst->encoding();
   if (dstenc < 8) {
@@ -3144,6 +3138,7 @@
 
 void Assembler::incq(Address dst)
 {
+  // Don't use it directly. Use MacroAssembler::incrementq() instead.
   InstructionMark im(this);
   if (dst.base_needs_rex()) {
     if (dst.index_needs_rex()) {
@@ -4576,6 +4571,87 @@
 
 void Assembler::nop(int i) {
   assert(i > 0, " ");
+  if (UseAddressNop) {
+    // Using multi-bytes nops "0x0F 0x1F [address]"
+    while(i >= 15) {
+      // don't generate sequential addess nops (mix with regular nops).
+      i -= 15;
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x0F);   // NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+      emit_byte(0x1F);
+      emit_byte(0x84);   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+      emit_byte(0x00);   // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+      emit_long(0);      // 32-bits offset
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x90);   // nop
+    }
+    switch (i) {
+      case 14:
+        emit_byte(0x66); // size prefix
+      case 13:
+        emit_byte(0x66); // size prefix
+      case 12:
+        emit_byte(0x0F); // NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+        emit_byte(0x1F);
+        emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+        emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+        emit_long(0);    // 32-bits offset
+        emit_byte(0x66); // size prefix
+        emit_byte(0x66); // size prefix
+        emit_byte(0x66); // size prefix
+        emit_byte(0x90); // nop
+        break;
+      case 11:
+        emit_byte(0x66); // size prefix
+      case 10:
+        emit_byte(0x66); // size prefix
+      case 9:
+        emit_byte(0x66); // size prefix
+      case 8: // NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+        emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+        emit_long(0);    // 32-bits offset
+        break;
+      case 7: // NOP DWORD PTR [EAX+0] 32-bits offset
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
+        emit_long(0);    // 32-bits offset
+        break;
+      case 6:
+        emit_byte(0x66); // size prefix
+      case 5: // NOP DWORD PTR [EAX+EAX*0+0]
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
+        emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+        emit_byte(0);    // 8-bits offset
+        break;
+      case 4:            // NOP DWORD PTR [EAX+0]
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
+        emit_byte(0);    // 8-bits offset
+        break;
+      case 3: 
+        // Don't use "0x0F 0x1F 0x00" (NOP DWORD PTR [EAX])
+        emit_byte(0x66); // size prefix
+      case 2:
+        emit_byte(0x66); // size prefix
+      case 1:
+        emit_byte(0x90); // nop
+        break;
+      default:
+        assert(i == 0, " ");
+    }
+    return;
+  }
   // Using nops with size prefixes "0x66 0x90".
   // From AMD Optimization Guide:
   //  1: 0x90
@@ -6008,36 +6084,76 @@
 
 void MacroAssembler::incrementl(Register reg, int value)
 {
+  if (value == min_jint) { addl(reg, value); return; }
   if (value <  0) { decrementl(reg, -value); return; }
   if (value == 0) {                        ; return; }
-  if (value == 1) { incl(reg)              ; return; }
+  if (value == 1 && UseIncDec) { incl(reg) ; return; }
   /* else */      { addl(reg, value)       ; return; }
 }
 
 void MacroAssembler::decrementl(Register reg, int value)
 {
+  if (value == min_jint) { subl(reg, value); return; }
   if (value <  0) { incrementl(reg, -value); return; }
   if (value == 0) {                        ; return; }
-  if (value == 1) { decl(reg)              ; return; }
+  if (value == 1 && UseIncDec) { decl(reg) ; return; }
   /* else */      { subl(reg, value)       ; return; }
 }
 
 void MacroAssembler::incrementq(Register reg, int value)
 {
+  if (value == min_jint) { addq(reg, value); return; }
   if (value <  0) { decrementq(reg, -value); return; }
   if (value == 0) {                        ; return; }
-  if (value == 1) { incq(reg)              ; return; }
+  if (value == 1 && UseIncDec) { incq(reg) ; return; }
   /* else */      { addq(reg, value)       ; return; }
 }
 
 void MacroAssembler::decrementq(Register reg, int value)
 {
+  if (value == min_jint) { subq(reg, value); return; }
   if (value <  0) { incrementq(reg, -value); return; }
   if (value == 0) {                        ; return; }
-  if (value == 1) { decq(reg)              ; return; }
+  if (value == 1 && UseIncDec) { decq(reg) ; return; }
   /* else */      { subq(reg, value)       ; return; }
 }
 
+void MacroAssembler::incrementl(Address dst, int value)
+{
+  if (value == min_jint) { addl(dst, value); return; }
+  if (value <  0) { decrementl(dst, -value); return; }
+  if (value == 0) {                        ; return; }
+  if (value == 1 && UseIncDec) { incl(dst) ; return; }
+  /* else */      { addl(dst, value)       ; return; }
+}
+
+void MacroAssembler::decrementl(Address dst, int value)
+{
+  if (value == min_jint) { subl(dst, value); return; }
+  if (value <  0) { incrementl(dst, -value); return; }
+  if (value == 0) {                        ; return; }
+  if (value == 1 && UseIncDec) { decl(dst) ; return; }
+  /* else */      { subl(dst, value)       ; return; }
+}
+
+void MacroAssembler::incrementq(Address dst, int value)
+{
+  if (value == min_jint) { addq(dst, value); return; }
+  if (value <  0) { decrementq(dst, -value); return; }
+  if (value == 0) {                        ; return; }
+  if (value == 1 && UseIncDec) { incq(dst) ; return; }
+  /* else */      { addq(dst, value)       ; return; }
+}
+
+void MacroAssembler::decrementq(Address dst, int value)
+{
+  if (value == min_jint) { subq(dst, value); return; }
+  if (value <  0) { incrementq(dst, -value); return; }
+  if (value == 0) {                        ; return; }
+  if (value == 1 && UseIncDec) { decq(dst) ; return; }
+  /* else */      { subq(dst, value)       ; return; }
+}
+
 void MacroAssembler::align(int modulus)
 {
   if (offset() % modulus != 0) {
@@ -6729,7 +6845,7 @@
   int offset = (1 << shift_value) - 1 ;
 
   if (offset == 1) {
-    incl(reg);
+    incrementl(reg);
   } else {
     addl(reg, offset);
   }
@@ -7093,7 +7209,8 @@
 
 int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
                                          bool swap_reg_contains_mark,
-                                         Label& done, Label* slow_case) {
+                                         Label& done, Label* slow_case,
+                                         BiasedLockingCounters* counters) {
   assert(UseBiasedLocking, "why call this otherwise?");
   assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
   assert(tmp_reg != noreg, "tmp_reg must be supplied");
@@ -7103,6 +7220,9 @@
   Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
   Address saved_mark_addr(lock_reg);
 
+  if (PrintBiasedLockingStatistics && counters == NULL)
+    counters = BiasedLocking::counters();
+
   // Biased locking
   // See whether the lock is currently biased toward our thread and
   // whether the epoch is still valid
@@ -7126,8 +7246,8 @@
   orq(tmp_reg, r15_thread);
   xorq(tmp_reg, swap_reg);
   andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((address) BiasedLocking::biased_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((address) counters->biased_lock_entry_count_addr(), relocInfo::none));
   }
   jcc(Assembler::equal, done);
 
@@ -7176,8 +7296,8 @@
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
   // interpreter runtime in the slow case.
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((address) counters->anonymously_biased_lock_entry_count_addr(), relocInfo::none));
   }
   if (slow_case != NULL) {
     jcc(Assembler::notZero, *slow_case);
@@ -7204,8 +7324,8 @@
   // If the biasing toward our thread failed, then another thread
   // succeeded in biasing it toward itself and we need to revoke that
   // bias. The revocation will occur in the runtime in the slow case.
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((address) BiasedLocking::rebiased_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((address) counters->rebiased_lock_entry_count_addr(), relocInfo::none));
   }
   if (slow_case != NULL) {
     jcc(Assembler::notZero, *slow_case);
@@ -7233,8 +7353,8 @@
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((address) BiasedLocking::revoked_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((address) counters->revoked_lock_entry_count_addr(), relocInfo::none));
   }
 
   bind(cas_label);
@@ -7287,12 +7407,16 @@
   Condition negated_cond = negate_condition(cond);
   Label L;
   jcc(negated_cond, L);
+  atomic_incl(counter_addr);
+  bind(L);
+}
+
+void MacroAssembler::atomic_incl(Address counter_addr) {
   pushfq();
   if (os::is_MP())
     lock();
-  incl(counter_addr);
+  incrementl(counter_addr);
   popfq();
-  bind(L);
 }
 
 SkipIfEqual::SkipIfEqual(
--- a/hotspot/src/cpu/amd64/vm/assembler_amd64.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/assembler_amd64.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)assembler_amd64.hpp	1.39 07/05/05 17:04:03 JVM"
+#pragma ident "@(#)assembler_amd64.hpp	1.40 07/05/17 15:39:42 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -25,6 +25,8 @@
  *  
  */
 
+class BiasedLockingCounters;
+
 // Contains all the definitions needed for amd64 assembly code generation.
 
 // Calling convention
@@ -520,12 +522,24 @@
   void ucomiss(FloatRegister dst, FloatRegister src);
   void ucomisd(FloatRegister dst, FloatRegister src);
 
-  void decb(Register dst);
+ protected:
+  // Don't use next inc() and dec() methods directly. INC & DEC instructions 
+  // could cause a partial flag stall since they don't set CF flag.
+  // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
+  // which call inc() & dec() or add() & sub() in accordance with 
+  // the product flag UseIncDec value.
+
   void decl(Register dst);
   void decl(Address dst);
   void decq(Register dst);
   void decq(Address dst);
 
+  void incl(Register dst);
+  void incl(Address dst);
+  void incq(Register dst);
+  void incq(Address dst);
+
+ public:
   void idivl(Register src);
   void idivq(Register src);
   void cdql();
@@ -536,11 +550,6 @@
   void imulq(Register dst, Register src);
   void imulq(Register dst, Register src, int value);
 
-  void incl(Register dst);
-  void incl(Address dst);
-  void incq(Register dst);
-  void incq(Address dst);
-
   void leal(Register dst, Address src);
   void leaq(Register dst, Address src);
 
@@ -887,10 +896,15 @@
 
   // Support for inc/dec with optimal instruction selection depending
   // on value
-  void incrementl(Register reg, int value);
-  void decrementl(Register reg, int value);
-  void incrementq(Register reg, int value);
-  void decrementq(Register reg, int value);
+  void incrementl(Register reg, int value = 1);
+  void decrementl(Register reg, int value = 1);
+  void incrementq(Register reg, int value = 1);
+  void decrementq(Register reg, int value = 1);
+
+  void incrementl(Address dst, int value = 1);
+  void decrementl(Address dst, int value = 1);
+  void incrementq(Address dst, int value = 1);
+  void decrementq(Address dst, int value = 1);
 
   // Alignment
   void align(int modulus);
@@ -1476,7 +1490,6 @@
   void cmpq(Register dst, Register src)     { Assembler::cmpq(dst, src); }
   void ucomiss(FloatRegister dst, FloatRegister src) { Assembler::ucomiss(dst, src); }
   void ucomisd(FloatRegister dst, FloatRegister src) { Assembler::ucomisd(dst, src); }
-  void decb(Register dst)                   { Assembler::decb(dst); }
   void decl(Register dst)                   { Assembler::decl(dst); }
   void decq(Register dst)                   { Assembler::decq(dst); }
   void idivl(Register src)                  { Assembler::idivl(src); }
@@ -1772,7 +1785,8 @@
   // the calling code has already passed any potential faults.
   int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
                            bool swap_reg_contains_mark,
-                           Label& done, Label* slow_case = NULL);
+                           Label& done, Label* slow_case = NULL,
+                           BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 
   Condition negate_condition(Condition cond);
@@ -1780,6 +1794,8 @@
   // Helper functions for statistics gathering.
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
   void cond_incl(Condition cond, Address counter_addr);
+  // Unconditional atomic increment.
+  void atomic_incl(Address counter_addr);
 };
 
 /**
--- a/hotspot/src/cpu/amd64/vm/disassembler_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/disassembler_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)disassembler_amd64.cpp	1.13 07/05/05 17:04:04 JVM"
+#pragma ident "@(#)disassembler_amd64.cpp	1.14 07/05/17 15:39:44 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -158,7 +158,7 @@
 {
   st = st ? st : tty;
 
-  const int show_bytes = true; // for disassembler debugging
+  const int show_bytes = false; // for disassembler debugging
 
   if (!load_library()) {
     st->print_cr("Could not load disassembler");
@@ -167,9 +167,14 @@
 
   amd64_env env(NULL, st);
   unsigned char* p = (unsigned char*) begin;
+  CodeBlob* cb = CodeCache::find_blob_unsafe(begin);
   while (p < (unsigned char*) end) {
+    if (cb != NULL) {
+      cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin()));
+    }
+
     unsigned char* p0 = p;
-    st->print(INTPTR_FORMAT ": ", p);
+    st->print("  " INTPTR_FORMAT ": ", p);
     p = decode_instruction(p, &env);
     if (show_bytes) {
       st->print("\t\t\t");
--- a/hotspot/src/cpu/amd64/vm/icache_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/icache_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)icache_amd64.cpp	1.12 07/05/05 17:04:06 JVM"
+#pragma ident "@(#)icache_amd64.cpp	1.13 07/05/17 15:39:46 JVM"
 #endif
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -53,7 +53,7 @@
   __ bind(flush_line);
   __ clflush(Address(addr));
   __ addq(addr, ICache::line_size);
-  __ decl(lines);
+  __ decrementl(lines);
   __ jcc(Assembler::notZero, flush_line);
 
   __ mfence();
--- a/hotspot/src/cpu/amd64/vm/interp_masm_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/interp_masm_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interp_masm_amd64.cpp	1.41 07/05/05 17:04:05 JVM"
+#pragma ident "@(#)interp_masm_amd64.cpp	1.42 07/05/17 15:39:48 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -817,11 +817,7 @@
 
   bind(no_unlock);
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi/jvmti support
-#else // !JVMPI_SUPPORT
   // jvmti support
-#endif // JVMPI_SUPPORT
   if (notify_jvmdi) {
     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
   } else {
@@ -1573,31 +1569,6 @@
                                     InterpreterRuntime::post_method_entry));
     bind(L);
   }
-#ifdef JVMPI_SUPPORT
-  Label E;
-  Label S;
-  cmpl(Address((address) 
-        jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY),
-        relocInfo::none),
-      (int) JVMPI_EVENT_ENABLED);
-  jcc(Assembler::equal, S);
-  cmpl(Address((address) 
-        jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2),
-        relocInfo::none),
-      (int) JVMPI_EVENT_ENABLED);
-  jcc(Assembler::notEqual, E);
-  bind(S);
-  // notify method entry
-  get_method(c_rarg1);
-  // get receiver
-  xorl(c_rarg2, c_rarg2);             // receiver = NULL for a static method
-  movl(rax, Address(c_rarg1, methodOopDesc::access_flags_offset()));
-  testl(rax, JVM_ACC_STATIC); // check if method is static
-  cmovq(Assembler::zero, c_rarg2, Address(r14));    // otherwise get receiver
-  call_VM(noreg, CAST_FROM_FN_PTR(address, 
-                   SharedRuntime::jvmpi_method_entry), c_rarg1, c_rarg2);
-  bind(E);
-#endif // JVMPI_SUPPORT
 
   {
     SkipIfEqual skip(this, &DTraceMethodProbes, false);
@@ -1629,23 +1600,6 @@
     pop(state);     
   }
 
-#ifdef JVMPI_SUPPORT
-  Label E;
-  cmpl(Address((address) 
-        jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT),
-        relocInfo::none),
-      (int) JVMPI_EVENT_ENABLED);
-  jcc(Assembler::notEqual, E);
-
-  // notify method exit
-  push(state);
-  get_method(c_rarg1);
-  call_VM(noreg, 
-          CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), c_rarg1);
-  pop(state);
-  bind(E);
-#endif // JVMPI_SUPPORT
-
   {
     SkipIfEqual skip(this, &DTraceMethodProbes, false);
     push(state);
--- a/hotspot/src/cpu/amd64/vm/interp_masm_amd64.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/interp_masm_amd64.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)interp_masm_amd64.hpp	1.20 07/05/05 17:04:07 JVM"
+#pragma ident "@(#)interp_masm_amd64.hpp	1.21 07/05/17 15:39:50 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -243,11 +243,7 @@
 
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
 
-#ifdef JVMPI_SUPPORT
-  // support for jvmpi/jvmti/dtrace
-#else // !JVMPI_SUPPORT
   // support for jvmti/dtrace
-#endif // JVMPI_SUPPORT
   void notify_method_entry();
   void notify_method_exit(TosState state, NotifyMethodExitMode mode);
 };
--- a/hotspot/src/cpu/amd64/vm/interpreter_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/interpreter_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interpreter_amd64.cpp	1.61 07/05/05 17:04:07 JVM"
+#pragma ident "@(#)interpreter_amd64.cpp	1.62 07/05/17 15:39:52 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -431,7 +431,7 @@
                                  InvocationCounter::counter_offset());
 
   if (ProfileInterpreter) { // %%% Merge this into methodDataOop
-    __ incl(Address(rbx,
+    __ incrementl(Address(rbx,
                     methodOopDesc::interpreter_invocation_counter_offset()));
   }
   // Update standard invocation counters
@@ -1084,11 +1084,7 @@
   }
 #endif
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi support
-#else // !JVMPI_SUPPORT
   // jvmti support
-#endif // JVMPI_SUPPORT
   __ notify_method_entry();
 
   // work registers
@@ -1392,11 +1388,7 @@
     __ bind(L);
   }
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi support
-#else // !JVMPI_SUPPORT
   // jvmti support
-#endif // JVMPI_SUPPORT
   // Note: This must happen _after_ handling/throwing any exceptions since
   //       the exception handler code notifies the runtime of method exits
   //       too. If this happens before, method entry/exit notifications are
@@ -1462,7 +1454,7 @@
   __ subl(rdx, rcx); // rdx = no. of additional locals
 
   // YYY
-//   __ incl(rdx);
+//   __ incrementl(rdx);
 //   __ andl(rdx, -2);
 
   // see if we've got enough room on the stack for locals plus overhead.
@@ -1485,7 +1477,7 @@
     __ bind(loop);
     if (TaggedStackInterpreter) __ pushq((int) NULL);  // push tag
     __ pushq((int) NULL); // initialize local variables
-    __ decl(rdx); // until everything initialized
+    __ decrementl(rdx); // until everything initialized
     __ jcc(Assembler::greater, loop);
     __ bind(exit);
   }
@@ -1582,11 +1574,7 @@
   }
 #endif
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi support
-#else // !JVMPI_SUPPORT
   // jvmti support
-#endif // JVMPI_SUPPORT
   __ notify_method_entry();
  
   __ dispatch_next(vtos);
@@ -2082,10 +2070,8 @@
 
 //-----------------------------------------------------------------------------
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this method can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+// Non-product code
+#ifndef PRODUCT
 address AbstractInterpreterGenerator::generate_trace_code(TosState state)
 {
   address entry = __ pc();
@@ -2111,19 +2097,16 @@
 
   return entry;
 }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 
-// Non-product code
-#ifndef PRODUCT
 void AbstractInterpreterGenerator::count_bytecode()
 {
-  __ incl(Address((address) &BytecodeCounter::_counter_value,
+  __ incrementl(Address((address) &BytecodeCounter::_counter_value,
                   relocInfo::none));
 }
 
 void AbstractInterpreterGenerator::histogram_bytecode(Template* t)
 {
-  __ incl(Address((address) &BytecodeHistogram::_counters[t->bytecode()],
+  __ incrementl(Address((address) &BytecodeHistogram::_counters[t->bytecode()],
                   relocInfo::none));
 }
 
@@ -2139,15 +2122,10 @@
                   relocInfo::none),
           rbx);
   __ movq(rscratch1, (int64_t) BytecodePairHistogram::_counters);
-  __ incl(Address(rscratch1, rbx, Address::times_4));
+  __ incrementl(Address(rscratch1, rbx, Address::times_4));
 }
-#endif // !PRODUCT
 
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this method can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 void AbstractInterpreterGenerator::trace_bytecode(Template* t)
 {
   // Call a little run-time stub to avoid blow-up for each bytecode.
@@ -2160,11 +2138,8 @@
   __ call(entry, relocInfo::none);
   __ movq(rsp, r12); // restore sp
 }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 
 
-// Non-product code
-#ifndef PRODUCT
 void AbstractInterpreterGenerator::stop_interpreter_at()
 {
   Label L;
--- a/hotspot/src/cpu/amd64/vm/sharedRuntime_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/sharedRuntime_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)sharedRuntime_amd64.cpp	1.38 07/05/05 17:04:09 JVM"
+#pragma ident "@(#)sharedRuntime_amd64.cpp	1.39 07/05/17 15:39:55 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1250,13 +1250,6 @@
   int lock_slot_offset = 0;
   bool is_static = false;
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    oop_temp_slot_offset = stack_slots;
-    stack_slots += VMRegImpl::slots_per_word;
-  }
-#endif // JVMPI_SUPPORT
-
   if (method->is_static()) {
     klass_slot_offset = stack_slots;
     stack_slots += VMRegImpl::slots_per_word;
@@ -1286,10 +1279,6 @@
   //      |---------------------| <- lock_slot_offset
   //      | klass (if static)   |
   //      |---------------------| <- klass_slot_offset
-#ifdef JVMPI_SUPPORT
-  //      | oop_temp (jvmpi)    |
-  //      |---------------------| <- oop_temp_slot_offset
-#endif // JVMPI_SUPPORT
   //      | oopHandle area      |
   //      |---------------------| <- oop_handle_offset (6 java arg registers)
   //      | outbound memory     |
@@ -1380,11 +1369,7 @@
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
-#ifdef JVMPI_SUPPORT
-  // make from here on out (sync slow path, jvmpi, etc.) we will have
-#else // !JVMPI_SUPPORT
   // make from here on out (sync slow path, jvmti, etc.) we will have
-#endif // JVMPI_SUPPORT
   // captured the oops from our caller and have a valid oopMap for
   // them.
 
@@ -1499,13 +1484,6 @@
     c_arg--;
   }
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    __ movq(Address(rsp, oop_temp_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
-    map->set_oop(VMRegImpl::stack2reg(oop_temp_slot_offset));
-  }
-#endif // JVMPI_SUPPORT
-
   // Change state to native (we save the return address in the thread, since it might not
   // be pushed on the stack when we do a a stack traversal). It is enough that the pc()
   // points into the right code segment. It does not have to be the correct return pc.
@@ -1520,35 +1498,6 @@
   // We have all of the arguments setup at this point. We must not touch any register
   // argument registers at this point (what if we save/restore them there are no oop?
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi support
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY) ||
-      jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY2)) {
-
-    // protect the args we've loaded
-    save_args(masm, total_c_args, c_arg, out_regs);
-
-    // No receiver if static
-    if (method()->is_static()) {
-      __ movq(c_rarg2, NULL_WORD);
-    } else {
-      __ movq(c_rarg2, c_rarg1);
-    }
-    __ movq(c_rarg0, r15_thread);
-    __ movq(c_rarg1, JNIHandles::make_local(method()));
-    
-
-    // Not a leaf but we have last_Java_frame setup as we want
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_entry), 3);
-
-    restore_args(masm, total_c_args, c_arg, out_regs);
-    // Any exception pending?
-    __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
-    __ jcc(Assembler::notEqual, exception_pending);
-
-  }
-#endif // JVMPI_SUPPORT
-
   {
     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
     // protect the args we've loaded
@@ -1791,34 +1740,6 @@
 
   }
 
-#ifdef JVMPI_SUPPORT
-  // Tell jvmpi about this method exit
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    save_native_result(masm, ret_type, stack_slots);
-    // Save any pending exception and clear it from the thread
-    __ movq(rax, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
-    __ movq(Address(rsp, oop_temp_slot_offset * VMRegImpl::stack_slot_size), rax);
-    __ movq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
-
-    __ movq(c_rarg1, JNIHandles::make_local(method()));
-    __ movq(c_rarg0, r15_thread);
-    __ movq(r12, rsp); // remember sp
-    __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
-    __ andq(rsp, -16); // align stack as required by ABI
-    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), relocInfo::runtime_call_type);
-    __ movq(rsp, r12); // restore sp
-
-    Label L;
-    // If we had a pending exception before jvmpi call it takes precedence
-    __ movq(rax, Address(rsp, oop_temp_slot_offset * VMRegImpl::stack_slot_size));
-    __ testq(rax, rax);
-    __ jcc(Assembler::equal, L);
-    __ movq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rax);
-    __ bind(L);
-    restore_native_result(masm, ret_type, stack_slots);
-  }
-#endif // JVMPI_SUPPORT
-
   {
     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
     save_native_result(masm, ret_type, stack_slots);
@@ -2158,7 +2079,7 @@
   __ movq(sender_sp, rsp);              // Pass sender_sp to next frame
   __ addq(rsi, wordSize);               // Bump array pointer (sizes)
   __ addq(rcx, wordSize);               // Bump array pointer (pcs)
-  __ decl(rdx);                         // Decrement counter
+  __ decrementl(rdx);                   // Decrement counter
   __ jcc(Assembler::notZero, loop);
   __ pushq(Address(rcx));               // Save final return address
 
@@ -2334,7 +2255,7 @@
   __ movq(sender_sp, rsp);    // Pass sender_sp to next frame
   __ addq(rsi, wordSize);     // Bump array pointer (sizes)
   __ addq(rcx, wordSize);     // Bump array pointer (pcs)
-  __ decl(rdx);               // Decrement counter
+  __ decrementl(rdx);         // Decrement counter
   __ jcc(Assembler::notZero, loop);
   __ pushq(Address(rcx));     // Save final return address
 
--- a/hotspot/src/cpu/amd64/vm/stubGenerator_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/stubGenerator_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stubGenerator_amd64.cpp	1.40 07/05/05 17:04:08 JVM"
+#pragma ident "@(#)stubGenerator_amd64.cpp	1.41 07/05/17 15:40:00 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -34,6 +34,13 @@
 
 #define __ _masm->
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions
 
 // Stub Code definitions
@@ -55,6 +62,19 @@
 
 class StubGenerator: public StubCodeGenerator {
  private:
+
+#ifdef PRODUCT
+#define inc_counter_np(counter) (0)
+#else
+  void inc_counter_np_(int& counter) {
+    Address counter_addr((address) &counter, relocInfo::none);
+    __ incrementl(counter_addr);
+  }
+#define inc_counter_np(counter) \
+  BLOCK_COMMENT("inc_counter " #counter); \
+  inc_counter_np_(counter);
+#endif
+
   // Call stubs are used to call Java from C
   //
   // Linux Arguments:
@@ -248,6 +268,7 @@
 #endif
 
     // pass parameters if any
+    BLOCK_COMMENT("pass parameters if any");
     Label parameters_done;
     __ movl(c_rarg3, parameter_size);
     __ testl(c_rarg3, c_rarg3);
@@ -256,7 +277,7 @@
     Label loop;
     __ movq(c_rarg2, parameters);     // parameter pointer
     __ movl(c_rarg1, c_rarg3);        // parameter counter is in c_rarg1
-    __ bind(loop);
+    __ BIND(loop);
     if (TaggedStackInterpreter) {
       __ movq(rax, Address(c_rarg2)); // get tag
       __ addq(c_rarg2, wordSize);     // advance to next tag
@@ -264,16 +285,19 @@
     }
     __ movq(rax, Address(c_rarg2));   // get parameter
     __ addq(c_rarg2, wordSize);       // advance to next parameter
-    __ decl(c_rarg1);                 // decrement counter
+    __ decrementl(c_rarg1);           // decrement counter
     __ pushq(rax);                    // pass parameter
     __ jcc(Assembler::notZero, loop);
 
     // call Java function
-    __ bind(parameters_done);
+    __ BIND(parameters_done);
     __ movq(rbx, method);             // get methodOop
     __ movq(c_rarg1, entry_point);    // get entry_point
     __ movq(r13, rsp);                // set sender sp
+    BLOCK_COMMENT("call Java function");
     __ call(c_rarg1, relocInfo::none);
+
+    BLOCK_COMMENT("call_stub_return_address:");
     return_address = __ pc();
 
     // store result depending on type (everything that is not
@@ -293,7 +317,7 @@
     // handle T_INT case
     __ movl(Address(c_rarg0), rax);
 
-    __ bind(exit);
+    __ BIND(exit);
 
     // pop parameters
     __ leaq(rsp, rsp_after_call);
@@ -336,15 +360,15 @@
     __ ret(0);
 
     // handle return types different from T_INT
-    __ bind(is_long);
+    __ BIND(is_long);
     __ movq(Address(c_rarg0), rax);
     __ jmp(exit);
 
-    __ bind(is_float);
+    __ BIND(is_float);
     __ movss(Address(c_rarg0), xmm0);
     __ jmp(exit);
 
-    __ bind(is_double);
+    __ BIND(is_double);
     __ movsd(Address(c_rarg0), xmm0);
     __ jmp(exit);
 
@@ -439,6 +463,7 @@
 
     // compute exception handler into rbx
     __ movq(c_rarg0, Address(rsp)); 
+    BLOCK_COMMENT("call exception_handler_for_return_address");
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, 
                          SharedRuntime::exception_handler_for_return_address),
                     c_rarg0);
@@ -870,6 +895,7 @@
     Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
 
     __ subq(rsp, frame::arg_reg_save_area_bytes);
+    BLOCK_COMMENT("call handle_unsafe_access");
     __ call(CAST_FROM_FN_PTR(address, handle_unsafe_access),
             relocInfo::runtime_call_type);
     __ addq(rsp, frame::arg_reg_save_area_bytes);
@@ -902,7 +928,7 @@
     Label exit, error;
 
     __ pushfq();
-    __ incl(Address((address) StubRoutines::verify_oop_count_addr(),
+    __ incrementl(Address((address) StubRoutines::verify_oop_count_addr(),
                     relocInfo::none));
 
     // save c_rarg2 and c_rarg3
@@ -977,6 +1003,7 @@
     __ movq(r12, rsp);                           // remember rsp
     __ subq(rsp, frame::arg_reg_save_area_bytes);// windows
     __ andq(rsp, -16);                           // align stack as required by ABI
+    BLOCK_COMMENT("call MacroAssembler::debug");
     __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), 
             relocInfo::runtime_call_type);
     __ movq(rsp, r12);                           // restore rsp
@@ -998,6 +1025,8 @@
   static address long_copy_entry;
   static address oop_copy_entry;
 
+  static address checkcast_copy_entry;
+
   //
   // Verify that a register contains clean 32-bits positive value
   // (high 32-bits are 0) so it could be used in 64-bits shifts.
@@ -1009,6 +1038,7 @@
   void assert_clean_int(Register Rint, Register Rtmp) {
 #ifdef ASSERT
     Label L;
+    assert_different_registers(Rtmp, Rint);
     __ movslq(Rtmp, Rint);
     __ cmpq(Rtmp, Rint);
     __ jccb(Assembler::equal, L);
@@ -1028,6 +1058,13 @@
   //     rax   - &from[element count - 1]
   //
   void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
+    assert(no_overlap_target != NULL, "must be generated");
+    array_overlap_test(no_overlap_target, NULL, sf);
+  }
+  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
+    array_overlap_test(NULL, &L_no_overlap, sf);
+  }
+  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
     const Register from     = c_rarg0;
     const Register to       = c_rarg1;
     const Register count    = c_rarg2;
@@ -1035,9 +1072,15 @@
 
     __ cmpq(to, from);
     __ leaq(end_from, Address(from, count, sf, 0));
-    __ jcc(Assembler::belowEqual, no_overlap_target);
+    if (NOLp == NULL)
+      __ jcc(Assembler::belowEqual, no_overlap_target);
+    else
+      __ jcc(Assembler::belowEqual, (*NOLp));
     __ cmpq(to, end_from);
-    __ jcc(Assembler::aboveEqual, no_overlap_target);
+    if (NOLp == NULL)
+      __ jcc(Assembler::aboveEqual, no_overlap_target);
+    else
+      __ jcc(Assembler::aboveEqual, (*NOLp));
   }
 
   // Shuffle first three arg regs on Windows into Linux/Solaris locations.
@@ -1046,23 +1089,29 @@
   //    rdi - rcx
   //    rsi - rdx
   //    rdx - r8
+  //    rcx - r9
   //
   // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter
   // are non-volatile.  r9 and r10 should not be used by the caller.
   //
-  void setup_arg_regs() {
+  void setup_arg_regs(int nargs = 3) {
     const Register saved_rdi = r9;
     const Register saved_rsi = r10;
+    assert(nargs == 3 || nargs == 4, "else fix");
 #ifdef _WIN64
-    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8,
+    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
            "unexpected argument registers"); 
+    if (nargs >= 4)
+      __ movq(rax, r9);  // r9 is also saved_rdi
     __ movq(saved_rdi, rdi);
     __ movq(saved_rsi, rsi);
     __ movq(rdi, rcx); // c_rarg0
     __ movq(rsi, rdx); // c_rarg1
     __ movq(rdx, r8);  // c_rarg2
+    if (nargs >= 4)
+      __ movq(rcx, rax); // c_rarg3 (via rax)
 #else
-    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx,
+    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
            "unexpected argument registers"); 
 #endif
   }
@@ -1084,12 +1133,14 @@
   //     scratch  - scratch register
   //
   //  The input registers are overwritten.
+  //  The ending address is inclusive.
   //
   void array_store_check(Register start, Register end, Register scratch) {
     BarrierSet* bs = Universe::heap()->barrier_set();
     assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
     CardTableModRefBS* ct = (CardTableModRefBS*)bs;
     assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+    assert_different_registers(start, end, scratch);
 
     Label L_loop;
 
@@ -1100,9 +1151,9 @@
     const Register count = end; // 'end' register contains bytes count now 
     __ movq(scratch, (int64_t) ct->byte_map_base);
     __ addq(start, scratch);
-  __ bind(L_loop);
+  __ BIND(L_loop);
     __ movb(Address(start, count, Address::times_1), 0);
-    __ decq(count);
+    __ decrementq(count);
     __ jcc(Assembler::greaterEqual, L_loop);
   }
 
@@ -1122,7 +1173,7 @@
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
     __ align(16);
-  __ bind(L_loop);
+  __ BIND(L_loop);
     __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
     __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
     __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
@@ -1131,7 +1182,7 @@
     __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
     __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
     __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
-  __ bind(L_copy_32_bytes);
+  __ BIND(L_copy_32_bytes);
     __ addq(qword_count, 4);
     __ jcc(Assembler::lessEqual, L_loop);
     __ subq(qword_count, 4);
@@ -1155,7 +1206,7 @@
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
     __ align(16);
-  __ bind(L_loop);
+  __ BIND(L_loop);
     __ movq(to, Address(from, qword_count, Address::times_8, 24));
     __ movq(Address(dest, qword_count, Address::times_8, 24), to);
     __ movq(to, Address(from, qword_count, Address::times_8, 16));
@@ -1164,7 +1215,7 @@
     __ movq(Address(dest, qword_count, Address::times_8,  8), to);
     __ movq(to, Address(from, qword_count, Address::times_8,  0));
     __ movq(Address(dest, qword_count, Address::times_8,  0), to);
-  __ bind(L_copy_32_bytes);
+  __ BIND(L_copy_32_bytes);
     __ subq(qword_count, 4);
     __ jcc(Assembler::greaterEqual, L_loop);
     __ addq(qword_count, 4);
@@ -1209,13 +1260,15 @@
     // to the last unit copied:  end_to[0] := end_from[0]
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     disjoint_byte_copy_entry = __ pc();
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'count' are now valid
     __ movq(byte_count, count);
     __ shrq(count, 3); // count => qword_count
@@ -1227,14 +1280,14 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
-    __ incq(qword_count);
+    __ incrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
     // Check for and copy trailing dword
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     __ testq(byte_count, 4);
     __ jccb(Assembler::zero, L_copy_2_bytes);
     __ movl(rax, Address(end_from, 8));
@@ -1244,7 +1297,7 @@
     __ addq(end_to, 4);
 
     // Check for and copy trailing word
-  __ bind(L_copy_2_bytes);
+  __ BIND(L_copy_2_bytes);
     __ testq(byte_count, 2);
     __ jccb(Assembler::zero, L_copy_byte);
     __ movw(rax, Address(end_from, 8));
@@ -1254,13 +1307,14 @@
     __ addq(end_to, 2);
 
     // Check for and copy trailing byte
-  __ bind(L_copy_byte);
+  __ BIND(L_copy_byte);
     __ testq(byte_count, 1);
     __ jccb(Assembler::zero, L_exit);
     __ movb(rax, Address(end_from, 8));
     __ movb(Address(end_to, 8), rax);
 
-  __ bind(L_exit);
+  __ BIND(L_exit);
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1301,14 +1355,16 @@
     const Register qword_count = count;
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     byte_copy_entry = __ pc();
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'count' are now valid
     __ movq(byte_count, count);
     __ shrq(count, 3);   // count => qword_count
@@ -1320,17 +1376,17 @@
     __ jcc(Assembler::zero, L_copy_2_bytes);
     __ movb(rax, Address(from, byte_count, Address::times_1, -1));
     __ movb(Address(to, byte_count, Address::times_1, -1), rax);
-    __ decq(byte_count); // Adjust for possible trailing word 
+    __ decrementq(byte_count); // Adjust for possible trailing word 
 
     // Check for and copy trailing word
-  __ bind(L_copy_2_bytes);
+  __ BIND(L_copy_2_bytes);
     __ testq(byte_count, 2);
     __ jcc(Assembler::zero, L_copy_4_bytes);
     __ movw(rax, Address(from, byte_count, Address::times_1, -2));
     __ movw(Address(to, byte_count, Address::times_1, -2), rax);
 
     // Check for and copy trailing dword
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     __ testq(byte_count, 4);
     __ jcc(Assembler::zero, L_copy_32_bytes);
     __ movl(rax, Address(from, qword_count, Address::times_8));
@@ -1338,12 +1394,13 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
-    __ decq(qword_count);
+    __ decrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1352,6 +1409,7 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1396,13 +1454,15 @@
     // to the last unit copied:  end_to[0] := end_from[0]
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     disjoint_short_copy_entry = __ pc();
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'count' are now valid
     __ movq(word_count, count);
     __ shrq(count, 2); // count => qword_count
@@ -1414,17 +1474,17 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
-    __ incq(qword_count);
+    __ incrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
     // Original 'dest' is trashed, so we can't use it as a
     // base register for a possible trailing word copy
 
     // Check for and copy trailing dword
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     __ testq(word_count, 2);
     __ jccb(Assembler::zero, L_copy_2_bytes);
     __ movl(rax, Address(end_from, 8));
@@ -1434,13 +1494,14 @@
     __ addq(end_to, 4);
 
     // Check for and copy trailing word
-  __ bind(L_copy_2_bytes);
+  __ BIND(L_copy_2_bytes);
     __ testq(word_count, 1);
     __ jccb(Assembler::zero, L_exit);
     __ movw(rax, Address(end_from, 8));
     __ movw(Address(end_to, 8), rax);
 
-  __ bind(L_exit);
+  __ BIND(L_exit);
+    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1481,14 +1542,16 @@
     const Register qword_count = count;
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     short_copy_entry = __ pc();
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     array_overlap_test(disjoint_short_copy_entry, Address::times_2);
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'count' are now valid
     __ movq(word_count, count);
     __ shrq(count, 2); // count => qword_count
@@ -1502,7 +1565,7 @@
     __ movw(Address(to, word_count, Address::times_2, -2), rax);
 
     // Check for and copy trailing dword
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     __ testq(word_count, 2);
     __ jcc(Assembler::zero, L_copy_32_bytes);
     __ movl(rax, Address(from, qword_count, Address::times_8));
@@ -1510,12 +1573,13 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
-    __ decq(qword_count);
+    __ decrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
+    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1524,6 +1588,7 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
+    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1567,13 +1632,15 @@
     // to the last unit copied:  end_to[0] := end_from[0]
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     disjoint_int_copy_entry = __ pc();
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'count' are now valid
     __ movq(dword_count, count);
     __ shrq(count, 1); // count => qword_count
@@ -1585,20 +1652,21 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
-    __ incq(qword_count);
+    __ incrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
     // Check for and copy trailing dword
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     __ testq(dword_count, 1); // Only byte test since the value is 0 or 1
     __ jccb(Assembler::zero, L_exit);
     __ movl(rax, Address(end_from, 8));
     __ movl(Address(end_to, 8), rax);
 
-  __ bind(L_exit);
+  __ BIND(L_exit);
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1638,14 +1706,16 @@
     const Register qword_count = count;
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     int_copy_entry = __ pc();
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     array_overlap_test(disjoint_int_copy_entry, Address::times_4);
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'count' are now valid
     __ movq(dword_count, count);
     __ shrq(count, 1); // count => qword_count
@@ -1660,12 +1730,13 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
-    __ decq(qword_count);
+    __ decrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1674,6 +1745,7 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1714,17 +1786,19 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     // Save no-overlap entry point for generate_conjoint_long_oop_copy()
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
     if (is_oop) {
       disjoint_oop_copy_entry  = __ pc();
     } else {
       disjoint_long_copy_entry = __ pc();
     }
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(qword_count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'qword_count' are now valid
 
     // Copy from low to high addresses.  Use 'to' as scratch.
@@ -1734,15 +1808,16 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
-    __ incq(qword_count);
+    __ incrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
     if (is_oop) {
       __ jmp(L_exit);
     } else {
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
       restore_arg_regs();
       __ xorq(rax, rax); // return 0
       __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1753,8 +1828,11 @@
     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
     if (is_oop) {
-    __ bind(L_exit);
+    __ BIND(L_exit);
       array_store_check(saved_to, end_to, rax);
+      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
+    } else {
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
     }
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
@@ -1787,21 +1865,23 @@
     const Register saved_count = rcx;
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
-
+    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
+
+    address disjoint_copy_entry = NULL;
     if (is_oop) {
+      disjoint_copy_entry = disjoint_oop_copy_entry;
       oop_copy_entry  = __ pc();
-      array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
-
     } else {
+      disjoint_copy_entry = disjoint_long_copy_entry;
       long_copy_entry = __ pc();
-      array_overlap_test(disjoint_long_copy_entry, Address::times_8);
-   }
-
+    }
+    BLOCK_COMMENT("Entry:");
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+
+    array_overlap_test(disjoint_copy_entry, Address::times_8);
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
-    assert_clean_int(qword_count, rax); // Make sure 'count' is clean int.
-
     // 'from', 'to' and 'qword_count' are now valid
 
     if (is_oop) {
@@ -1814,15 +1894,16 @@
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
-    __ decq(qword_count);
+    __ decrementq(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
     if (is_oop) {
       __ jmp(L_exit);
     } else {
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
       restore_arg_regs();
       __ xorq(rax, rax); // return 0
       __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1833,9 +1914,12 @@
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
     if (is_oop) {
-    __ bind(L_exit);
+    __ BIND(L_exit);
       __ leaq(rcx, Address(to, saved_count, Address::times_8, -8));
       array_store_check(to, rcx, rax);
+      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
+    } else {
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
     }
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
@@ -1845,6 +1929,346 @@
     return start;
   }
 
+
+  // Helper for generating a dynamic type check.
+  // Smashes no registers.
+  void generate_type_check(Register sub_klass,
+                           Register super_check_offset,
+                           Register super_klass,
+                           Label& L_success) {
+    assert_different_registers(sub_klass, super_check_offset, super_klass);
+
+    BLOCK_COMMENT("type_check:");
+
+    Label L_miss;
+
+    // a couple of useful fields in sub_klass:
+    int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
+                     Klass::secondary_supers_offset_in_bytes());
+    int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
+                     Klass::secondary_super_cache_offset_in_bytes());
+    Address secondary_supers_addr(sub_klass, ss_offset);
+    Address super_cache_addr(     sub_klass, sc_offset);
+
+    // if the pointers are equal, we are done (e.g., String[] elements)
+    __ cmpq(super_klass, sub_klass);
+    __ jcc(Assembler::equal, L_success);
+
+    // check the supertype display:
+    Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
+    __ cmpq(super_klass, super_check_addr); // test the super type
+    __ jcc(Assembler::equal, L_success);
+
+    // if it was a primary super, we can just fail immediately
+    __ cmpl(super_check_offset, sc_offset);
+    __ jcc(Assembler::notEqual, L_miss);
+
+    // Now do a linear scan of the secondary super-klass chain.
+    // The repne_scan instruction uses fixed registers, which we must spill.
+    // (We need a couple more temps in any case.)
+    // This code is rarely used, so simplicity is a virtue here.
+    inc_counter_np(SharedRuntime::_partial_subtype_ctr);
+    {
+      __ pushq(rax);
+      __ pushq(rcx);
+      __ pushq(rdi);
+      assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
+
+      __ movq(rdi, secondary_supers_addr);
+      // Load the array length.
+      __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); 
+      // Skip to start of data.
+      __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+      // Scan rcx words at [rdi] for occurrence of rax
+      // Set NZ/Z based on last compare
+      __ movq(rax, super_klass);
+      __ repne_scan();
+
+      // Unspill the temp. registers:
+      __ popq(rdi);
+      __ popq(rcx);
+      __ popq(rax);
+
+      __ jcc(Assembler::notEqual, L_miss);
+    }
+
+    // Success.  Cache the super we found and proceed in triumph.
+    __ movq(super_cache_addr, super_klass); // note: rax is dead
+    __ jmp(L_success);
+
+    // Fall through on failure!
+    __ BIND(L_miss);
+  }
+
+  //
+  //  Generate checkcasting array copy stub
+  //
+  //  Input:
+  //    c_rarg0   - source array address
+  //    c_rarg1   - destination array address
+  //    c_rarg2   - element count, treated as ssize_t, can be zero
+  //    c_rarg3   - size_t ckoff (super_check_offset)
+  // not Win64
+  //    c_rarg4   - oop ckval (super_klass)
+  // Win64
+  //    rsp+40    - oop ckval (super_klass)
+  //
+  //  Output:
+  //    rax ==  0  -  success
+  //    rax == -1^K - failure, where K is partial transfer count
+  //
+  address generate_checkcast_copy(const char *name) {
+
+    Label L_load_element, L_store_element, L_do_card_marks, L_done;
+
+    // Input registers (after setup_arg_regs)
+    const Register from        = rdi;   // source array address
+    const Register to          = rsi;   // destination array address
+    const Register length      = rdx;   // elements count
+    const Register ckoff       = rcx;   // super_check_offset
+    const Register ckval       = r8;    // super_klass
+
+    // Registers used as temps (r13, r14 are save-on-entry)
+    const Register end_from    = from;  // source array end address
+    const Register end_to      = r13;   // destination array end address
+    const Register count       = rdx;   // -(count_remaining)
+    const Register r14_length  = r14;   // saved copy of length
+    // End pointers are inclusive, and if length is not zero they point
+    // to the last unit copied:  end_to[0] := end_from[0]
+
+    const Register rax_oop    = rax;    // actual oop copied
+    const Register r11_klass  = r11;    // oop._klass
+
+    //---------------------------------------------------------------
+    // Assembler stub will be used for this call to arraycopy 
+    // if the two arrays are subtypes of Object[] but the
+    // destination array type is not equal to or a supertype
+    // of the source type.  Each element must be separately
+    // checked.
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    checkcast_copy_entry  = __ pc();
+    BLOCK_COMMENT("Entry:");
+
+#ifdef ASSERT
+    // caller guarantees that the arrays really are different
+    // otherwise, we would have to make conjoint checks
+    { Label L;
+      array_overlap_test(L, Address::times_8);
+      __ stop("checkcast_copy within a single array");
+      __ bind(L);
+    }
+#endif //ASSERT
+
+    // allocate spill slots for r13, r14
+    enum {
+      saved_r13_offset,
+      saved_r14_offset,
+      saved_rbp_offset,
+      saved_rip_offset,
+      saved_rarg0_offset
+    };
+    __ subq(rsp, saved_rbp_offset * wordSize);
+    __ movq(Address(rsp, saved_r13_offset * wordSize), r13);
+    __ movq(Address(rsp, saved_r14_offset * wordSize), r14);
+    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
+                       // ckoff => rcx, ckval => r8
+                       // r9 and r10 may be used to save non-volatile registers
+#ifdef _WIN64
+    // last argument (#4) is on stack on Win64
+    const int ckval_offset = saved_rarg0_offset + 4;
+    __ movq(ckval, Address(rsp, ckval_offset * wordSize));
+#endif
+
+    // check that int operands are properly extended to size_t
+    assert_clean_int(length, rax);
+    assert_clean_int(ckoff, rax);
+
+#ifdef ASSERT
+    BLOCK_COMMENT("assert consistent ckoff/ckval");
+    // The ckoff and ckval must be mutually consistent,
+    // even though caller generates both.
+    { Label L;
+      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
+                        Klass::super_check_offset_offset_in_bytes());
+      __ cmpl(ckoff, Address(ckval, sco_offset));
+      __ jcc(Assembler::equal, L);
+      __ stop("super_check_offset inconsistent");
+      __ bind(L);
+    }
+#endif //ASSERT
+
+    // Loop-invariant addresses.  They are exclusive end pointers.
+    Address end_from_addr(from, length, Address::times_8, 0);
+    Address   end_to_addr(to,   length, Address::times_8, 0);
+    // Loop-variant addresses.  They assume post-incremented count < 0.
+    Address from_element_addr(end_from, count, Address::times_8, 0);
+    Address   to_element_addr(end_to,   count, Address::times_8, 0);
+    Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes());
+
+    // Copy from low to high addresses, indexed from the end of each array.
+    __ leaq(end_from, end_from_addr);
+    __ leaq(end_to,   end_to_addr);
+    __ movq(r14_length, length);        // save a copy of the length
+    assert(length == count, "");        // else fix next line:
+    __ negq(count);                     // negate and test the length
+    __ jcc(Assembler::notZero, L_load_element);
+
+    // Empty array:  Nothing to do.
+    __ xorq(rax, rax);                  // return 0 on (trivial) success
+    __ jmp(L_done);
+
+    // ======== begin loop ========
+    // (Loop is rotated; its entry is L_load_element.)
+    // Loop control:
+    //   for (count = -count; count != 0; count++)
+    // Base pointers src, dst are biased by 8*(count-1), to last element.
+    __ align(16);
+    
+    __ BIND(L_store_element);
+    __ movq(to_element_addr, rax_oop);  // store the oop
+    __ incrementq(count);               // increment the count toward zero
+    __ jcc(Assembler::zero, L_do_card_marks);
+
+    // ======== loop entry is here ========
+    __ BIND(L_load_element);
+    __ movq(rax_oop, from_element_addr); // load the oop
+    __ testq(rax_oop, rax_oop);
+    __ jcc(Assembler::zero, L_store_element);
+
+    __ movq(r11_klass, oop_klass_addr); // query the object klass
+    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
+    // ======== end loop ========
+
+    // It was a real error; we must depend on the caller to finish the job.
+    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
+    // Emit GC store barriers for the oops we have copied (r14 + rdx),
+    // and report their number to the caller.
+    assert_different_registers(rax, r14_length, count, to, end_to, rcx);
+    __ leaq(end_to, to_element_addr);
+    array_store_check(to, end_to, rcx);
+    __ movq(rax, r14_length);           // original oops
+    __ addq(rax, count);                // K = (original - remaining) oops
+    __ notq(rax);                       // report (-1^K) to caller
+    __ jmp(L_done);
+
+    // Come here on success only.
+    __ BIND(L_do_card_marks);
+    __ addq(end_to, -wordSize);         // make an inclusive end pointer
+    array_store_check(to, end_to, rcx);
+    __ xorq(rax, rax);                  // return 0 on success
+
+    // Common exit point (success or failure).
+    __ BIND(L_done);
+    __ movq(r13, Address(rsp, saved_r13_offset * wordSize));
+    __ movq(r14, Address(rsp, saved_r14_offset * wordSize));
+    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
+    restore_arg_regs();
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+  //
+  //  Generate 'unsafe' array copy stub
+  //  Though just as safe as the other stubs, it takes an unscaled
+  //  size_t argument instead of an element count.
+  //
+  //  Input:
+  //    c_rarg0   - source array address
+  //    c_rarg1   - destination array address
+  //    c_rarg2   - byte count, treated as ssize_t, can be zero
+  //
+  // Examines the alignment of the operands and dispatches
+  // to a long, int, short, or byte copy loop.
+  //
+  address generate_unsafe_copy(const char *name) {
+
+    Label L_long_aligned, L_int_aligned, L_short_aligned;
+
+    // Input registers (before setup_arg_regs)
+    const Register from        = c_rarg0;  // source array address
+    const Register to          = c_rarg1;  // destination array address
+    const Register size        = c_rarg2;  // byte count (size_t)
+
+    // Register used as a temp
+    const Register bits        = rax;      // test copy of low bits
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    // bump this on entry, not on exit:
+    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
+
+    __ movq(bits, from);
+    __ orq(bits, to);
+    __ orq(bits, size);
+
+    __ testb(bits, BytesPerLong-1);
+    __ jccb(Assembler::zero, L_long_aligned);
+
+    __ testb(bits, BytesPerInt-1);
+    __ jccb(Assembler::zero, L_int_aligned);
+
+    __ testb(bits, BytesPerShort-1);
+    __ jcc(Assembler::notZero, byte_copy_entry, relocInfo::runtime_call_type);
+
+    __ BIND(L_short_aligned);
+    __ shrq(size, LogBytesPerShort); // size => short_count
+    __ jmp(short_copy_entry, relocInfo::runtime_call_type);
+
+    __ BIND(L_int_aligned);
+    __ shrq(size, LogBytesPerInt); // size => int_count
+    __ jmp(int_copy_entry, relocInfo::runtime_call_type);
+
+    __ BIND(L_long_aligned);
+    __ shrq(size, LogBytesPerLong); // size => qword_count
+    __ jmp(long_copy_entry, relocInfo::runtime_call_type);
+
+    return start;
+  }
+
+  // Perform range checks on the proposed arraycopy.
+  // Kills temp, but nothing else.
+  // Also, clean the sign bits of src_pos and dst_pos.
+  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
+                              Register src_pos, // source position (c_rarg1)
+                              Register dst,     // destination array oop (c_rarg2)
+                              Register dst_pos, // destination position (c_rarg3)
+                              Register length,
+                              Register temp,
+                              Label& L_failed) {
+    BLOCK_COMMENT("arraycopy_range_checks:");
+
+    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
+    __ movl(temp, length);
+    __ addl(temp, src_pos);             // src_pos + length
+    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
+    __ jcc(Assembler::above, L_failed);
+
+    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
+    __ movl(temp, length);
+    __ addl(temp, dst_pos);             // dst_pos + length
+    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
+    __ jcc(Assembler::above, L_failed);
+
+    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
+    // Move with sign extension can be used since they are positive.
+    __ movslq(src_pos, src_pos);
+    __ movslq(dst_pos, dst_pos);
+
+    BLOCK_COMMENT("arraycopy_range_checks done");
+  }
+
   //
   //  Generate generic array copy stubs
   //
@@ -1860,7 +2284,7 @@
   //
   //  Output:
   //    rax ==  0  -  success
-  //    rax == -1  -  need to call System.arraycopy
+  //    rax == -1^K - failure, where K is partial transfer count
   //
   address generate_generic_copy(const char *name) {
 
@@ -1874,9 +2298,9 @@
     const Register dst_pos    = c_rarg3;  // destination position
     // elements count is on stack on Win64
 #ifdef _WIN64
-#define LENGTH Address(rsp, 6 * wordSize)
+#define C_RARG4 Address(rsp, 6 * wordSize)
 #else
-#define LENGTH c_rarg4
+#define C_RARG4 c_rarg4
 #endif
 
     __ align(CodeEntryAlignment);
@@ -1885,6 +2309,9 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
+    // bump this on entry, not on exit:
+    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
+
     //-----------------------------------------------------------------------
     // Assembler stub will be used for this call to arraycopy 
     // if the following conditions are met:
@@ -1916,29 +2343,38 @@
     __ jcc(Assembler::negative, L_failed);
 
     // registers used as temp
-    const Register r11_length = r11;      // elements count to copy
-    const Register rax_klass  = rax;      // array klass
+    const Register r11_length    = r11; // elements count to copy
+    const Register r10_src_klass = r10; // array klass
 
     //  if (length < 0) return -1;
-    __ movl(r11_length, LENGTH); // length (elements count, 32-bits value)
+    __ movl(r11_length, C_RARG4);       // length (elements count, 32-bits value)
     __ testl(r11_length, r11_length);
     __ jcc(Assembler::negative, L_failed);
 
-    //  if (src->klass() == NULL) return -1;
-    __ movq(rax_klass, Address(src, oopDesc::klass_offset_in_bytes()));
-    __ testq(rax_klass, rax_klass);
-    __ jcc(Assembler::zero, L_failed);  // it is broken if klass is NULL
-
-    //  if (src->klass() != dst->klass()) return -1;
-    __ cmpq(rax_klass, Address(dst, oopDesc::klass_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, L_failed);
+    Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
+    Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
+    __ movq(r10_src_klass, src_klass_addr);
+#ifdef ASSERT
+    //  assert(src->klass() != NULL);
+    BLOCK_COMMENT("assert klasses not null");
+    { Label L1, L2;
+      __ testq(r10_src_klass, r10_src_klass);
+      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
+      __ bind(L1);
+      __ stop("broken null klass");
+      __ bind(L2);
+      __ cmpq(dst_klass_addr, 0);
+      __ jcc(Assembler::equal, L1);     // this would be broken also
+      BLOCK_COMMENT("assert done");
+    }
+#endif
 
     // Load layout helper (32-bits)
     //
     //  |array_tag|     | header_size | element_type |     |log2_element_size|
     // 32        30    24            16              8     2                 0
     //
-    //   array_tag: typeArray = 0x3, objArray = 0x2
+    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
     //
 
     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
@@ -1946,32 +2382,33 @@
 
     const Register rax_lh = rax;  // layout helper
 
-    __ movl(rax_lh, Address(rax_klass, lh_offset));
+    __ movl(rax_lh, Address(r10_src_klass, lh_offset));
+
+    // Handle objArrays completely differently...
+    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
+    __ cmpl(rax_lh, objArray_lh);
+    __ jcc(Assembler::equal, L_objArray);
+
+    //  if (src->klass() != dst->klass()) return -1;
+    __ cmpq(r10_src_klass, dst_klass_addr);
+    __ jcc(Assembler::notEqual, L_failed);
 
     //  if (!src->is_Array()) return -1;
     __ cmpl(rax_lh, Klass::_lh_neutral_value);
     __ jcc(Assembler::greaterEqual, L_failed);
 
-    //  if (src_pos + length > arrayOop(src)->length() ) return ac_failed;
-    const Register r10_copy_end = r10;  // element number after last copied
-    __ movl(r10_copy_end, r11_length);
-    __ addl(r10_copy_end, src_pos);     // src_pos + length
-    __ cmpl(r10_copy_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
-    __ jcc(Assembler::above, L_failed);
-
-    //  if (dst_pos + length > arrayOop(dst)->length() ) return ac_failed;
-    __ movl(r10_copy_end, r11_length);
-    __ addl(r10_copy_end, dst_pos);     // dst_pos + length
-    __ cmpl(r10_copy_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
-    __ jcc(Assembler::above, L_failed);
-
-    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
-    // Move with sign extension can be used since they are positive.
-    __ movslq(src_pos, src_pos);
-    __ movslq(dst_pos, dst_pos);
-
-    __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
-    __ jcc(Assembler::less, L_objArray);
+    // At this point, it is known to be a typeArray (array_tag 0x3).
+#ifdef ASSERT
+    { Label L;
+      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
+      __ jcc(Assembler::greaterEqual, L);
+      __ stop("must be a primitive array");
+      __ bind(L);
+    }
+#endif
+
+    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
+                           r10, L_failed);
 
     // typeArrayKlass
     //
@@ -1987,6 +2424,7 @@
     __ andq(r10_offset, Klass::_lh_header_size_mask);   // array_offset
     __ addq(src, r10_offset);           // src array offset
     __ addq(dst, r10_offset);           // dst array offset
+    BLOCK_COMMENT("choose copy loop based on element size");
     __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
 
     // next registers should be set before the jump to corresponding stub
@@ -1997,7 +2435,7 @@
     // 'from', 'to', 'count' registers should be set in such order
     // since they are the same as 'src', 'src_pos', 'dst'.
 
-  __ bind(L_copy_bytes);
+  __ BIND(L_copy_bytes);
     __ cmpl(rax_elsize, 0);
     __ jccb(Assembler::notEqual, L_copy_shorts);
     __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr
@@ -2005,7 +2443,7 @@
     __ movslq(count, r11_length); // length
     __ jmp(byte_copy_entry, relocInfo::runtime_call_type);
 
-  __ bind(L_copy_shorts);
+  __ BIND(L_copy_shorts);
     __ cmpl(rax_elsize, LogBytesPerShort);
     __ jccb(Assembler::notEqual, L_copy_ints);
     __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr
@@ -2013,7 +2451,7 @@
     __ movslq(count, r11_length); // length
     __ jmp(short_copy_entry, relocInfo::runtime_call_type);
 
-  __ bind(L_copy_ints);
+  __ BIND(L_copy_ints);
     __ cmpl(rax_elsize, LogBytesPerInt);
     __ jccb(Assembler::notEqual, L_copy_longs);
     __ leaq(from, Address(src, src_pos, Address::times_4, 0));// src_addr
@@ -2021,10 +2459,14 @@
     __ movslq(count, r11_length); // length
     __ jmp(int_copy_entry, relocInfo::runtime_call_type);
 
-  __ bind(L_copy_longs);
+  __ BIND(L_copy_longs);
 #ifdef ASSERT
-    __ cmpl(rax_elsize, LogBytesPerLong);
-    __ jcc(Assembler::notEqual, L_failed);
+    { Label L;
+      __ cmpl(rax_elsize, LogBytesPerLong);
+      __ jcc(Assembler::equal, L);
+      __ stop("must be long copy, but elsize is wrong");
+      __ bind(L);
+    }
 #endif
     __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr
     __ leaq(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
@@ -2032,17 +2474,82 @@
     __ jmp(long_copy_entry, relocInfo::runtime_call_type);
 
     // objArrayKlass
-  __ bind(L_objArray); 
+  __ BIND(L_objArray);
+    // live at this point:  r10_src_klass, src[_pos], dst[_pos]
+
+    Label L_plain_copy, L_checkcast_copy;
+    //  test array classes for subtyping
+    __ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality
+    __ jcc(Assembler::notEqual, L_checkcast_copy);
+
+    // Identically typed arrays can be copied without element-wise checks.
+    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
+                           r10, L_failed);
+
     __ leaq(from, Address(src, src_pos, Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
     __ leaq(to,   Address(dst, dst_pos, Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
     __ movslq(count, r11_length); // length
+  __ BIND(L_plain_copy);
     __ jmp(oop_copy_entry, relocInfo::runtime_call_type);
 
-  __ bind(L_failed);
+  __ BIND(L_checkcast_copy);
+    // live at this point:  r10_src_klass, !r11_length
+    {
+      // assert(r11_length == C_RARG4); // will reload from here
+      Register r11_dst_klass = r11;
+      __ movq(r11_dst_klass, dst_klass_addr);
+
+      // Before looking at dst.length, make sure dst is also an objArray.
+      __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
+      __ jcc(Assembler::notEqual, L_failed);
+
+      // It is safe to examine both src.length and dst.length.
+#ifndef _WIN64
+      arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
+                             rax, L_failed);
+#else
+      __ movl(r11_length, C_RARG4);     // reload
+      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
+                             rax, L_failed);
+      __ movl(r11_dst_klass, dst_klass_addr); // reload
+#endif
+
+      // Marshal the base address arguments now, freeing registers.
+      __ leaq(from, Address(src, src_pos, Address::times_8,
+                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+      __ leaq(to,   Address(dst, dst_pos, Address::times_8,
+                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+      __ movl(count, C_RARG4);          // length (reloaded)
+      Register sco_temp = c_rarg3;      // this register is free now
+      assert_different_registers(from, to, count, sco_temp,
+                                 r11_dst_klass, r10_src_klass);
+      assert_clean_int(count, sco_temp);
+
+      // Generate the type check.
+      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
+                        Klass::super_check_offset_offset_in_bytes());
+      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
+      assert_clean_int(sco_temp, rax);
+      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
+
+      // Fetch destination element klass from the objArrayKlass header.
+      int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
+                       objArrayKlass::element_klass_offset_in_bytes());
+      __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset));
+      __ movl(sco_temp,      Address(r11_dst_klass, sco_offset));
+      assert_clean_int(sco_temp, rax);
+
+      // the checkcast_copy loop needs two extra arguments:
+      assert(c_rarg3 == sco_temp, "#3 already in place");
+      __ movq(C_RARG4, r11_dst_klass);  // dst.klass.element_klass
+      __ jmp(checkcast_copy_entry, relocInfo::runtime_call_type);
+    }
+
+  __ BIND(L_failed);
     __ xorq(rax, rax);
-    __ decq(rax); // return -1
+    __ notq(rax); // return -1
     __ leave();   // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
@@ -2070,7 +2577,9 @@
     StubRoutines::_oop_disjoint_arraycopy    = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
     StubRoutines::_oop_arraycopy             = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
 
-    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
+    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
+    StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
+    StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
 
     // We don't generate specialized code for HeapWord-aligned source
     // arrays, so just use the code we've already generated
@@ -2157,6 +2666,7 @@
 
     // Call runtime
     __ movq(c_rarg0, r15_thread);
+    BLOCK_COMMENT("call runtime_entry");
     __ call(runtime_entry, relocInfo::runtime_call_type);
 
     // Generate oop map
@@ -2311,6 +2821,8 @@
 address StubGenerator::long_copy_entry  = NULL;
 address StubGenerator::oop_copy_entry   = NULL;
 
+address StubGenerator::checkcast_copy_entry = NULL;
+
 void StubGenerator_generate(CodeBuffer* code, bool all) {
   StubGenerator g(code, all);
 }
--- a/hotspot/src/cpu/amd64/vm/templateTable_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/templateTable_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)templateTable_amd64.cpp	1.53 07/05/05 17:04:09 JVM"
+#pragma ident "@(#)templateTable_amd64.cpp	1.54 07/05/17 15:45:52 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1571,7 +1571,7 @@
     __ jccb(Assembler::above, done);
     __ movl(rax, 0);
     __ jccb(Assembler::equal, done);
-    __ decl(rax);
+    __ decrementl(rax);
   }
   __ bind(done);
 }
@@ -1919,7 +1919,7 @@
   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
   __ jcc(Assembler::equal, found);
   __ bind(loop_entry);
-  __ decl(rcx);
+  __ decrementl(rcx);
   __ jcc(Assembler::greaterEqual, loop);
   // default case
   __ profile_switch_default(rax);
@@ -2859,7 +2859,7 @@
                            ConstantPoolCacheEntry::f2_offset())));
   // make sure exception is reported in correct bcp range (getfield is
   // next instruction)
-  __ incq(r13);
+  __ incrementq(r13);
   __ null_check(rax);
   switch (state) {
   case itos: 
@@ -2889,7 +2889,7 @@
   //   __ bind(notVolatile);
   // }
 
-  __ decq(r13);
+  __ decrementq(r13);
 }
 
 
@@ -3298,7 +3298,7 @@
       __ movq(Address(rax, rdx, Address::times_8,
 		      sizeof(oopDesc) - oopSize), 
 	      rcx);
-      __ decl(rdx);
+      __ decrementl(rdx);
       __ jcc(Assembler::notZero, loop);
     }
 
@@ -3609,7 +3609,7 @@
   // handling for async. exceptions work correctly.
   // The object has already been poped from the stack, so the
   // expression stack looks correct.
-  __ incq(r13);
+  __ incrementq(r13);
 
   // store object  
   __ movq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); 
--- a/hotspot/src/cpu/amd64/vm/vm_version_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/vm_version_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vm_version_amd64.cpp	1.18 07/05/05 17:04:09 JVM"
+#pragma ident "@(#)vm_version_amd64.cpp	1.19 07/05/17 15:46:04 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -240,8 +240,29 @@
   if( !supports_sse () ) // Drop to 0 if no SSE  support
     UseSSE = 0;
 
-  if( is_intel() && FLAG_IS_DEFAULT(UseStoreImmI16) )
-    UseStoreImmI16 = false; // don't use it on Intel cpus
+  if( is_intel() ) { // Intel cpus specific settings
+    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
+      UseStoreImmI16 = false; // don't use it on Intel cpus
+    }
+    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
+      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
+        UseAddressNop = true; // use it on new Intel cpus
+      }
+#ifdef COMPILER2
+      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
+        // For new Intel cpus do the following optimization:
+        // don't align the beginning of a loop if there are enough instructions
+        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
+        // in current fetch line (OptoLoopAlignment) or the padding 
+        // is big (> MaxLoopPad).
+        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
+        // generated NOP instructions. 11 is the largest size of one
+        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
+        MaxLoopPad = 11;
+      }
+#endif // COMPILER2
+    }
+  }
 
   assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
   assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");
--- a/hotspot/src/cpu/amd64/vm/vtableStubs_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/amd64/vm/vtableStubs_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vtableStubs_amd64.cpp	1.19 07/05/05 17:04:10 JVM"
+#pragma ident "@(#)vtableStubs_amd64.cpp	1.20 07/05/17 15:46:10 JVM"
 #endif
 /*
  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -48,7 +48,7 @@
 
 #ifndef PRODUCT
   if (CountCompiledCalls) {
-    __ incl(Address((address) SharedRuntime::nof_megamorphic_calls_addr(),
+    __ incrementl(Address((address) SharedRuntime::nof_megamorphic_calls_addr(),
                     relocInfo::none));
   }
 #endif
@@ -119,7 +119,7 @@
 
 #ifndef PRODUCT
   if (CountCompiledCalls) {
-    __ incl(Address((address) SharedRuntime::nof_megamorphic_calls_addr(),
+    __ incrementl(Address((address) SharedRuntime::nof_megamorphic_calls_addr(),
                     relocInfo::none));
   }
 #endif
--- a/hotspot/src/cpu/i486/vm/assembler_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/assembler_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)assembler_i486.cpp	1.234 07/05/05 17:04:11 JVM"
+#pragma ident "@(#)assembler_i486.cpp	1.235 07/05/17 15:46:14 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -983,19 +983,14 @@
 }
 
 
-void Assembler::decb(Register dst) {
-  assert(dst->has_byte_register(), "must have byte register"); 
-  emit_byte(0xFE);
-  emit_byte(0xC8 | dst->encoding());
-}
-
-
 void Assembler::decl(Register dst) {
+  // Don't use it directly. Use MacroAssembler::decrement() instead.
   emit_byte(0x48 | dst->encoding());
 }
 
 
 void Assembler::decl(Address dst) {
+  // Don't use it directly. Use MacroAssembler::decrement() instead.
   InstructionMark im(this);
   emit_byte(0xFF);
   emit_operand(ecx, dst);
@@ -1034,11 +1029,13 @@
 
 
 void Assembler::incl(Register dst) {
+  // Don't use it directly. Use MacroAssembler::increment() instead.
   emit_byte(0x40 | dst->encoding());
 }
 
 
 void Assembler::incl(Address dst) {
+  // Don't use it directly. Use MacroAssembler::increment() instead.
   InstructionMark im(this);
   emit_byte(0xFF);
   emit_operand(eax, dst);
@@ -1320,7 +1317,6 @@
 
 
 void Assembler::xchgl(Register dst, Register src) {
-  InstructionMark im(this);
   emit_byte(0x87);
   emit_byte(0xc0 | dst->encoding() << 3 | src->encoding());
 }
@@ -1368,6 +1364,87 @@
 
 void Assembler::nop(int i) {
   assert(i > 0, " ");
+  if (UseAddressNop) {
+    // Using multi-bytes nops "0x0F 0x1F [address]"
+    while(i >= 15) {
+      // don't generate sequential address nops (mix with regular nops).
+      i -= 15;
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x0F);   // NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+      emit_byte(0x1F);
+      emit_byte(0x84);   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+      emit_byte(0x00);   // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+      emit_long(0);      // 32-bits offset
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x66);   // size prefix
+      emit_byte(0x90);   // nop
+    }
+    switch (i) {
+      case 14:
+        emit_byte(0x66); // size prefix
+      case 13:
+        emit_byte(0x66); // size prefix
+      case 12:
+        emit_byte(0x0F); // NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+        emit_byte(0x1F);
+        emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+        emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+        emit_long(0);    // 32-bits offset
+        emit_byte(0x66); // size prefix
+        emit_byte(0x66); // size prefix
+        emit_byte(0x66); // size prefix
+        emit_byte(0x90); // nop
+        break;
+      case 11:
+        emit_byte(0x66); // size prefix
+      case 10:
+        emit_byte(0x66); // size prefix
+      case 9:
+        emit_byte(0x66); // size prefix
+      case 8: // NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+        emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+        emit_long(0);    // 32-bits offset
+        break;
+      case 7: // NOP DWORD PTR [EAX+0] 32-bits offset
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
+        emit_long(0);    // 32-bits offset
+        break;
+      case 6:
+        emit_byte(0x66); // size prefix
+      case 5: // NOP DWORD PTR [EAX+EAX*0+0]
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
+        emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+        emit_byte(0);    // 8-bits offset
+        break;
+      case 4:            // NOP DWORD PTR [EAX+0]
+        emit_byte(0x0F);
+        emit_byte(0x1F);
+        emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
+        emit_byte(0);    // 8-bits offset
+        break;
+      case 3: 
+        // Don't use "0x0F 0x1F 0x00" (NOP DWORD PTR [EAX])
+        emit_byte(0x66); // size prefix
+      case 2:
+        emit_byte(0x66); // size prefix
+      case 1:
+        emit_byte(0x90); // nop
+        break;
+      default:
+        assert(i == 0, " ");
+    }
+    return;
+  }
   // Using nops with size prefixes "0x66 0x90".
   // From AMD Optimization Guide:
   //  1: 0x90
@@ -2332,7 +2409,6 @@
 void Assembler::movd(Register dst, XMMRegister src) {
   assert(VM_Version::supports_sse2(), "");
 
-  InstructionMark im(this);
   emit_byte(0x66);
   emit_byte(0x0F);
   emit_byte(0x7E);
@@ -2353,7 +2429,6 @@
   assert(isByte(mode), "invalid value");
   assert(VM_Version::supports_sse2(), "");
 
-  InstructionMark im(this);
   emit_byte(0x66);
   emit_byte(0x0F);
   emit_byte(0x70);
@@ -2377,7 +2452,6 @@
   assert(isByte(mode), "invalid value");
   assert(VM_Version::supports_sse2(), "");
 
-  InstructionMark im(this);
   emit_byte(0xF2);
   emit_byte(0x0F);
   emit_byte(0x70);
@@ -2400,7 +2474,6 @@
 void Assembler::psrlq(XMMRegister dst, int shift) {
   assert(VM_Version::supports_sse2(), "");
 
-  InstructionMark im(this);
   emit_byte(0x66);
   emit_byte(0x0F);
   emit_byte(0x73);
@@ -2550,20 +2623,36 @@
 
 
 void MacroAssembler::increment(Register reg, int value) {
+  if (value == min_jint) {addl(reg, value); return; }
   if (value <  0) { decrement(reg, -value); return; }
   if (value == 0) {                       ; return; }
-  if (value == 1) { incl(reg)             ; return; }
+  if (value == 1 && UseIncDec) { incl(reg); return; }
   /* else */      { addl(reg, value)      ; return; }
 }
 
+void MacroAssembler::increment(Address dst, int value) {
+  if (value == min_jint) {addl(dst, value); return; }
+  if (value <  0) { decrement(dst, -value); return; }
+  if (value == 0) {                       ; return; }
+  if (value == 1 && UseIncDec) { incl(dst); return; }
+  /* else */      { addl(dst, value)      ; return; }
+}
 
 void MacroAssembler::decrement(Register reg, int value) {
+  if (value == min_jint) {subl(reg, value); return; }
   if (value <  0) { increment(reg, -value); return; }
   if (value == 0) {                       ; return; }
-  if (value == 1) { decl(reg)             ; return; }
+  if (value == 1 && UseIncDec) { decl(reg); return; }
   /* else */      { subl(reg, value)      ; return; }
 }
 
+void MacroAssembler::decrement(Address dst, int value) {
+  if (value == min_jint) {subl(dst, value); return; }
+  if (value <  0) { increment(dst, -value); return; }
+  if (value == 0) {                       ; return; }
+  if (value == 1 && UseIncDec) { decl(dst); return; }
+  /* else */      { subl(dst, value)      ; return; }
+}
 
 void MacroAssembler::align(int modulus) {
   if (offset() % modulus != 0) nop(modulus - (offset() % modulus));
@@ -3028,12 +3117,12 @@
 
   bind(high);
   xorl(x_hi, x_hi);
-  incl(x_hi);
+  increment(x_hi);
   jmp(done);
 
   bind(low);
   xorl(x_hi, x_hi);
-  decl(x_hi);
+  decrement(x_hi);
 
   bind(done);
 }
@@ -3250,14 +3339,14 @@
     jcc(Assembler::below , L);
     movl(dst, 0);
     jcc(Assembler::equal , L);
-    incl(dst);
+    increment(dst);
   } else { // unordered is greater
     movl(dst, 1);
     jcc(Assembler::parity, L);
     jcc(Assembler::above , L);
     movl(dst, 0);
     jcc(Assembler::equal , L);
-    decl(dst);
+    decrement(dst);
   }
   bind(L);
 }
@@ -3272,14 +3361,14 @@
     jcc(Assembler::below , L);
     movl(dst, 0);
     jcc(Assembler::equal , L);
-    incl(dst);
+    increment(dst);
   } else { // unordered is greater
     movl(dst, 1);
     jcc(Assembler::parity, L);
     jcc(Assembler::above , L);
     movl(dst, 0);
     jcc(Assembler::equal , L);
-    decl(dst);
+    decrement(dst);
   }
   bind(L);
 }
@@ -3294,14 +3383,14 @@
     jcc(Assembler::below , L);
     movl(dst, 0);
     jcc(Assembler::equal , L);
-    incl(dst);
+    increment(dst);
   } else { // unordered is greater
     movl(dst, 1);
     jcc(Assembler::parity, L);
     jcc(Assembler::above , L);
     movl(dst, 0);
     jcc(Assembler::equal , L);
-    decl(dst);
+    decrement(dst);
   }
   bind(L);
 }
@@ -3341,11 +3430,7 @@
   jcc (Assembler::positive, _is_positive);
   int offset = (1 << shift_value) - 1 ;
 
-  if (offset == 1) {
-    incl(reg);
-  } else {
-    addl(reg, offset);
-  }
+  increment(reg, offset);
 
   bind (_is_positive);
   sarl(reg, shift_value);
@@ -4133,10 +4218,15 @@
 
 int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
                                          bool swap_reg_contains_mark,
-                                         Label& done, Label* slow_case) {
+                                         Label& done, Label* slow_case,
+                                         BiasedLockingCounters* counters) {
   assert(UseBiasedLocking, "why call this otherwise?");
   assert(swap_reg == eax, "swap_reg must be eax for cmpxchg");
   assert_different_registers(lock_reg, obj_reg, swap_reg);
+
+  if (PrintBiasedLockingStatistics && counters == NULL)
+    counters = BiasedLocking::counters();
+
   bool need_tmp_reg = false;
   if (tmp_reg == noreg) {
     need_tmp_reg = true;
@@ -4193,8 +4283,8 @@
   if (need_tmp_reg) {
     popl(tmp_reg);
   }
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((int) BiasedLocking::biased_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((int) counters->biased_lock_entry_count_addr(), relocInfo::none));
   }
   jcc(Assembler::equal, done);
 
@@ -4250,8 +4340,8 @@
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
   // interpreter runtime in the slow case.
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((int) BiasedLocking::anonymously_biased_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((int) counters->anonymously_biased_lock_entry_count_addr(), relocInfo::none));
   }
   if (slow_case != NULL) {
     jcc(Assembler::notZero, *slow_case);
@@ -4285,8 +4375,8 @@
   // If the biasing toward our thread failed, then another thread
   // succeeded in biasing it toward itself and we need to revoke that
   // bias. The revocation will occur in the runtime in the slow case.
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((int) BiasedLocking::rebiased_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((int) counters->rebiased_lock_entry_count_addr(), relocInfo::none));
   }
   if (slow_case != NULL) {
     jcc(Assembler::notZero, *slow_case);
@@ -4321,8 +4411,8 @@
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
-  if (PrintBiasedLockingStatistics) {
-    cond_incl(Assembler::zero, Address((int) BiasedLocking::revoked_lock_entry_count_addr(), relocInfo::none));
+  if (counters != NULL) {
+    cond_incl(Assembler::zero, Address((int) counters->revoked_lock_entry_count_addr(), relocInfo::none));
   }
 
   bind(cas_label);
@@ -4375,12 +4465,16 @@
   Condition negated_cond = negate_condition(cond);
   Label L;
   jcc(negated_cond, L);
+  atomic_incl(counter_addr);
+  bind(L);
+}
+
+void MacroAssembler::atomic_incl(Address counter_addr) {
   pushfd();
   if (os::is_MP())
     lock();
-  incl(counter_addr);
+  increment(counter_addr);
   popfd();
-  bind(L);
 }
 
 SkipIfEqual::SkipIfEqual(
--- a/hotspot/src/cpu/i486/vm/assembler_i486.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/assembler_i486.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)assembler_i486.hpp	1.162 07/05/05 17:04:11 JVM"
+#pragma ident "@(#)assembler_i486.hpp	1.163 07/05/17 15:46:17 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -25,6 +25,8 @@
  *  
  */
 
+class BiasedLockingCounters;
+
 // Contains all the definitions needed for x86 assembly code generation.
 
 // Calling convention
@@ -307,19 +309,26 @@
   void cmpl(Register dst, Register src);
   void cmpl(Register dst, Address src);
 
-  void decb(Register dst);
+ protected:
+  // Don't use the next inc() and dec() methods directly. INC & DEC instructions
+  // could cause a partial flag stall since they don't set CF flag.
+  // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
+  // which call inc() & dec() or add() & sub() in accordance with 
+  // the product flag UseIncDec value.
+
   void decl(Register dst);
   void decl(Address dst);
 
+  void incl(Register dst);
+  void incl(Address dst);
+
+ public:
   void idivl(Register src);
   void cdql();
 
   void imull(Register dst, Register src);
   void imull(Register dst, Register src, int value);
 
-  void incl(Register dst);
-  void incl(Address dst);
-
   void leal(Register dst, Address src);
 
   void mull(Address src);
@@ -764,8 +773,10 @@
   void extend_sign(Register hi, Register lo);
 
   // Support for inc/dec with optimal instruction selection depending on value
-  void increment(Register reg, int value);
-  void decrement(Register reg, int value);
+  void increment(Register reg, int value = 1);
+  void decrement(Register reg, int value = 1);
+  void increment(Address  dst, int value = 1);
+  void decrement(Address  dst, int value = 1);
 
   // Alignment
   void align(int modulus);
@@ -991,7 +1002,8 @@
   // the calling code has already passed any potential faults.
   int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
                            bool swap_reg_contains_mark,
-                           Label& done, Label* slow_case = NULL);
+                           Label& done, Label* slow_case = NULL,
+                           BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 
   Condition negate_condition(Condition cond);
@@ -999,6 +1011,8 @@
   // Helper functions for statistics gathering.
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
   void cond_incl(Condition cond, Address counter_addr);
+  // Unconditional atomic increment.
+  void atomic_incl(Address counter_addr);
 };
 
 /**
--- a/hotspot/src/cpu/i486/vm/c1_CodeStubs_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/c1_CodeStubs_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_CodeStubs_i486.cpp	1.97 07/05/05 17:04:12 JVM"
+#pragma ident "@(#)c1_CodeStubs_i486.cpp	1.98 07/05/17 15:46:20 JVM"
 #endif
 /*
  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -57,7 +57,7 @@
 
   // input is > 0 -> return maxInt
   // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
-  __ decl(result()->as_register()); 
+  __ decrement(result()->as_register()); 
   __ jmpb(do_return);
 
   // input is NaN -> return 0
@@ -442,7 +442,7 @@
   ce->add_call_info_here(info());
 
 #ifndef PRODUCT
-  __ incl(Address((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, relocInfo::none));
+  __ increment(Address((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, relocInfo::none));
 #endif
   
   __ jmp(_continuation);
--- a/hotspot/src/cpu/i486/vm/c1_LIRAssembler_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/c1_LIRAssembler_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_LIRAssembler_i486.cpp	1.162 07/05/05 17:04:14 JVM"
+#pragma ident "@(#)c1_LIRAssembler_i486.cpp	1.163 07/05/17 15:46:23 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -451,15 +451,7 @@
 
   // unwind activation and forward exception to caller
   // eax: exception
-#ifdef JVMPI_SUPPORT
-  if (compilation()->jvmpi_event_method_exit_enabled()) {
-    __ jmp(Runtime1::entry_for(Runtime1::jvmpi_unwind_exception_id), relocInfo::runtime_call_type);
-  } else {
-#endif // JVMPI_SUPPORT
-    __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
-#ifdef JVMPI_SUPPORT
-  }
-#endif // JVMPI_SUPPORT
+  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
 
   assert(code_offset() - offset <= exception_handler_size, "overflow");
 
@@ -546,7 +538,7 @@
   __ subl(ecx, ebx);
   __ jcc(Assembler::notZero, haveResult);
   // starting loop
-  __ decl(eax); // we already tested index: skip one
+  __ decrement(eax); // we already tested index: skip one
   __ jcc(Assembler::zero, noLoop);
 
   // set esi.edi to the end of the arrays (arrays have same length)
@@ -565,7 +557,7 @@
   __ load_unsigned_word(ebx, Address(esi, eax, Address::times_2, 0));
   __ subl(ecx, ebx);
   __ jcc(Assembler::notZero, haveResult);
-  __ incl(eax);
+  __ increment(eax);
   __ jcc(Assembler::notZero, loop);
 
   // strings are equal up to min length
@@ -592,23 +584,14 @@
   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 
   // Note: we do not need to round double result; float result has the right precision
-#ifdef JVMPI_SUPPORT
-  if (compilation()->jvmpi_event_method_exit_enabled()) {
-    __ movl(ecx, method()->encoding());
-    __ jmp(Runtime1::entry_for(Runtime1::jvmpi_method_exit_id), relocInfo::runtime_call_type);
-  } else {
-#endif // JVMPI_SUPPORT
-    // the poll sets the condition code, but no data registers
-    Address polling_page((int)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
-                         relocInfo::none);
-
-    __ relocate(relocInfo::poll_return_type);
-    __ testl(eax, polling_page);
-
-    __ ret(0);
-#ifdef JVMPI_SUPPORT
-  }
-#endif // JVMPI_SUPPORT
+  // the poll sets the condition code, but no data registers
+  Address polling_page((int)os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
+                       relocInfo::none);
+
+  __ relocate(relocInfo::poll_return_type);
+  __ testl(eax, polling_page);
+
+  __ ret(0);
 }
 
 
@@ -1107,7 +1090,8 @@
       break;
     }
 
-    case T_OBJECT: // fall through
+    case T_ADDRESS: // fall through
+    case T_OBJECT:  // fall through
     case T_ARRAY:   // fall through
     case T_INT:
       __ movl(dest->as_register(), from_addr);
@@ -1844,19 +1828,11 @@
       jint c = right->as_constant_ptr()->as_jint();
       switch (code) {
         case lir_add: {
-          switch (c) {
-            case  1: __ incl(lreg);    break;
-            case -1: __ decl(lreg);    break;
-            default: __ addl(lreg, c); break;
-          }
+          __ increment(lreg, c);
           break;
         }
         case lir_sub: {
-          switch (c) {
-            case  1: __ decl(lreg);    break;
-            case -1: __ incl(lreg);    break;
-            default: __ subl(lreg, c); break;
-          }
+          __ decrement(lreg, c);
           break;
         }
         default: ShouldNotReachHere();
@@ -2088,19 +2064,11 @@
       jint c = right->as_constant_ptr()->as_jint();
       switch (code) {
         case lir_add: {
-          switch (c) {
-            case  1: __ incl(laddr);    break;
-            case -1: __ decl(laddr);    break;
-            default: __ addl(laddr, c); break;
-          }
+          __ increment(laddr, c);
           break;
         }
         case lir_sub: {
-          switch (c) {
-            case  1: __ decl(laddr);    break;
-            case -1: __ incl(laddr);    break;
-            default: __ subl(laddr, c); break;
-          }
+          __ decrement(laddr, c);
           break;
         }
         default: ShouldNotReachHere();
@@ -2337,9 +2305,9 @@
       __ movl(dreg, lreg);
       __ andl(dreg, 0x80000000 | (divisor - 1));
       __ jcc(Assembler::positive, done);
-      __ decl(dreg);
+      __ decrement(dreg);
       __ orl(dreg, ~(divisor - 1));
-      __ incl(dreg);
+      __ increment(dreg);
       __ bind(done);
     } else {
       ShouldNotReachHere();
--- a/hotspot/src/cpu/i486/vm/c1_MacroAssembler_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/c1_MacroAssembler_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_MacroAssembler_i486.cpp	1.56 07/05/05 17:04:13 JVM"
+#pragma ident "@(#)c1_MacroAssembler_i486.cpp	1.57 07/05/17 15:46:25 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -175,7 +175,12 @@
   }
 #endif
   xorl(t1, t1);      // use _zero reg to clear memory (shorter code)
-  shrl(index, 3);    // divide by 8 and set carry flag if bit 2 was set
+  if (UseIncDec) {
+    shrl(index, 3);  // divide by 8 and set carry flag if bit 2 was set
+  } else {
+    shrl(index, 2);  // use 2 instructions to avoid partial flag stall
+    shrl(index, 1);
+  }
   // index could have been not a multiple of 8 (i.e., bit 2 was set)
   { Label even;
     // note: if index was a multiple of 8, than it cannot
@@ -193,7 +198,7 @@
     bind(loop);
     movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
     movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);
-    decl(index);
+    decrement(index);
     jcc(Assembler::notZero, loop);
   }
 
@@ -248,7 +253,7 @@
 	hdr_size_in_bytes - (1*BytesPerWord)), t1_zero);
       movl(Address(obj, index, Address::times_8, 
 	hdr_size_in_bytes - (2*BytesPerWord)), t1_zero);
-      decl(index);
+      decrement(index);
       jcc(Assembler::notZero, loop);
     }
   }
--- a/hotspot/src/cpu/i486/vm/c1_Runtime1_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/c1_Runtime1_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_Runtime1_i486.cpp	1.190 07/05/05 17:04:12 JVM"
+#pragma ident "@(#)c1_Runtime1_i486.cpp	1.191 07/05/17 15:46:28 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1102,9 +1102,11 @@
 
         __ bind(register_finalizer);
         __ enter();
-        OopMap* map = save_live_registers(sasm, 2 /*num_rt_args */);
-
-        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), eax);
+        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
+        int call_offset = __ call_RT(noreg, noreg,
+                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), eax);
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
 
         // Now restore all the live registers
         restore_live_registers(sasm);
@@ -1157,23 +1159,6 @@
       }
       break;
 
-#ifdef JVMPI_SUPPORT
-    case jvmpi_unwind_exception_id:
-      { StubFrame f(sasm, "jvmpi_unwind_exception", dont_gc_arguments);
-
-        // notify the exit
-        f.load_argument(1, eax);
-        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), eax);
-
-        // grab the exception off the stack and leave the pushed frame.
-        f.load_argument(0, eax);
-        __ leave();
-
-        generate_unwind_exception(sasm);
-      }
-      break;
-#endif // JVMPI_SUPPORT
-
     case throw_array_store_exception_id:
       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
         // tos + 0: link
@@ -1260,40 +1245,6 @@
       }
       break;
 
-#ifdef JVMPI_SUPPORT
-    case monitorenter_with_jvmpi_id:
-      {
-        StubFrame f(sasm, "monitorenter_with_jvmpi", dont_gc_arguments);
-        OopMap* map = save_live_registers(sasm, 3);
-
-        f.load_argument(2, ecx); // ecx: method
-        f.load_argument(1, eax); // eax: object
-        f.load_argument(0, ebx); // ebx: lock address
-
-        __ movl(Address(ebx, JavaThread::vm_result_offset()), eax);
-        int call_offset = __ call_RT(eax, noreg, CAST_FROM_FN_PTR(address, monitorenter), eax, ebx);
-
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-
-	Label no_deopt;
-        // Get current instruction where will return to no to see if we must
-        // deopt
-        __ movl(ecx, Address(ebp, wordSize));
-        __ movl(ecx, Address(ecx, 0));
-	__ cmpl(ecx, Address(esp, 0));
-   	__ jcc(Assembler::equal, no_deopt);
-        // pass a dummy second argument to call_RT so the stack depths are all ok for the oopmaps.
-        call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, jvmpi_method_entry_after_deopt), eax, eax);
-        __ bind(no_deopt);
-
-        oop_maps->add_gc_map(call_offset, map->deep_copy());
-
-        restore_live_registers(sasm);
-      }
-      break;
-#endif // JVMPI_SUPPORT
-
     case monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
@@ -1343,51 +1294,6 @@
       }
       break;
 
-#ifdef JVMPI_SUPPORT
-    case jvmpi_method_entry_id:
-      { // eax: methodOop; ecx: receiver or null
-        StubFrame f(sasm, "jvmpi_method_entry", dont_gc_arguments);
-        OopMap* map = save_live_registers(sasm, 3, false);
-
-        f.load_argument(1, ecx); // ecx: methodOop
-        f.load_argument(0, eax); // eax: receiver
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_entry), eax, ecx);
-
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm, false);
-      }
-      break;
-    case jvmpi_method_exit_id:
-      { // ecx: methodOop;
-        __ set_info("jvmpi_method_exit", dont_gc_arguments);
-        __ enter();
-        __ subl(esp, 6 * wordSize);
-        // preserve potential oop result in current thread
-        __ get_thread(ebx);
-        __ movl(Address(esp, 0), eax);
-        __ movl(Address(esp, 4), edx);
-        __ fstp_d(Address(esp, sizeof(double)));
-        if (UseSSE >= 2) {
-          __ movsd(Address(esp, 2 * sizeof(double)), xmm0);
-        } else if (UseSSE == 1) {
-          __ movss(Address(esp, 2 * sizeof(double)), xmm0);
-      }
-        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), ecx);
-        __ movl(eax, Address(esp, 0));
-        __ movl(edx, Address(esp, 4));
-        __ fld_d(Address(esp, sizeof(double)));
-        if (UseSSE >= 2) {
-          __ movsd(xmm0, Address(esp, 2 * sizeof(double)));
-        } else if (UseSSE == 1) {
-          __ movss(xmm0, Address(esp, 2 * sizeof(double)));
-        }
-        __ leave();
-        __ ret(0);
-      }
-      break;
-#endif // JVMPI_SUPPORT
-
     case dtrace_object_alloc_id:
       { // eax: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/cpu/i486/vm/i486.ad	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/i486.ad	Fri May 25 00:49:14 2007 +0000
@@ -1645,6 +1645,10 @@
     // to allow platform-specific tweaking on sparc.
     __ cmpl(Reax, Resi);
     __ jcc(Assembler::equal, hit);
+#ifndef PRODUCT
+    int* ps_counter = &SharedRuntime::_partial_subtype_ctr;
+    __ increment(Address((int) ps_counter, relocInfo::none));
+#endif //PRODUCT
     __ movl(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
     __ movl(Recx,Address(Redi,arrayOopDesc::length_offset_in_bytes()));
     __ addl(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
@@ -2840,23 +2844,18 @@
   // instructions set the EFLAGS directly. It becomes simpler than
   // the float version above.
   enc_class CmpX_Result(eRegI dst) %{
-    // jcc(Assembler::parity, nan);
-    emit_opcode( cbuf, 0x7A );
-    emit_d8    ( cbuf, 0x04 );
-    // jcc(Assembler::equal, exit);
-    emit_opcode( cbuf, 0x74 );
-    emit_d8    ( cbuf, 0x06 );
-    // jcc(Assembler::above, inc);
-    emit_opcode( cbuf, 0x77 );
-    emit_d8    ( cbuf, 0x03 );
-    // Must be below or NAN (unordered).  DEC to -1
-    emit_opcode(cbuf,0x48+$dst$$reg);
-    // JMP,s done
-    emit_opcode(cbuf,0xEB);
-    emit_d8(cbuf, 1 );
-    // inc:
-    emit_opcode(cbuf,0x40+$dst$$reg);
-    // exit:
+    MacroAssembler _masm(&cbuf);
+    Label nan, inc, done;
+
+    __ jccb(Assembler::parity, nan);
+    __ jccb(Assembler::equal,  done);
+    __ jccb(Assembler::above,  inc);
+    __ bind(nan);
+    __ decrement(as_Register($dst$$reg));
+    __ jmpb(done);
+    __ bind(inc);
+    __ increment(as_Register($dst$$reg));
+    __ bind(done);
   %}
 
   // Compare the longs and set flags
@@ -2874,39 +2873,6 @@
 // done:
   %}
 
-  // Compare the longs and set -1, 0, or 1 into dst
-  enc_class cmpl3_flag( eRegL src1, eRegL src2, eRegI dst ) %{
-    // XOR    $dst,$dst
-    emit_opcode(cbuf,0x33);
-    emit_rm(cbuf, 0x3, $dst$$reg,$dst$$reg);
-    // CMP    $src1.hi,$src2.hi
-    emit_opcode( cbuf, 0x3B );
-    emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
-    // JLT,s  m_one
-    emit_opcode(cbuf,0x7C);
-    emit_d8(cbuf, 11 );
-    // JGT,s  p_one
-    emit_opcode(cbuf,0x7F);
-    emit_d8(cbuf, 6 );
-    // CMP    $src1.lo,$src2.lo
-    emit_opcode( cbuf, 0x3B );
-    emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
-    // JB,s  m_one
-    emit_opcode(cbuf,0x72);
-    emit_d8(cbuf, 5 );
-    // JEQ,s  done
-    emit_opcode(cbuf,0x74);
-    emit_d8(cbuf, 4 );
-// p_one:  INC $dst
-    emit_opcode(cbuf,0x40+$dst$$reg);
-    // JMP,s done
-    emit_opcode(cbuf,0xEB);
-    emit_d8(cbuf, 1 );
-// m_one: DEC $dst
-    emit_opcode(cbuf,0x48+$dst$$reg);
-// done:
-  %}
-
   enc_class convert_int_long( regL dst, eRegI src ) %{
     // mov $dst.lo,$src
     int dst_encoding = $dst$$reg;
@@ -3250,6 +3216,9 @@
     
     MacroAssembler masm(&cbuf);
 
+    if (_counters != NULL) {
+      masm.atomic_incl(Address((int) _counters->total_entry_count_addr(), relocInfo::none));
+    }
     if (EmitSync & 1) {
         // set box->dhw = unused_mark (3)
         // Force all sync thru slow-path: slow_enter() and slow_exit() 
@@ -3260,7 +3229,7 @@
         Label DONE_LABEL ;           
         if (UseBiasedLocking) {
            // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
-           masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
+           masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
         }
 
         masm.movl  (tmpReg, Address(objReg)) ;          // fetch markword 
@@ -3301,7 +3270,7 @@
       // at [FETCH], below, will never observe a biased encoding (*101b).
       // If this invariant is not held we risk exclusion (safety) failure.
       if (UseBiasedLocking) { 
-        masm.biased_locking_enter (boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
+        masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
       }
 
       masm.movl  (tmpReg, Address(objReg)) ;           // [FETCH]
@@ -3313,8 +3282,8 @@
       masm.movl  (Address(boxReg), tmpReg);              // Anticipate successful CAS
       if (os::is_MP()) { masm.lock();  }
       masm.cmpxchg(boxReg, Address(objReg));             // Updates tmpReg
-      if (PrintBiasedLockingStatistics) {
-        masm.cond_incl(Assembler::equal, Address((int) BiasedLocking::fast_path_entry_count_addr(), relocInfo::none));
+      if (_counters != NULL) {
+        masm.cond_incl(Assembler::equal, Address((int) _counters->fast_path_entry_count_addr(), relocInfo::none));
       }
       masm.jccb (Assembler::equal, DONE_LABEL);
 
@@ -3322,8 +3291,8 @@
       masm.subl(tmpReg, esp);
       masm.andl(tmpReg, 0xFFFFF003 );
       masm.movl(Address(boxReg), tmpReg);
-      if (PrintBiasedLockingStatistics) {
-        masm.cond_incl(Assembler::equal, Address((int) BiasedLocking::fast_path_entry_count_addr(), relocInfo::none));
+      if (_counters != NULL) {
+        masm.cond_incl(Assembler::equal, Address((int) _counters->fast_path_entry_count_addr(), relocInfo::none));
       }
       masm.jmp  (DONE_LABEL) ;
 
@@ -3753,7 +3722,7 @@
     // Compare first characters
     masm.subl(ecx, edi);
     masm.jcc(Assembler::notZero,  POP_LABEL);
-    masm.decl(esi);
+    masm.decrement(esi);
     masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
 
     {
@@ -3782,7 +3751,7 @@
     masm.load_unsigned_word(edi, Address(eax, esi, Address::times_2, 0));
     masm.subl(ecx, edi);
     masm.jcc(Assembler::notZero, POP_LABEL);
-    masm.incl(esi);
+    masm.increment(esi);
     masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
     
     // Strings are equal up to min length.  Return the length difference.
@@ -7478,6 +7447,7 @@
 %}
 
 instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
+  predicate(UseIncDec);
   match(Set dst (AddI dst src));
   effect(KILL cr);
 
@@ -7509,6 +7479,7 @@
 %}
 
 instruct decI_eReg(eRegI dst, immI_M1 src, eFlagsReg cr) %{
+  predicate(UseIncDec);
   match(Set dst (AddI dst src));
   effect(KILL cr);
 
@@ -12007,8 +11978,22 @@
             "JMP,s  done\n"
     "m_one:\tDEC    $dst\n"
      "done:" %}
-  opcode(0x3B, 0x1B);
-  ins_encode( cmpl3_flag(src1,src2,dst) );
+  ins_encode %{
+    Label p_one, m_one, done;
+    __ xorl($dst$$Register, $dst$$Register);
+    __ cmpl(HIGH_FROM_LOW($src1$$Register), HIGH_FROM_LOW($src2$$Register));
+    __ jccb(Assembler::less,    m_one);
+    __ jccb(Assembler::greater, p_one);
+    __ cmpl($src1$$Register, $src2$$Register);
+    __ jccb(Assembler::below,   m_one);
+    __ jccb(Assembler::equal,   done);
+    __ bind(p_one);
+    __ increment($dst$$Register);
+    __ jmpb(done);
+    __ bind(m_one);
+    __ decrement($dst$$Register);
+    __ bind(done);
+  %}
   ins_pipe( pipe_slow );
 %}
 
--- a/hotspot/src/cpu/i486/vm/interp_masm_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/interp_masm_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interp_masm_i486.cpp	1.164 07/05/05 17:04:14 JVM"
+#pragma ident "@(#)interp_masm_i486.cpp	1.165 07/05/17 15:46:48 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -794,11 +794,7 @@
 
   bind(no_unlock);
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi support
-#else // !JVMPI_SUPPORT
   // jvmti support
-#endif // JVMPI_SUPPORT
   if (notify_jvmdi) {
     notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
   } else {
@@ -1485,30 +1481,6 @@
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
     bind(L);
   }
-#ifdef JVMPI_SUPPORT
-  Label E;
-  Label S;
-  cmpl(Address((int)jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ), relocInfo::none), (int)JVMPI_EVENT_ENABLED);
-  jcc(Assembler::equal, S);
-  cmpl(Address((int)jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2), relocInfo::none), (int)JVMPI_EVENT_ENABLED);
-  jcc(Assembler::notEqual, E);
-  bind(S);
-  // notify method entry
-  { get_method(ebx);
-    // get receiver
-    { Label L;
-      xorl(ecx, ecx);             // receiver = NULL for a static method
-      movl(eax, Address(ebx, methodOopDesc::access_flags_offset()));
-      testl(eax, JVM_ACC_STATIC); // check if method is static
-      jcc(Assembler::notZero, L); // if static we're done
-      movl(ecx, Address(edi, Interpreter::local_offset_in_bytes(0)));// otherwise get receiver
-      bind(L);
-    }
-    call_VM(noreg, CAST_FROM_FN_PTR(address, 
-          SharedRuntime::jvmpi_method_entry), ebx, ecx);
-  }
-  bind(E);
-#endif // JVMPI_SUPPORT
 
   {
     SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
@@ -1541,19 +1513,6 @@
     pop(state);     
   }
 
-#ifdef JVMPI_SUPPORT
-  Label E;
-  cmpl(Address((int)jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT), relocInfo::none), (int)JVMPI_EVENT_ENABLED);
-  jcc(Assembler::notEqual, E);
-  // notify method exit
-  push(state);
-  get_method(ebx);
-  call_VM(noreg, 
-          CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), ebx);
-  pop(state);
-  bind(E);
-#endif // JVMPI_SUPPORT
-
   {
     SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
     push(state);
--- a/hotspot/src/cpu/i486/vm/interp_masm_i486.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/interp_masm_i486.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)interp_masm_i486.hpp	1.85 07/05/05 17:04:16 JVM"
+#pragma ident "@(#)interp_masm_i486.hpp	1.86 07/05/17 15:46:51 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -207,11 +207,7 @@
     
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
 
-#ifdef JVMPI_SUPPORT
-  // support for jvmpi/jvmti
-#else // !JVMPI_SUPPORT
   // support for jvmti
-#endif // JVMPI_SUPPORT
   void notify_method_entry();
   void notify_method_exit(TosState state, NotifyMethodExitMode mode);
   
--- a/hotspot/src/cpu/i486/vm/interpreter_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/interpreter_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interpreter_i486.cpp	1.370 07/05/05 17:04:15 JVM"
+#pragma ident "@(#)interpreter_i486.cpp	1.371 07/05/17 15:46:56 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -337,7 +337,7 @@
   const Address backedge_counter  (ebx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
 
   if (ProfileInterpreter) { // %%% Merge this into methodDataOop
-    __ incl(Address(ebx,methodOopDesc::interpreter_invocation_counter_offset()));
+    __ increment(Address(ebx,methodOopDesc::interpreter_invocation_counter_offset()));
   }
   // Update standard invocation counters
   __ movl(eax, backedge_counter);              	// load backedge counter
@@ -987,11 +987,7 @@
   }
 #endif
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi/dtrace support
-#else // !JVMPI_SUPPORT
   // jvmti/dtrace support
-#endif // JVMPI_SUPPORT
   __ notify_method_entry();
 
   // work registers
@@ -1261,11 +1257,7 @@
     __ bind(L);
   }    
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi/dtrace support
-#else // !JVMPI_SUPPORT
   // jvmti/dtrace support
-#endif // JVMPI_SUPPORT
   // Note: This must happen _after_ handling/throwing any exceptions since
   //       the exception handler code notifies the runtime of method exits
   //       too. If this happens before, method entry/exit notifications are
@@ -1341,7 +1333,7 @@
     __ bind(loop);
     if (TaggedStackInterpreter) __ pushl(NULL_WORD);  // push tag
     __ pushl(NULL_WORD);                              // initialize local variables
-    __ decl(edx);                                     // until everything initialized
+    __ decrement(edx);                                // until everything initialized
     __ jcc(Assembler::greater, loop);
     __ bind(exit);
   }
@@ -1431,11 +1423,7 @@
   }
 #endif
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi support
-#else // !JVMPI_SUPPORT
   // jvmti support
-#endif // JVMPI_SUPPORT
   __ notify_method_entry();
  
   __ dispatch_next(vtos);
@@ -1904,10 +1892,8 @@
 
 //------------------------------------------------------------------------------------------------------------------------
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this method can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+// Non-product code
+#ifndef PRODUCT
 address AbstractInterpreterGenerator::generate_trace_code(TosState state) {
   address entry = __ pc();
 
@@ -1925,18 +1911,15 @@
 
   return entry;
 }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 
 
-// Non-product code
-#ifndef PRODUCT
 void AbstractInterpreterGenerator::count_bytecode() { 
-  __ incl(Address((int)&BytecodeCounter::_counter_value, relocInfo::none)); 
+  __ increment(Address((int)&BytecodeCounter::_counter_value, relocInfo::none)); 
 }
 
 
 void AbstractInterpreterGenerator::histogram_bytecode(Template* t) { 
-  __ incl(Address((int)&BytecodeHistogram::_counters[t->bytecode()], relocInfo::none));
+  __ increment(Address((int)&BytecodeHistogram::_counters[t->bytecode()], relocInfo::none));
 }
 
 
@@ -1945,15 +1928,10 @@
   __ shrl(ebx, BytecodePairHistogram::log2_number_of_codes);
   __ orl(ebx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
   __ movl(Address((int)&BytecodePairHistogram::_index, relocInfo::none), ebx);  
-  __ incl(Address(noreg, ebx, Address::times_4, (int)BytecodePairHistogram::_counters));
+  __ increment(Address(noreg, ebx, Address::times_4, (int)BytecodePairHistogram::_counters));
 }
-#endif // !PRODUCT
 
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this method can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 void AbstractInterpreterGenerator::trace_bytecode(Template* t) {
   // Call a little run-time stub to avoid blow-up for each bytecode.
   // The run-time runtime saves the right registers, depending on
@@ -1962,11 +1940,8 @@
   assert(entry != NULL, "entry must have been generated");
   __ call(entry, relocInfo::none);
 }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 
 
-// Non-product code
-#ifndef PRODUCT
 void AbstractInterpreterGenerator::stop_interpreter_at() {
   Label L;
   __ cmpl(Address(int(&BytecodeCounter::_counter_value), relocInfo::none), StopInterpreterAt);
--- a/hotspot/src/cpu/i486/vm/sharedRuntime_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/sharedRuntime_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)sharedRuntime_i486.cpp	1.49 07/05/05 17:04:20 JVM"
+#pragma ident "@(#)sharedRuntime_i486.cpp	1.50 07/05/17 15:47:00 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1171,13 +1171,6 @@
   bool is_static = false;
   int oop_temp_slot_offset = 0;
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    oop_temp_slot_offset = stack_slots;
-    stack_slots += VMRegImpl::slots_per_word;
-  }
-#endif // JVMPI_SUPPORT
-
   if (method->is_static()) {
     klass_slot_offset = stack_slots;
     stack_slots += VMRegImpl::slots_per_word;
@@ -1207,10 +1200,6 @@
   //      |---------------------| <- lock_slot_offset  (-lock_slot_ebp_offset)
   //      | klass (if static)   |
   //      |---------------------| <- klass_slot_offset
-#ifdef JVMPI_SUPPORT
-  //      | oop_temp (jvmpi)    |
-  //      |---------------------| <- oop_temp_slot_offset
-#endif // JVMPI_SUPPORT
   //      | oopHandle area      |
   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
   //      | outbound memory     |
@@ -1266,7 +1255,7 @@
   int vep_offset = ((intptr_t)__ pc()) - start;
 
 #ifdef COMPILER1
-  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hash) {
+  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
     // Object.hashCode can pull the hashCode from the header word
     // instead of doing a full VM transition once it's been computed.
     // Since hashCode is usually polymorphic at call sites we can't do
@@ -1354,11 +1343,7 @@
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
-#ifdef JVMPI_SUPPORT
-  // make from here on out (sync slow path, jvmpi, etc.) we will have
-#else // !JVMPI_SUPPORT
   // make from here on out (sync slow path, jvmti, etc.) we will have
-#endif // JVMPI_SUPPORT
   // captured the oops from our caller and have a valid oopMap for
   // them.
 
@@ -1446,13 +1431,6 @@
     __ movl(Address(esp, wordSize), oop_handle_reg);
   }
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    __ movl(Address(esp, oop_temp_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
-    map->set_oop(VMRegImpl::stack2reg(oop_temp_slot_offset));
-  }
-#endif // JVMPI_SUPPORT
-
   // Change state to native (we save the return address in the thread, since it might not
   // be pushed on the stack when we do a a stack traversal). It is enough that the pc()
   // points into the right code segment. It does not have to be the correct return pc.
@@ -1467,30 +1445,6 @@
   // We have all of the arguments setup at this point. We must not touch any register
   // argument registers at this point (what if we save/restore them there are no oop?
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi support
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY) ||
-      jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY2)) {
-    
-    // push the receiver
-    if (method()->is_static()) {
-      __ pushl((int) NULL_WORD);
-    } else {
-      __ pushl(Address(esp, receiver_offset));
-    }
-
-    __ movl(eax, JNIHandles::make_local(method()));
-    __ pushl(eax);
-
-    __ pushl(thread);
-    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_entry), relocInfo::runtime_call_type);
-    __ addl(esp, 3*wordSize);
-    // Any exception pending?
-    __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
-    __ jcc(Assembler::notEqual, exception_pending);
-  }
-#endif // JVMPI_SUPPORT
-
   { 
     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
     __ movl(eax, JNIHandles::make_local(method()));
@@ -1724,29 +1678,6 @@
 
   }
 
-#ifdef JVMPI_SUPPORT
-  // Tell jvmpi about this method exit
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    save_native_result(masm, ret_type, stack_slots);
-    // Save any pending exception and clear it from the thread
-    __ movl(eax, Address(thread, in_bytes(Thread::pending_exception_offset())));
-    __ movl(Address(ebp, oop_temp_slot_ebp_offset), eax);
-    __ movl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
-    __ pushl(JNIHandles::make_local(method()));
-    __ pushl(thread);
-    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), relocInfo::runtime_call_type);
-    __ addl(esp, 2*wordSize);
-    Label L;
-    // If we had a pending exception before jvmpi call it takes precedence
-    __ movl(eax, Address(ebp, oop_temp_slot_ebp_offset));
-    __ testl(eax, eax);
-    __ jcc(Assembler::equal, L);
-    __ movl(Address(thread, in_bytes(Thread::pending_exception_offset())), eax);
-    __ bind(L);
-    restore_native_result(masm, ret_type, stack_slots);
-  }
-#endif // JVMPI_SUPPORT
-
   { 
     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
     // Tell dtrace about this method exit
@@ -2176,7 +2107,7 @@
   __ movl(sp_temp, esp);                // pass to next frame
   __ addl(esi, 4);                      // Bump array pointer (sizes)
   __ addl(ecx, 4);                      // Bump array pointer (pcs)
-  __ decl(counter);                     // decrement counter
+  __ decrement(counter);                // decrement counter
   __ jcc(Assembler::notZero, loop);
   __ pushl(Address(ecx));               // save final return address
 
@@ -2378,7 +2309,7 @@
   __ movl(sp_temp, esp);                // pass to next frame
   __ addl(esi, 4);                      // Bump array pointer (sizes)
   __ addl(ecx, 4);                      // Bump array pointer (pcs)
-  __ decl(counter);                     // decrement counter
+  __ decrement(counter);                // decrement counter
   __ jcc(Assembler::notZero, loop);
   __ pushl(Address(ecx));               // save final return address
 
--- a/hotspot/src/cpu/i486/vm/stubGenerator_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/stubGenerator_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stubGenerator_i486.cpp	1.84 07/05/05 17:04:20 JVM"
+#pragma ident "@(#)stubGenerator_i486.cpp	1.85 07/05/17 15:47:05 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -34,6 +34,14 @@
 
 #define __ _masm->
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
 const int MXCSR_MASK  = 0xFFC0;  // Mask out any pending exceptions
 const int FPU_CNTRL_WRD_MASK = 0xFFFF;
 
@@ -58,6 +66,31 @@
 class StubGenerator: public StubCodeGenerator {
  private:
 
+#ifdef PRODUCT
+#define inc_counter_np(counter) (0)
+#else
+  void inc_counter_np_(int& counter) {
+    Address counter_addr((int) &counter, relocInfo::none);
+    __ increment(counter_addr);
+  }
+#define inc_counter_np(counter) \
+  BLOCK_COMMENT("inc_counter " #counter); \
+  inc_counter_np_(counter);
+#endif //PRODUCT
+
+  void inc_copy_counter_np(BasicType t) {
+#ifndef PRODUCT
+    switch (t) {
+    case T_BYTE:    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); return;
+    case T_SHORT:   inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
+    case T_INT:     inc_counter_np(SharedRuntime::_jint_array_copy_ctr); return;
+    case T_LONG:    inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); return;
+    case T_OBJECT:  inc_counter_np(SharedRuntime::_oop_array_copy_ctr); return;
+    }
+    ShouldNotReachHere();
+#endif //PRODUCT
+  }
+
   //------------------------------------------------------------------------------------------------------------------------
   // Call stubs are used to call Java from C
   //
@@ -142,6 +175,7 @@
 #endif
 
     // pass parameters if any
+    BLOCK_COMMENT("pass parameters if any");
     Label parameters_done;
     __ movl(ecx, parameter_size);  // parameter counter
     __ testl(ecx, ecx);
@@ -158,7 +192,7 @@
     __ movl(edx, parameters);          // parameter pointer
     __ xorl(ebx, ebx);
 
-    __ bind(loop);
+    __ BIND(loop);
     if (TaggedStackInterpreter) {
       __ movl(eax, Address(edx, ecx, Interpreter::stackElementScale(),
                       -2*wordSize));                          // get tag
@@ -170,21 +204,24 @@
     __ movl(eax, Address(edx, ecx, Interpreter::stackElementScale(), -wordSize));
     __ movl(Address(esp, ebx, Interpreter::stackElementScale(), 
                     Interpreter::expr_offset_in_bytes(0)), eax);          // store parameter
-    __ incl(ebx);
-    __ decl(ecx); 
+    __ increment(ebx);
+    __ decrement(ecx); 
     __ jcc(Assembler::notZero, loop);
 
     // call Java function
-    __ bind(parameters_done);
+    __ BIND(parameters_done);
     __ movl(ebx, method);              // get methodOop
     __ movl(eax, entry_point);         // get entry_point
     __ movl(esi, esp);                 // set sender sp
+    BLOCK_COMMENT("call Java function");
     __ call(eax, relocInfo::none);
+
+    BLOCK_COMMENT("call_stub_return_address:");
     return_address = __ pc();
 
     Label common_return;
 
-    __ bind(common_return);
+    __ BIND(common_return);
 
     // store result depending on type
     // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
@@ -200,7 +237,7 @@
 
     // handle T_INT case
     __ movl(Address(edi), eax);
-    __ bind(exit);
+    __ BIND(exit);
 
     // check that FPU stack is empty
     __ verify_FPU(0, "generate_call_stub");
@@ -224,12 +261,12 @@
     __ ret(0);
 
     // handle return types different from T_INT
-    __ bind(is_long);
+    __ BIND(is_long);
     __ movl(Address(edi, 0 * wordSize), eax);
     __ movl(Address(edi, 1 * wordSize), edx);
     __ jmp(exit);
 
-    __ bind(is_float);
+    __ BIND(is_float);
     // interpreter uses xmm0 for return values
     if (UseSSE >= 1) {
       __ movss(Address(edi), xmm0);
@@ -238,7 +275,7 @@
     }
     __ jmp(exit);
 
-    __ bind(is_double);
+    __ BIND(is_double);
     // interpreter uses xmm0 for return values
     if (UseSSE >= 2) {
       __ movsd(Address(edi), xmm0);
@@ -253,6 +290,7 @@
     // stack. compiled code will be set to return here instead of the
     // return above that handles interpreter returns.
 
+    BLOCK_COMMENT("call_stub_compiled_return:");
     StubRoutines::i486::set_call_stub_compiled_return( __ pc());
 
 #ifdef COMPILER2
@@ -359,6 +397,7 @@
 
     // compute exception handler into ebx
     __ movl(eax, Address(esp));
+    BLOCK_COMMENT("call exception_handler_for_return_address");
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
     __ movl(ebx, eax);
 
@@ -502,7 +541,7 @@
   // Input:  FPU TOS: float value
   // Output: eax (edx): integer (long) result
 
-  address generate_d2i_wrapper( address fcn ) {
+  address generate_d2i_wrapper(BasicType t, address fcn) {
     StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
     address start = __ pc();
 
@@ -542,6 +581,10 @@
     // Prepare FPU for doing math in C-land
     __ empty_FPU_stack();
     // Call the C code to massage the double.  Result in EAX
+    if (t == T_INT)
+      { BLOCK_COMMENT("SharedRuntime::d2i"); }
+    else if (t == T_LONG)
+      { BLOCK_COMMENT("SharedRuntime::d2l"); }
     __ call_VM_leaf( fcn, 2 );
 
     // Restore CPU & FPU state
@@ -570,6 +613,7 @@
     __ pushl(0);                      // hole for return address-to-be
     __ pushad();                      // push registers
     Address next_pc(esp, RegisterImpl::number_of_registers * BytesPerWord);
+    BLOCK_COMMENT("call handle_unsafe_access");
     __ call(CAST_FROM_FN_PTR(address, handle_unsafe_access), relocInfo::runtime_call_type);
     __ movl(next_pc, eax);            // stuff next address 
     __ popad();
@@ -597,7 +641,7 @@
     
     Label exit, error;
     __ pushfd();
-    __ incl(Address((int)StubRoutines::verify_oop_count_addr(), relocInfo::none));
+    __ increment(Address((int)StubRoutines::verify_oop_count_addr(), relocInfo::none));
     __ pushl(edx);                               // save edx
     // make sure object is 'reasonable'
     __ movl(eax, Address(esp, 4 * wordSize));    // get object
@@ -649,6 +693,7 @@
     __ popl(edx);                                // get saved edx back
     __ popfd();                                  // get saved EFLAGS off stack -- will be ignored
     __ pushad();                                 // push registers (eip = return address & msg are already pushed)
+    BLOCK_COMMENT("call MacroAssembler::debug");
     __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
     __ popad();
     __ ret(3 * wordSize);                        // pop arguments
@@ -659,29 +704,28 @@
   //  Generate store check for array
   //
   //  Input:
-  //     %edi    -  starting address
-  //     %ecx    -  element count
+  //     start   -  starting address
+  //     end     -  element count
   //
   //  The 2 input registers are overwritten
   //
-  void array_store_check() {
+  void array_store_check(Register start, Register end) {
     BarrierSet* bs = Universe::heap()->barrier_set();
     assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
     CardTableModRefBS* ct = (CardTableModRefBS*)bs;
     assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
     Label L_loop;
-    const Register start = edi;  // starting array address
-    const Register count = ecx;  // elements count
-    const Register end   = ecx;  // start+count-1
+    assert_different_registers(start, end);
+    const Register count = end;  // elements count; end == start+count-1
 
     __ leal(end,   Address(start, count, Address::times_4, -4));
     __ shrl(start, CardTableModRefBS::card_shift);
     __ shrl(end,   CardTableModRefBS::card_shift);
     __ subl(end, start); // end --> count
-  __ bind(L_loop);
+  __ BIND(L_loop);
     __ movb(Address(start, count, Address::times_1, (int)ct->byte_map_base), 0);
-    __ decl(count);
+    __ decrement(count);
     __ jcc(Assembler::greaterEqual, L_loop);
   }
 
@@ -697,7 +741,7 @@
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
     __ align(16);
-  __ bind(L_copy_64_bytes_loop);
+  __ BIND(L_copy_64_bytes_loop);
     __ movq(mmx0, Address(from, 0));
     __ movq(mmx1, Address(from, 8));
     __ movq(mmx2, Address(from, 16));
@@ -715,7 +759,7 @@
     __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
     __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
     __ addl(from, 64);
-  __ bind(L_copy_64_bytes);
+  __ BIND(L_copy_64_bytes);
     __ subl(qword_count, 8);
     __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
     __ addl(qword_count, 8);
@@ -723,13 +767,13 @@
     //
     // length is too short, just copy qwords
     //
-  __ bind(L_copy_8_bytes);
+  __ BIND(L_copy_8_bytes);
     __ movq(mmx0, Address(from));
     __ movq(Address(from, to_from, Address::times_1), mmx0);
     __ addl(from, 8);
-    __ decl(qword_count);
+    __ decrement(qword_count);
     __ jcc(Assembler::greater, L_copy_8_bytes);
-  __ bind(L_exit);
+  __ BIND(L_exit);
     __ emms();
   }
 
@@ -764,10 +808,11 @@
     }
 
     *entry = __ pc(); // Entry point from conjoint arraycopy stub.
+    BLOCK_COMMENT("Entry:");
 
     __ subl(to, from); // to --> to_from
     __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
-    __ jcc(Assembler::less, L_copy_4_bytes);
+    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
       // align source address at 4 bytes address boundary
       if (t == T_BYTE) {
@@ -776,9 +821,9 @@
         __ jccb(Assembler::zero, L_skip_align1);
         __ movb(eax, Address(from));
         __ movb(Address(from, to_from, Address::times_1, 0), eax);
-        __ incl(from);
-        __ decl(count);
-      __ bind(L_skip_align1);
+        __ increment(from);
+        __ decrement(count);
+      __ BIND(L_skip_align1);
       }
       // Two bytes misalignment happens only for byte and short (char) arrays
       __ testl(from, 2);
@@ -787,7 +832,7 @@
       __ movw(Address(from, to_from, Address::times_1, 0), eax);
       __ addl(from, 2);
       __ subl(count, 1<<(shift-1));
-    __ bind(L_skip_align2);
+    __ BIND(L_skip_align2);
     }
     if (!VM_Version::supports_mmx()) {
       __ movl(eax, count);     // save 'count'
@@ -805,7 +850,7 @@
       __ movl(Address(from, to_from, Address::times_1, 0), eax);
       __ addl(from, 4);
       __ subl(count, 1<<shift);
-    __ bind(L_copy_64_bytes);
+    __ BIND(L_copy_64_bytes);
       __ movl(eax, count);
       __ shrl(eax, shift+1);  // 8 bytes chunk count
       //
@@ -814,14 +859,14 @@
       mmx_copy_forward(from, to_from, eax);
     }
     // copy tailing dword
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     __ testl(count, 1<<shift);
     __ jccb(Assembler::zero, L_copy_2_bytes);
     __ movl(eax, Address(from));
     __ movl(Address(from, to_from, Address::times_1, 0), eax);
     if (t == T_BYTE || t == T_SHORT) {
       __ addl(from, 4);
-    __ bind(L_copy_2_bytes);
+    __ BIND(L_copy_2_bytes);
       // copy tailing word
       __ testl(count, 1<<(shift-1));
       __ jccb(Assembler::zero, L_copy_byte);
@@ -829,26 +874,27 @@
       __ movw(Address(from, to_from, Address::times_1, 0), eax);
       if (t == T_BYTE) {
         __ addl(from, 2);
-      __ bind(L_copy_byte);
+      __ BIND(L_copy_byte);
         // copy tailing byte
         __ testl(count, 1);
         __ jccb(Assembler::zero, L_exit);
         __ movb(eax, Address(from));
         __ movb(Address(from, to_from, Address::times_1, 0), eax);
-      __ bind(L_exit);
+      __ BIND(L_exit);
       } else {
-      __ bind(L_copy_byte);
+      __ BIND(L_copy_byte);
       }
     } else {
-    __ bind(L_copy_2_bytes);
+    __ BIND(L_copy_2_bytes);
     }
 
     if (t == T_OBJECT) {
       __ movl(count, Address(esp, 12+12)); // reread 'count'
       __ movl(to, saved_to); // restore 'to'
-      array_store_check();
-    __ bind(L_0_count);
+      array_store_check(to, count);
+    __ BIND(L_0_count);
     }
+    inc_copy_counter_np(t);
     __ popl(edi);
     __ popl(esi);
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -887,6 +933,7 @@
 
     if (entry != NULL) {
       *entry = __ pc(); // Entry point from generic arraycopy stub.
+      BLOCK_COMMENT("Entry:");
     }
 
     if (t == T_OBJECT) {
@@ -905,7 +952,7 @@
 
     // copy from high to low
     __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
-    __ jcc(Assembler::less, L_copy_4_bytes);
+    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
     if (t == T_BYTE || t == T_SHORT) {
       // Align the end of destination array at 4 bytes address boundary
       __ leal(end, Address(dst, count, sf, 0));
@@ -913,10 +960,10 @@
         // One byte misalignment happens only for byte arrays
         __ testl(end, 1);
         __ jccb(Assembler::zero, L_skip_align1);
-        __ decl(count);
+        __ decrement(count);
         __ movb(edx, Address(from, count, sf, 0));
         __ movb(Address(to, count, sf, 0), edx);
-      __ bind(L_skip_align1);
+      __ BIND(L_skip_align1);
       }
       // Two bytes misalignment happens only for byte and short (char) arrays
       __ testl(end, 2);
@@ -924,9 +971,9 @@
       __ subl(count, 1<<(shift-1));
       __ movw(edx, Address(from, count, sf, 0));
       __ movw(Address(to, count, sf, 0), edx);
-    __ bind(L_skip_align2);
+    __ BIND(L_skip_align2);
       __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
-      __ jcc(Assembler::less, L_copy_4_bytes);
+      __ jcc(Assembler::below, L_copy_4_bytes);
     }
 
     if (!VM_Version::supports_mmx()) {
@@ -954,16 +1001,16 @@
 
       __ align(16);
       // Move 8 bytes
-    __ bind(L_copy_8_bytes_loop);
+    __ BIND(L_copy_8_bytes_loop);
       __ movq(mmx0, Address(from, count, sf, 0));
       __ movq(Address(to, count, sf, 0), mmx0);
-    __ bind(L_copy_8_bytes);
+    __ BIND(L_copy_8_bytes);
       __ subl(count, 2<<shift);
       __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
       __ addl(count, 2<<shift);
       __ emms();
     }
-  __ bind(L_copy_4_bytes);
+  __ BIND(L_copy_4_bytes);
     // copy prefix qword
     __ testl(count, 1<<shift);
     __ jccb(Assembler::zero, L_copy_2_bytes);
@@ -973,7 +1020,7 @@
     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
       if (t == T_BYTE || t == T_SHORT) {
         __ subl(count, (1<<shift));
-      __ bind(L_copy_2_bytes);
+      __ BIND(L_copy_2_bytes);
         // copy prefix dword
         __ testl(count, 1<<(shift-1));
         __ jccb(Assembler::zero, L_copy_byte);
@@ -981,25 +1028,26 @@
         __ movw(Address(to, count, sf, -2), edx);
         if (t == T_BYTE) {
           __ subl(count, 1<<(shift-1));
-        __ bind(L_copy_byte);
+        __ BIND(L_copy_byte);
           // copy prefix byte
           __ testl(count, 1);
           __ jccb(Assembler::zero, L_exit);
           __ movb(edx, Address(from));
           __ movb(Address(to), edx);
-        __ bind(L_exit);
+        __ BIND(L_exit);
         } else {
-        __ bind(L_copy_byte);
+        __ BIND(L_copy_byte);
         }
       }
     } else {
-    __ bind(L_copy_2_bytes);
+    __ BIND(L_copy_2_bytes);
     }
     if (t == T_OBJECT) {
       __ movl(count, Address(esp, 12+12)); // reread count
-      array_store_check();
-    __ bind(L_0_count);
+      array_store_check(to, count);
+    __ BIND(L_0_count);
     }
+    inc_copy_counter_np(t);
     __ popl(edi);
     __ popl(esi);
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1026,6 +1074,7 @@
     __ movl(count, Address(esp, 8+8));       // count
 
     *entry = __ pc(); // Entry point from conjoint arraycopy stub.
+    BLOCK_COMMENT("Entry:");
 
     __ subl(to, from); // to --> to_from
     if (VM_Version::supports_mmx()) {
@@ -1033,14 +1082,15 @@
     } else {
       __ jmpb(L_copy_8_bytes);
       __ align(16);
-    __ bind(L_copy_8_bytes_loop);
+    __ BIND(L_copy_8_bytes_loop);
       __ fild_d(Address(from));
       __ fistp_d(Address(from, to_from, Address::times_1));
       __ addl(from, 8);
-    __ bind(L_copy_8_bytes);
-      __ decl(count);
+    __ BIND(L_copy_8_bytes);
+      __ decrement(count);
       __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
     }
+    inc_copy_counter_np(T_LONG);
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ xorl(eax, eax); // return 0
     __ ret(0);
@@ -1065,6 +1115,7 @@
     __ movl(count, Address(esp, 8+8));       // count
 
     *entry = __ pc(); // Entry point from generic arraycopy stub.
+    BLOCK_COMMENT("Entry:");
 
     // arrays overlap test
     __ cmpl(to, from);
@@ -1077,7 +1128,7 @@
     __ jmpb(L_copy_8_bytes);
 
     __ align(16);
-  __ bind(L_copy_8_bytes_loop);
+  __ BIND(L_copy_8_bytes_loop);
     if (VM_Version::supports_mmx()) {
       __ movq(mmx0, Address(from, count, Address::times_8));
       __ movq(Address(to, count, Address::times_8), mmx0);
@@ -1085,19 +1136,354 @@
       __ fild_d(Address(from, count, Address::times_8));
       __ fistp_d(Address(to, count, Address::times_8));
     }
-  __ bind(L_copy_8_bytes);
-    __ decl(count);
+  __ BIND(L_copy_8_bytes);
+    __ decrement(count);
     __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
 
     if (VM_Version::supports_mmx()) {
       __ emms();
     }
+    inc_copy_counter_np(T_LONG);
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ xorl(eax, eax); // return 0
     __ ret(0);
     return start;
   }
 
+
+  // Helper for generating a dynamic type check.
+  // The sub_klass must be one of {ebx, edx, esi}.
+  // The temp is killed.
+  void generate_type_check(Register sub_klass,
+                           Address& super_check_offset_addr,
+                           Address& super_klass_addr,
+                           Register temp,
+                           Label* L_success_ptr, Label* L_failure_ptr) {
+    BLOCK_COMMENT("type_check:");
+
+    Label L_fallthrough;
+    bool fall_through_on_success = (L_success_ptr == NULL);
+    if (fall_through_on_success) {
+      L_success_ptr = &L_fallthrough;
+    } else {
+      L_failure_ptr = &L_fallthrough;
+    }
+    Label& L_success = *L_success_ptr;
+    Label& L_failure = *L_failure_ptr;
+
+    assert_different_registers(sub_klass, temp);
+
+    // a couple of useful fields in sub_klass:
+    int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
+                     Klass::secondary_supers_offset_in_bytes());
+    int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
+                     Klass::secondary_super_cache_offset_in_bytes());
+    Address secondary_supers_addr(sub_klass, ss_offset);
+    Address super_cache_addr(     sub_klass, sc_offset);
+
+    // if the pointers are equal, we are done (e.g., String[] elements)
+    __ cmpl(sub_klass, super_klass_addr);
+    __ jcc(Assembler::equal, L_success);
+
+    // check the supertype display:
+    __ movl(temp, super_check_offset_addr);
+    Address super_check_addr(sub_klass, temp, Address::times_1, 0);
+    __ movl(temp, super_check_addr); // load displayed supertype
+    __ cmpl(temp, super_klass_addr); // test the super type
+    __ jcc(Assembler::equal, L_success);
+
+    // if it was a primary super, we can just fail immediately
+    __ cmpl(super_check_offset_addr, sc_offset);
+    __ jcc(Assembler::notEqual, L_failure);
+
+    // Now do a linear scan of the secondary super-klass chain.
+    // This code is rarely used, so simplicity is a virtue here.
+    inc_counter_np(SharedRuntime::_partial_subtype_ctr);
+    {
+      // The repne_scan instruction uses fixed registers, which we must spill.
+      // (We need a couple more temps in any case.)
+      __ pushl(eax);
+      __ pushl(ecx);
+      __ pushl(edi);
+      assert_different_registers(sub_klass, eax, ecx, edi);
+
+      __ movl(edi, secondary_supers_addr);
+      // Load the array length.
+      __ movl(ecx, Address(edi, arrayOopDesc::length_offset_in_bytes())); 
+      // Skip to start of data.
+      __ addl(edi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+      // Scan ecx words at [edi] for occurrence of eax
+      // Set NZ/Z based on last compare
+      __ movl(eax, super_klass_addr);
+      __ repne_scan();
+
+      // Unspill the temp. registers:
+      __ popl(edi);
+      __ popl(ecx);
+      __ popl(eax);
+    }
+    __ jcc(Assembler::notEqual, L_failure);
+
+    // Success.  Cache the super we found and proceed in triumph.
+    __ movl(temp, super_klass_addr); // note: eax is dead
+    __ movl(super_cache_addr, temp);
+
+    if (!fall_through_on_success)
+      __ jmp(L_success);
+
+    // Fall through on failure!
+    __ bind(L_fallthrough);
+  }
+
+  //
+  //  Generate checkcasting array copy stub
+  //
+  //  Input:
+  //    4(esp)   - source array address
+  //    8(esp)   - destination array address
+  //   12(esp)   - element count, can be zero
+  //   16(esp)   - size_t ckoff (super_check_offset)
+  //   20(esp)   - oop ckval (super_klass)
+  //
+  //  Output:
+  //    eax ==  0  -  success
+  //    eax == -1^K - failure, where K is partial transfer count
+  //
+  address generate_checkcast_copy(const char *name, address* entry) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Label L_load_element, L_store_element, L_do_card_marks, L_done;
+
+    // register use:
+    //  eax, edx, ecx -- loop control (end_from, end_to, count)
+    //  edi, esi      -- element access (oop, klass)
+    //  ebx           -- temp
+    const Register from       = eax;    // source array address
+    const Register to         = edx;    // destination array address
+    const Register length     = ecx;    // elements count
+    const Register elem       = edi;    // each oop copied
+    const Register elem_klass = esi;    // each elem._klass (sub_klass)
+    const Register temp       = ebx;    // lone remaining temp
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ pushl(esi);
+    __ pushl(edi);
+    __ pushl(ebx);
+
+    Address   from_arg(esp, 16+ 4);     // from
+    Address     to_arg(esp, 16+ 8);     // to
+    Address length_arg(esp, 16+12);     // elements count
+    Address  ckoff_arg(esp, 16+16);     // super_check_offset
+    Address  ckval_arg(esp, 16+20);     // super_klass
+
+    // Load up:
+    __ movl(from,     from_arg);
+    __ movl(to,         to_arg);
+    __ movl(length, length_arg);
+
+    *entry = __ pc(); // Entry point from generic arraycopy stub.
+    BLOCK_COMMENT("Entry:");
+
+    //---------------------------------------------------------------
+    // Assembler stub will be used for this call to arraycopy 
+    // if the two arrays are subtypes of Object[] but the
+    // destination array type is not equal to or a supertype
+    // of the source type.  Each element must be separately
+    // checked.
+
+    // Loop-invariant addresses.  They are exclusive end pointers.
+    Address end_from_addr(from, length, Address::times_4, 0);
+    Address   end_to_addr(to,   length, Address::times_4, 0);
+
+    Register end_from = from;           // re-use
+    Register end_to   = to;             // re-use
+    Register count    = length;         // re-use
+
+    // Loop-variant addresses.  They assume post-incremented count < 0.
+    Address from_element_addr(end_from, count, Address::times_4, 0);
+    Address   to_element_addr(end_to,   count, Address::times_4, 0);
+    Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
+
+    // Copy from low to high addresses, indexed from the end of each array.
+    __ leal(end_from, end_from_addr);
+    __ leal(end_to,   end_to_addr);
+    assert(length == count, "");        // else fix next line:
+    __ negl(count);                     // negate and test the length
+    __ jccb(Assembler::notZero, L_load_element);
+
+    // Empty array:  Nothing to do.
+    __ xorl(eax, eax);                  // return 0 on (trivial) success
+    __ jmp(L_done);
+
+    // ======== begin loop ========
+    // (Loop is rotated; its entry is L_load_element.)
+    // Loop control:
+    //   for (count = -count; count != 0; count++)
+    // Base pointers src, dst are biased by 4*count, to the last element.
+    __ align(16);
+    
+    __ BIND(L_store_element);
+    __ movl(to_element_addr, elem);     // store the oop
+    __ increment(count);                // increment the count toward zero
+    __ jccb(Assembler::zero, L_do_card_marks);
+
+    // ======== loop entry is here ========
+    __ BIND(L_load_element);
+    __ movl(elem, from_element_addr);   // load the oop
+    __ testl(elem, elem);
+    __ jccb(Assembler::zero, L_store_element);
+
+    // (Could do a trick here:  Remember last successful non-null
+    // element stored and make a quick oop equality check on it.)
+
+    __ movl(elem_klass, elem_klass_addr); // query the object klass
+    generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
+                        &L_store_element, NULL);
+      // (On fall-through, we have failed the element type check.)
+    // ======== end loop ========
+
+    // It was a real error; we must depend on the caller to finish the job.
+    // Register ecx (count) = -1 * number of *remaining* oops; length_arg = *total* oops.
+    // Emit GC store barriers for the oops we have copied (length_arg + ecx),
+    // and report their number to the caller.
+    __ addl(count, length_arg);         // transfers = (length - remaining)
+    __ movl(eax, count);                // save the value
+    __ notl(eax);                       // report (-1^K) to caller
+    __ movl(to, to_arg);                // reload
+    assert_different_registers(to, count, eax);
+    array_store_check(to, count);
+    __ jmpb(L_done);
+
+    // Come here on success only.
+    __ BIND(L_do_card_marks);
+    __ movl(count, length_arg);
+    array_store_check(to, count);
+    __ xorl(eax, eax);                  // return 0 on success
+
+    // Common exit point (success or failure).
+    __ BIND(L_done);
+    __ popl(ebx);
+    __ popl(edi);
+    __ popl(esi);
+    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
+  //
+  //  Generate 'unsafe' array copy stub
+  //  Though just as safe as the other stubs, it takes an unscaled
+  //  size_t argument instead of an element count.
+  //
+  //  Input:
+  //    4(esp)   - source array address
+  //    8(esp)   - destination array address
+  //   12(esp)   - byte count, can be zero
+  //
+  //  Output:
+  //    eax ==  0  -  success
+  //    eax == -1  -  need to call System.arraycopy
+  //
+  // Examines the alignment of the operands and dispatches
+  // to a long, int, short, or byte copy loop.
+  //
+  address generate_unsafe_copy(const char *name, 
+                               address byte_copy_entry,
+                               address short_copy_entry, 
+                               address int_copy_entry, 
+                               address long_copy_entry) {
+
+    Label L_long_aligned, L_int_aligned, L_short_aligned;
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    const Register from       = eax;  // source array address
+    const Register to         = edx;  // destination array address
+    const Register count      = ecx;  // elements count
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ pushl(esi);
+    __ pushl(edi);
+    Address  from_arg(esp, 12+ 4);      // from
+    Address    to_arg(esp, 12+ 8);      // to
+    Address count_arg(esp, 12+12);      // byte count
+
+    // Load up:
+    __ movl(from ,  from_arg);
+    __ movl(to   ,    to_arg);
+    __ movl(count, count_arg);
+
+    // bump this on entry, not on exit:
+    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
+
+    const Register bits = esi;
+    __ movl(bits, from);
+    __ orl(bits, to);
+    __ orl(bits, count);
+
+    __ testl(bits, BytesPerLong-1);
+    __ jccb(Assembler::zero, L_long_aligned);
+
+    __ testl(bits, BytesPerInt-1);
+    __ jccb(Assembler::zero, L_int_aligned);
+
+    __ testl(bits, BytesPerShort-1);
+    __ jcc(Assembler::notZero, byte_copy_entry, relocInfo::runtime_call_type);
+
+    __ BIND(L_short_aligned);
+    __ shrl(count, LogBytesPerShort); // size => short_count
+    __ movl(count_arg, count);          // update 'count'
+    __ jmp(short_copy_entry, relocInfo::runtime_call_type);
+
+    __ BIND(L_int_aligned);
+    __ shrl(count, LogBytesPerInt); // size => int_count
+    __ movl(count_arg, count);          // update 'count'
+    __ jmp(int_copy_entry, relocInfo::runtime_call_type);
+
+    __ BIND(L_long_aligned);
+    __ shrl(count, LogBytesPerLong); // size => qword_count
+    __ movl(count_arg, count);          // update 'count'
+    __ popl(edi); // Do pops here since jlong_arraycopy stub does not do it.
+    __ popl(esi);
+    __ jmp(long_copy_entry, relocInfo::runtime_call_type);
+
+    return start;
+  }
+
+
+  // Perform range checks on the proposed arraycopy.
+  // Smashes src_pos and dst_pos.  (Uses them up for temps.)
+  void arraycopy_range_checks(Register src,
+                              Register src_pos,
+                              Register dst,
+                              Register dst_pos,
+                              Address& length,
+                              Label& L_failed) {
+    BLOCK_COMMENT("arraycopy_range_checks:");
+    const Register src_end = src_pos;   // source array end position
+    const Register dst_end = dst_pos;   // destination array end position
+    __ addl(src_end, length); // src_pos + length
+    __ addl(dst_end, length); // dst_pos + length
+
+    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
+    __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
+    __ jcc(Assembler::above, L_failed);
+
+    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
+    __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
+    __ jcc(Assembler::above, L_failed);
+
+    BLOCK_COMMENT("arraycopy_range_checks done");
+  }
+
+
   //
   //  Generate generic array copy stubs
   //
@@ -1110,30 +1496,39 @@
   //
   //  Output:
   //    eax ==  0  -  success
-  //    eax == -1  -  need to call System.arraycopy
+  //    eax == -1^K - failure, where K is partial transfer count
   //
   address generate_generic_copy(const char *name, 
                                 address entry_jbyte_arraycopy,
                                 address entry_jshort_arraycopy, 
                                 address entry_jint_arraycopy, 
                                 address entry_oop_arraycopy,
-                                address entry_jlong_arraycopy) {
-    Label L_failed, L_objArray;
+                                address entry_jlong_arraycopy,
+                                address entry_checkcast_arraycopy) {
+    Label L_failed, L_failed_0, L_objArray;
 
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
+
+    // Short-hop target to L_failed:
+    __ BIND(L_failed_0);
+    __ jmp(L_failed);
+
     address start = __ pc();
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     __ pushl(esi);
     __ pushl(edi);
 
+    // bump this on entry, not on exit:
+    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
+
     // Input values
-#define SRC     Address(esp, 12+ 4)
-#define SRC_POS Address(esp, 12+ 8)
-#define DST     Address(esp, 12+12)
-#define DST_POS Address(esp, 12+16)
-#define LENGTH  Address(esp, 12+20)
+    Address SRC     (esp, 12+ 4);
+    Address SRC_POS (esp, 12+ 8);
+    Address DST     (esp, 12+12);
+    Address DST_POS (esp, 12+16);
+    Address LENGTH  (esp, 12+20);
 
     //-----------------------------------------------------------------------
     // Assembler stub will be used for this call to arraycopy 
@@ -1149,79 +1544,99 @@
     // (8) dst_pos + length must not exceed length of dst.
     // 
 
-    const Register src = eax;  // source array oop
-    const Register dst = edx;  // destination array oop
+    const Register src     = eax;       // source array oop
+    const Register src_pos = esi;
+    const Register dst     = edx;       // destination array oop
+    const Register dst_pos = edi;
+    const Register length  = ecx;       // transfer count
 
     //  if (src == NULL) return -1;
     __ movl(src, SRC);      // src oop
     __ testl(src, src);
-    __ jcc(Assembler::zero, L_failed);
+    __ jccb(Assembler::zero, L_failed_0);
 
     //  if (src_pos < 0) return -1;
-    __ movl(esi, SRC_POS);  // src_pos
-    __ testl(esi, esi);
-    __ jcc(Assembler::negative, L_failed);
+    __ movl(src_pos, SRC_POS);  // src_pos
+    __ testl(src_pos, src_pos);
+    __ jccb(Assembler::negative, L_failed_0);
 
     //  if (dst == NULL) return -1;
     __ movl(dst, DST);      // dst oop
     __ testl(dst, dst);
-    __ jcc(Assembler::zero, L_failed);
+    __ jccb(Assembler::zero, L_failed_0);
 
     //  if (dst_pos < 0) return -1;
-    __ movl(edi, DST_POS);  // dst_pos
-    __ testl(edi, edi);
-    __ jcc(Assembler::negative, L_failed);
+    __ movl(dst_pos, DST_POS);  // dst_pos
+    __ testl(dst_pos, dst_pos);
+    __ jccb(Assembler::negative, L_failed_0);
 
     //  if (length < 0) return -1;
-    __ movl(ecx, LENGTH);   // length
-    __ testl(ecx, ecx);
-    __ jcc(Assembler::negative, L_failed);
-
-    const Register src_end = esi;  // source array end position
-    const Register dst_end = edi;  // destination array  end position
-    const Register ecx_klass  = ecx;    // array klass
-
-    __ addl(src_end, ecx); // src_pos + length
-    __ addl(dst_end, ecx); // dst_pos + length
+    __ movl(length, LENGTH);   // length
+    __ testl(length, length);
+    __ jccb(Assembler::negative, L_failed_0);
 
     //  if (src->klass() == NULL) return -1;
-    __ movl(ecx_klass, Address(src, oopDesc::klass_offset_in_bytes()));
-    __ testl(ecx_klass, ecx_klass);
-    __ jcc(Assembler::zero, L_failed);  // it is broken if klass is NULL
+    Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
+    Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
+    const Register ecx_src_klass = ecx;    // array klass
+    __ movl(ecx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
 
-    //  if (src->klass() != dst->klass()) return -1;
-    __ cmpl(ecx_klass, Address(dst, oopDesc::klass_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, L_failed);
+#ifdef ASSERT
+    //  assert(src->klass() != NULL);
+    BLOCK_COMMENT("assert klasses not null");
+    { Label L1, L2;
+      __ testl(ecx_src_klass, ecx_src_klass);
+      __ jccb(Assembler::notZero, L2);   // it is broken if klass is NULL
+      __ bind(L1);
+      __ stop("broken null klass");
+      __ bind(L2);
+      __ cmpl(dst_klass_addr, 0);
+      __ jccb(Assembler::equal, L1);      // this would be broken also
+      BLOCK_COMMENT("assert done");
+    }
+#endif //ASSERT
 
     // Load layout helper (32-bits)
     //
     //  |array_tag|     | header_size | element_type |     |log2_element_size|
     // 32        30    24            16              8     2                 0
     //
-    //   array_tag: typeArray = 0x3, objArray = 0x2
+    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
     //
 
     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
                     Klass::layout_helper_offset_in_bytes();
+    Address src_klass_lh_addr(ecx_src_klass, lh_offset);
+
+    // Handle objArrays completely differently...
+    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
+    __ cmpl(src_klass_lh_addr, objArray_lh);
+    __ jcc(Assembler::equal, L_objArray);
+
+    //  if (src->klass() != dst->klass()) return -1;
+    __ cmpl(ecx_src_klass, dst_klass_addr);
+    __ jccb(Assembler::notEqual, L_failed_0);
 
     const Register ecx_lh = ecx;  // layout helper
-
-    __ movl(ecx_lh, Address(ecx_klass, lh_offset));
+    assert(ecx_lh == ecx_src_klass, "known alias");
+    __ movl(ecx_lh, src_klass_lh_addr);
 
     //  if (!src->is_Array()) return -1;
     __ cmpl(ecx_lh, Klass::_lh_neutral_value);
-    __ jcc(Assembler::greaterEqual, L_failed);
-
-    //  if (src_pos + length > arrayOop(src)->length() ) return ac_failed;
-    __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
-    __ jcc(Assembler::above, L_failed);
+    __ jccb(Assembler::greaterEqual, L_failed_0); // signed cmp
 
-    //  if (dst_pos + length > arrayOop(dst)->length() ) return ac_failed;
-    __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
-    __ jcc(Assembler::above, L_failed);
+    // At this point, it is known to be a typeArray (array_tag 0x3).
+#ifdef ASSERT
+    { Label L;
+      __ cmpl(ecx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
+      __ jcc(Assembler::greaterEqual, L); // signed cmp
+      __ stop("must be a primitive array");
+      __ bind(L);
+    }
+#endif
 
-    __ cmpl(ecx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
-    __ jcc(Assembler::equal, L_objArray);
+    assert_different_registers(src, src_pos, dst, dst_pos, ecx_lh);
+    arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
 
     // typeArrayKlass
     //
@@ -1241,14 +1656,15 @@
     __ andl(ecx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize
 
     // next registers should be set before the jump to corresponding stub
-    const Register from       = eax; // source array address
-    const Register to         = edx; // destination array address
+    const Register from       = src; // source array address
+    const Register to         = dst; // destination array address
     const Register count      = ecx; // elements count
     // some of them should be duplicated on stack
 #define FROM   Address(esp, 12+ 4)
 #define TO     Address(esp, 12+ 8)   // Not used now
 #define COUNT  Address(esp, 12+12)   // Only for oop arraycopy
 
+    BLOCK_COMMENT("scale indexes to element size");
     __ movl(esi, SRC_POS);  // src_pos
     __ shll(esi); // src_pos << ecx (log2 elsize)
     assert(src_array == from, "");
@@ -1261,6 +1677,7 @@
     __ movl(edi_elsize, ecx_lh); // log2 elsize
     __ movl(count, LENGTH); // elements count
 
+    BLOCK_COMMENT("choose copy loop based on element size");
     __ cmpl(edi_elsize, 0);
     __ jcc(Assembler::equal, entry_jbyte_arraycopy);
     __ cmpl(edi_elsize, LogBytesPerShort);
@@ -1275,27 +1692,122 @@
     __ popl(esi);
     __ jmp(entry_jlong_arraycopy, relocInfo::runtime_call_type);
 
-    // objArrayKlass
-  __ bind(L_objArray); 
-    __ movl(count, LENGTH); // elements count
-    __ movl(esi, SRC_POS);  // src_pos
-    __ leal(from, Address(src_array, esi, Address::times_4, 
-                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
-    __ movl(edi, DST_POS);  // dst_pos
-    __ leal(to,   Address(dst_array, edi, Address::times_4, 
-                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
-    __ movl(FROM,  from);   // src_addr
-    __ movl(COUNT, count);  // count
-    __ jmp(entry_oop_arraycopy, relocInfo::runtime_call_type);
-
-  __ bind(L_failed);
+  __ BIND(L_failed);
     __ xorl(eax, eax);
-    __ decl(eax); // return -1
+    __ notl(eax); // return -1
     __ popl(edi);
     __ popl(esi);
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
+    // objArrayKlass
+  __ BIND(L_objArray);
+    // live at this point:  ecx_src_klass, src[_pos], dst[_pos]
+
+    Label L_plain_copy, L_checkcast_copy;
+    //  test array classes for subtyping
+    __ cmpl(ecx_src_klass, dst_klass_addr); // usual case is exact equality
+    __ jccb(Assembler::notEqual, L_checkcast_copy);
+
+    // Identically typed arrays can be copied without element-wise checks.
+    assert_different_registers(src, src_pos, dst, dst_pos, ecx_src_klass);
+    arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
+
+  __ BIND(L_plain_copy);
+    __ movl(count, LENGTH); // elements count
+    __ movl(src_pos, SRC_POS);  // reload src_pos
+    __ leal(from, Address(src, src_pos, Address::times_4, 
+                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
+    __ movl(dst_pos, DST_POS);  // reload dst_pos
+    __ leal(to,   Address(dst, dst_pos, Address::times_4, 
+                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
+    __ movl(FROM,  from);   // src_addr
+    __ movl(TO,    to);     // dst_addr
+    __ movl(COUNT, count);  // count
+    __ jmp(entry_oop_arraycopy, relocInfo::runtime_call_type);
+
+  __ BIND(L_checkcast_copy);
+    // live at this point:  ecx_src_klass, dst[_pos], src[_pos]
+    {
+      // Handy offsets:
+      int  ek_offset = (klassOopDesc::header_size() * HeapWordSize +
+                        objArrayKlass::element_klass_offset_in_bytes());
+      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
+                        Klass::super_check_offset_offset_in_bytes());
+
+      Register esi_dst_klass = esi;
+      Register edi_temp      = edi;
+      assert(esi_dst_klass == src_pos, "expected alias w/ src_pos");
+      assert(edi_temp      == dst_pos, "expected alias w/ dst_pos");
+      Address dst_klass_lh_addr(esi_dst_klass, lh_offset);
+
+      // Before looking at dst.length, make sure dst is also an objArray.
+      __ movl(esi_dst_klass, dst_klass_addr);
+      __ cmpl(dst_klass_lh_addr, objArray_lh);
+      __ jccb(Assembler::notEqual, L_failed);
+
+      // It is safe to examine both src.length and dst.length.
+      __ movl(src_pos, SRC_POS);        // reload esi
+      arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
+      // (Now src_pos and dst_pos are killed, but not src and dst.)
+
+      // We'll need this temp (don't forget to pop it after the type check).
+      __ pushl(ebx);
+      Register ebx_src_klass = ebx;
+
+      __ movl(ebx_src_klass, ecx_src_klass); // spill away from ecx
+      __ movl(esi_dst_klass, dst_klass_addr);
+      Address super_check_offset_addr(esi_dst_klass, sco_offset);
+      Label L_fail_array_check;
+      generate_type_check(ebx_src_klass,
+                          super_check_offset_addr, dst_klass_addr,
+                          edi_temp, NULL, &L_fail_array_check);
+      // (On fall-through, we have passed the array type check.)
+      __ popl(ebx);
+      __ jmp(L_plain_copy);
+
+      __ BIND(L_fail_array_check);
+      // Reshuffle arguments so we can call checkcast_arraycopy:
+
+      // match initial saves for checkcast_arraycopy
+      // pushl(esi);    // already done; see above
+      // pushl(edi);    // already done; see above
+      // pushl(ebx);    // already done; see above
+
+      // Marshal outgoing arguments now, freeing registers.
+      Address   from_arg(esp, 16+ 4);   // from
+      Address     to_arg(esp, 16+ 8);   // to
+      Address length_arg(esp, 16+12);   // elements count
+      Address  ckoff_arg(esp, 16+16);   // super_check_offset
+      Address  ckval_arg(esp, 16+20);   // super_klass
+
+      Address SRC_POS_arg(esp, 16+ 8);
+      Address DST_POS_arg(esp, 16+16);
+      Address  LENGTH_arg(esp, 16+20);
+      // push ebx changed the incoming offsets (why not just use ebp??)
+      assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");
+
+      __ movl(ebx, Address(esi_dst_klass, ek_offset));
+      __ movl(length, LENGTH_arg);    // reload elements count
+      __ movl(src_pos, SRC_POS_arg);  // reload src_pos
+      __ movl(dst_pos, DST_POS_arg);  // reload dst_pos
+
+      __ movl(ckval_arg, ebx);          // destination element type
+      __ movl(ebx, Address(ebx, sco_offset));
+      __ movl(ckoff_arg, ebx);          // corresponding class check offset
+
+      __ movl(length_arg, length);      // outgoing length argument
+
+      __ leal(from, Address(src, src_pos, Address::times_4, 
+                            arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+      __ movl(from_arg, from);
+
+      __ leal(to, Address(dst, dst_pos, Address::times_4, 
+                          arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+      __ movl(to_arg, to);
+      __ jmp(entry_checkcast_arraycopy, relocInfo::runtime_call_type);
+    }
+
     return start;
   }
 
@@ -1306,6 +1818,7 @@
     address entry_jint_arraycopy;
     address entry_oop_arraycopy;
     address entry_jlong_arraycopy;
+    address entry_checkcast_arraycopy;
 
     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = 
         generate_disjoint_copy(T_BYTE,  true, Address::times_1, &entry, 
@@ -1365,13 +1878,25 @@
     StubRoutines::_arrayof_oop_arraycopy   = StubRoutines::_oop_arraycopy;
     StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
 
+    StubRoutines::_checkcast_arraycopy =
+        generate_checkcast_copy("checkcast_arraycopy",
+                                  &entry_checkcast_arraycopy);
+
+    StubRoutines::_unsafe_arraycopy =
+        generate_unsafe_copy("unsafe_arraycopy", 
+                               entry_jbyte_arraycopy,
+                               entry_jshort_arraycopy,
+                               entry_jint_arraycopy,
+                               entry_jlong_arraycopy);
+
     StubRoutines::_generic_arraycopy =
         generate_generic_copy("generic_arraycopy", 
                                entry_jbyte_arraycopy,
                                entry_jshort_arraycopy,
                                entry_jint_arraycopy,
                                entry_oop_arraycopy,
-                               entry_jlong_arraycopy);
+                               entry_jlong_arraycopy,
+                               entry_checkcast_arraycopy);
   }
 
  public:
@@ -1453,6 +1978,7 @@
     __ set_last_Java_frame(java_thread, esp, ebp, NULL);
 
     // Call runtime
+    BLOCK_COMMENT("call runtime_entry");
     __ call(runtime_entry, relocInfo::runtime_call_type);
     // Generate oop map
     OopMap* map =  new OopMap(framesize, 0);        
@@ -1534,8 +2060,10 @@
 
     StubRoutines::i486::_verify_mxcsr_entry                 = generate_verify_mxcsr();
     StubRoutines::i486::_verify_fpu_cntrl_wrd_entry         = generate_verify_fpu_cntrl_wrd();
-    StubRoutines::_d2i_wrapper                              = generate_d2i_wrapper( CAST_FROM_FN_PTR(address, SharedRuntime::d2i) );
-    StubRoutines::_d2l_wrapper                              = generate_d2i_wrapper( CAST_FROM_FN_PTR(address, SharedRuntime::d2l) );
+    StubRoutines::_d2i_wrapper                              = generate_d2i_wrapper(T_INT,
+                                                                                   CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
+    StubRoutines::_d2l_wrapper                              = generate_d2i_wrapper(T_LONG,
+                                                                                   CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
   }
 
 
--- a/hotspot/src/cpu/i486/vm/templateTable_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/templateTable_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)templateTable_i486.cpp	1.318 07/05/05 17:04:21 JVM"
+#pragma ident "@(#)templateTable_i486.cpp	1.319 07/05/17 15:47:10 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1825,7 +1825,7 @@
   __ cmpl(eax, Address(ebx, ecx, Address::times_8, 2 * wordSize));
   __ jccb(Assembler::equal, found);
   __ bind(loop_entry);
-  __ decl(ecx);
+  __ decrement(ecx);
   __ jcc(Assembler::greaterEqual, loop);
   // default case
   __ profile_switch_default(eax);
@@ -2733,7 +2733,7 @@
   __ get_cache_and_index_at_bcp(ecx, edx, 2);
   __ movl(ebx, Address(ecx, edx, Address::times_4, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
   // make sure exception is reported in correct bcp range (getfield is next instruction)
-  __ incl(esi);
+  __ increment(esi);
   __ null_check(eax);
   const Address lo = Address(eax, ebx, Address::times_1, 0*wordSize);
   if (state == itos) {
@@ -2746,7 +2746,7 @@
   } else {
     ShouldNotReachHere();
   }
-  __ decl(esi);
+  __ decrement(esi);
 }
 
 
@@ -3111,6 +3111,7 @@
 #ifdef ASSERT
     // make sure edx was multiple of 8
     Label L;
+    // Ignore partial flag stall after shrl() since it is debug VM
     __ jccb(Assembler::carryClear, L);
     __ stop("object size is not multiple of 2 - adjust this code");
     __ bind(L);
@@ -3122,7 +3123,7 @@
     __ bind(loop);
     __ movl(Address(eax, edx, Address::times_8, sizeof(oopDesc) - 1*oopSize), ecx);
     __ movl(Address(eax, edx, Address::times_8, sizeof(oopDesc) - 2*oopSize), ecx);
-    __ decl(edx);
+    __ decrement(edx);
     __ jcc(Assembler::notZero, loop);
     }
 
@@ -3411,7 +3412,7 @@
 
   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. 
   // The object has already been poped from the stack, so the expression stack looks correct.
-  __ incl(esi);
+  __ increment(esi);
 
   __ movl(Address(edx, BasicObjectLock::obj_offset_in_bytes()), eax);     // store object  
   __ lock_object(edx);  
--- a/hotspot/src/cpu/i486/vm/vm_version_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/vm_version_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vm_version_i486.cpp	1.63 07/05/05 17:04:20 JVM"
+#pragma ident "@(#)vm_version_i486.cpp	1.64 07/05/17 15:47:14 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -297,8 +297,29 @@
   if( !supports_sse () ) // Drop to 0 if no SSE  support
     UseSSE = 0;
 
-  if( is_intel() && FLAG_IS_DEFAULT(UseStoreImmI16) )
-    UseStoreImmI16 = false; // don't use it on Intel cpus
+  if( is_intel() ) { // Intel cpus specific settings
+    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
+      UseStoreImmI16 = false; // don't use it on Intel cpus
+    }
+    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
+      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
+        UseAddressNop = true; // use it on new Intel cpus
+      }
+#ifdef COMPILER2
+      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
+        // For new Intel cpus do the next optimization:
+        // don't align the beginning of a loop if there are enough instructions
+        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
+        // in current fetch line (OptoLoopAlignment) or the padding 
+        // is big (> MaxLoopPad).
+        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
+        // generated NOP instructions. 11 is the largest size of one
+        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
+        MaxLoopPad = 11;
+      }
+#endif // COMPILER2
+    }
+  }
 
   assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
   assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");
--- a/hotspot/src/cpu/i486/vm/vtableStubs_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/i486/vm/vtableStubs_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vtableStubs_i486.cpp	1.50 07/05/05 17:04:21 JVM"
+#pragma ident "@(#)vtableStubs_i486.cpp	1.51 07/05/17 15:47:16 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -50,7 +50,7 @@
   MacroAssembler* masm = new MacroAssembler(&cb);
 
 #ifndef PRODUCT
-  if (CountCompiledCalls) __ incl(Address((int)SharedRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
+  if (CountCompiledCalls) __ increment(Address((int)SharedRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
 #endif /* PRODUCT */
 
   // get receiver (need to skip return address on top of stack)
@@ -113,7 +113,7 @@
   //  ecx: Receiver
   
 #ifndef PRODUCT
-  if (CountCompiledCalls) __ incl(Address((int)SharedRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
+  if (CountCompiledCalls) __ increment(Address((int)SharedRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
 #endif /* PRODUCT */
   // get receiver (need to skip return address on top of stack)
  
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)assembler_sparc.cpp	1.205 07/05/05 17:04:23 JVM"
+#pragma ident "@(#)assembler_sparc.cpp	1.206 07/05/17 15:47:20 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -2557,14 +2557,15 @@
 }
 
 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
-                                          Label& done, Label* slow_case) {
+                                          Label& done, Label* slow_case,
+                                          BiasedLockingCounters* counters) {
   assert(UseBiasedLocking, "why call this otherwise?");
 
-#ifdef ASSERT
   if (PrintBiasedLockingStatistics) {
     assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
+    if (counters == NULL)
+      counters = BiasedLocking::counters();
   }
-#endif
 
   Label cas_label;
 
@@ -2583,8 +2584,8 @@
   or3(G2_thread, temp_reg, temp_reg);
   xor3(mark_reg, temp_reg, temp_reg);
   andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
-  if (PrintBiasedLockingStatistics) {
-    cond_inc(Assembler::equal, (address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, O7);
+  if (counters != NULL) {
+    cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
     // Reload mark_reg as we may need it later
     ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
   }
@@ -2637,8 +2638,8 @@
   // need to revoke that bias. The revocation will occur in the
   // interpreter runtime in the slow case.
   cmp(mark_reg, temp_reg);
-  if (PrintBiasedLockingStatistics) {
-    cond_inc(Assembler::zero, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp_reg, O7);
+  if (counters != NULL) {
+    cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
   }
   if (slow_case != NULL) {
     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
@@ -2667,8 +2668,8 @@
   // need to revoke that bias. The revocation will occur in the
   // interpreter runtime in the slow case.
   cmp(mark_reg, temp_reg);
-  if (PrintBiasedLockingStatistics) {
-    cond_inc(Assembler::zero, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, O7);
+  if (counters != NULL) {
+    cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
   }
   if (slow_case != NULL) {
     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
@@ -2696,9 +2697,9 @@
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
-  if (PrintBiasedLockingStatistics) {
+  if (counters != NULL) {
     cmp(mark_reg, temp_reg);
-    cond_inc(Assembler::zero, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, O7);
+    cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
   }
 
   bind(cas_label);
@@ -2754,12 +2755,17 @@
 // effect). 
   
 
-void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch) {
+void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
+                                          BiasedLockingCounters* counters) {
    Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
 
    verify_oop(Roop);
    Label done ; 
 
+   if (counters != NULL) {
+     inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
+   }
+
    if (EmitSync & 1) { 
      mov    (3, Rscratch) ;           
      st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
@@ -2773,7 +2779,7 @@
      ld_ptr(mark_addr, Rmark);
 
      if (UseBiasedLocking) {
-        biased_locking_enter(Roop, Rmark, Rscratch, done, NULL);
+        biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      }
   
      // Save Rbox in Rscratch to be used for the cas operation
@@ -2816,7 +2822,7 @@
       ld_ptr (mark_addr, Rmark);           // fetch obj->mark
       // Triage: biased, stack-locked, neutral, inflated
       if (UseBiasedLocking) {
-        biased_locking_enter(Roop, Rmark, Rscratch, done, NULL);
+        biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
         // Invariant: if control reaches this point in the emitted stream
         // then Rmark has not been modified.  
       }
@@ -2880,7 +2886,7 @@
       // Triage: biased, stack-locked, neutral, inflated
 
       if (UseBiasedLocking) {
-        biased_locking_enter(Roop, Rmark, Rscratch, done, NULL);
+        biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
         // Invariant: if control reaches this point in the emitted stream
         // then Rmark has not been modified.  
       }
@@ -2904,6 +2910,9 @@
       brx    (Assembler::notZero, false, Assembler::pn, Recursive) ; 
       delayed() -> 
         st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
+      if (counters != NULL) {
+        cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
+      }
       br     (Assembler::always, false, Assembler::pt, done);
       delayed() -> 
         st_ptr (Rbox, mark_addr) ; 
@@ -2930,8 +2939,16 @@
       sub(Rscratch, SP, Rscratch); 
       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
       andcc  (Rscratch, 0xfffff003, Rscratch);
-      br     (Assembler::always, false, Assembler::pt, done) ; 
-      delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+      if (counters != NULL) {
+        // Accounting needs the Rscratch register
+        st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+        cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
+        br     (Assembler::always, false, Assembler::pt, done) ; 
+        delayed()->nop() ; 
+      } else {
+        br     (Assembler::always, false, Assembler::pt, done) ; 
+        delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+      }
 
       bind   (IsInflated) ; 
       if (EmitSync & 64) { 
@@ -3448,11 +3465,15 @@
   Label L;
   brx(negated_cond, false, Assembler::pt, L);
   delayed()->nop();
+  inc_counter(counter_ptr, Rtmp1, Rtmp2);
+  bind(L);
+}
+
+void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
   Address counter_addr(Rtmp1, counter_ptr);
   load_contents(counter_addr, Rtmp2);
   inc(Rtmp2);
   store_contents(Rtmp2, counter_addr);
-  bind(L);
 }
 
 SkipIfEqual::SkipIfEqual(
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)assembler_sparc.hpp	1.178 07/05/05 17:04:24 JVM"
+#pragma ident "@(#)assembler_sparc.hpp	1.179 07/05/17 15:47:51 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -25,6 +25,8 @@
  *  
  */
 
+class BiasedLockingCounters;
+
 // <sys/trap.h> promises that the system will not use traps 16-31
 #define ST_RESERVED_FOR_USER_0 0x10
 
@@ -2111,7 +2113,8 @@
 
   // These set the icc condition code to equal if the lock succeeded
   // and notEqual if it failed and requires a slow case
-  void compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch);
+  void compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
+                              BiasedLockingCounters* counters = NULL);
   void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch);
 
   // Biased locking support
@@ -2125,7 +2128,8 @@
   // In the fall-through case where the CAS-based lock is done,
   // mark_reg is not destroyed.
   void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
-                            Label& done, Label* slow_case = NULL);
+                            Label& done, Label* slow_case = NULL,
+                            BiasedLockingCounters* counters = NULL);
   // Upon entry, the base register of mark_addr must contain the oop.
   // Destroys temp_reg.
 
@@ -2173,6 +2177,8 @@
   // Helper functions for statistics gathering.
   // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
   void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
+  // Unconditional increment.
+  void inc_counter(address counter_addr, Register Rtemp1, Register Rtemp2);
 };
 
 /**
--- a/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_CodeStubs_sparc.cpp	1.78 07/05/05 17:04:23 JVM"
+#pragma ident "@(#)c1_CodeStubs_sparc.cpp	1.79 07/05/17 15:47:54 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -225,21 +225,11 @@
 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   __ mov(_obj_reg->as_register(), G4);
-#ifdef JVMPI_SUPPORT
-  if (ce->compilation()->jvmpi_event_method_entry_enabled() &&
-      _info->bci() == SynchronizationEntryBCI) {
-    // This stub will perform jvmpi notification if its caller gets deoptimized after the monitorenter.
-    __ call(Runtime1::entry_for(Runtime1::monitorenter_with_jvmpi_id), relocInfo::runtime_call_type);
+  if (ce->compilation()->has_fpu_code()) {
+    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
   } else {
-#endif // JVMPI_SUPPORT
-    if (ce->compilation()->has_fpu_code()) {
-      __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
-    } else {
-      __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
-    }
-#ifdef JVMPI_SUPPORT
+    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
   }
-#endif // JVMPI_SUPPORT
   __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_LIRAssembler_sparc.cpp	1.203 07/05/05 17:04:26 JVM"
+#pragma ident "@(#)c1_LIRAssembler_sparc.cpp	1.204 07/05/17 15:47:57 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -386,16 +386,7 @@
     __ delayed()->nop();
   }
 
-#ifdef JVMPI_SUPPORT
-  if (compilation()->jvmpi_event_method_exit_enabled()) {
-    jobject2reg(method->encoding(), O2);
-    __ call(Runtime1::entry_for(Runtime1::jvmpi_unwind_exception_id), relocInfo::runtime_call_type);
-  } else {
-#endif // JVMPI_SUPPORT
-    __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
-#ifdef JVMPI_SUPPORT
-  }
-#endif // JVMPI_SUPPORT
+  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   debug_only(__ stop("should have gone to the caller");)
   assert(code_offset() - offset <= exception_handler_size, "overflow");
@@ -1583,31 +1574,21 @@
 
 
 void LIR_Assembler::return_op(LIR_Opr result) {
-#ifdef JVMPI_SUPPORT
-  if (compilation()->jvmpi_event_method_exit_enabled()) {
-    jobject2reg(method()->encoding(), O0);
-    __ call(Runtime1::entry_for(Runtime1::jvmpi_method_exit_id), relocInfo::runtime_call_type);
-    __ delayed()->nop();
-  } else {
-#endif // JVMPI_SUPPORT
-    // the poll may need a register so just pick one that isn't the return register
+  // the poll may need a register so just pick one that isn't the return register
 #ifdef TIERED
-    if (result->type_field() == LIR_OprDesc::long_type) {
-      // Must move the result to G1
-      // Must leave proper result in O0,O1 and G1 (TIERED only)
-      __ sllx(I0, 32, G1);          // Shift bits into high G1
-      __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
-      __ or3 (I1, G1, G1);          // OR 64 bits into G1
-    }
+  if (result->type_field() == LIR_OprDesc::long_type) {
+    // Must move the result to G1
+    // Must leave proper result in O0,O1 and G1 (TIERED only)
+    __ sllx(I0, 32, G1);          // Shift bits into high G1
+    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
+    __ or3 (I1, G1, G1);          // OR 64 bits into G1
+  }
 #endif // TIERED
-    __ set((intptr_t)os::get_polling_page(), L0);
-    __ relocate(relocInfo::poll_return_type);
-    __ ld_ptr(L0, 0, G0);
-    __ ret();
-    __ delayed()->restore();
-#ifdef JVMPI_SUPPORT
-  }
-#endif // JVMPI_SUPPORT
+  __ set((intptr_t)os::get_polling_page(), L0);
+  __ relocate(relocInfo::poll_return_type);
+  __ ld_ptr(L0, 0, G0);
+  __ ret();
+  __ delayed()->restore();
 }
 
 
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_Runtime1_sparc.cpp	1.148 07/05/05 17:04:25 JVM"
+#pragma ident "@(#)c1_Runtime1_sparc.cpp	1.149 07/05/17 15:48:01 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -462,7 +462,7 @@
           __ restore();
         }
         
-        generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
+        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
         // I0->O0: new instance
       }
 
@@ -471,7 +471,7 @@
 #ifdef TIERED
     case counter_overflow_id:
         // G4 contains bci
-        generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
+      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
       break;
 #endif // TIERED
 
@@ -580,9 +580,9 @@
         }
 
         if (id == new_type_array_id) {
-          generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
+          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
         } else {
-          generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
+          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
         }
         // I0 -> O0: new array
       }
@@ -593,7 +593,7 @@
         // O1: rank
         // O2: address of 1st dimension
         __ set_info("new_multi_array", dont_gc_arguments);
-        generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
+        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
         // I0 -> O0: new multi array
       }
       break;
@@ -617,9 +617,15 @@
         __ delayed()->nop();
 
         __ bind(register_finalizer);
-        save_live_registers(sasm);
-        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
+        OopMap* oop_map = save_live_registers(sasm);
+        int call_offset = __ call_RT(noreg, noreg,
+                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+
+        // Now restore all the live registers
         restore_live_registers(sasm);
+
         __ ret();
         __ delayed()->restore();
       }
@@ -665,31 +671,14 @@
       break;
 
     case unwind_exception_id:
-#ifdef JVMPI_SUPPORT
-    case jvmpi_unwind_exception_id:
-#endif // JVMPI_SUPPORT
       {
         // O0: exception
-#ifdef JVMPI_SUPPORT
-        // O1: methodOop if jvmpi_unwind_method_exception_id
-#endif // JVMPI_SUPPORT
         // I7: address of call to this method
 
-#ifdef JVMPI_SUPPORT
-        __ set_info("jvmpi_unwind_exception", dont_gc_arguments);
-#else // !JVMPI_SUPPORT
         __ set_info("unwind_exception", dont_gc_arguments);
-#endif // JVMPI_SUPPORT
         __ mov(Oexception, Oexception->after_save());
         __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());
 
-#ifdef JVMPI_SUPPORT
-        if (id == jvmpi_unwind_exception_id) {
-          __ st_ptr(Oexception->after_save(), G2_thread, in_bytes(JavaThread::vm_result_offset()));
-          __ call_RT(Oexception->after_save(), noreg, CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), O1);
-        }
-#endif // JVMPI_SUPPORT
-
         __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                         Oissuing_pc->after_save()); 
         __ verify_not_null_oop(Oexception->after_save());
@@ -784,40 +773,6 @@
       }
       break;
 
-#ifdef JVMPI_SUPPORT
-    case monitorenter_with_jvmpi_id:
-      { // This is used for slow-case synchronization at method entry when JVMPI method entry events are enabled.
-        // If the compiled activation has been deoptimized after the monitorenter, then jvmpi notification is done
-        // here since execution will resume in the interpreter at the first bytecode.
-        // G4: object
-        // G5: lock address
-        __ set_info("monitorenter", dont_gc_arguments);
-        OopMap* oop_map = save_live_registers(sasm);
-	// Retrieve the current instruction we plan to return to so we can tell if deopt
-	// has happened while we were in the vm
-	__ ld(I7, frame::pc_return_offset, L1);
-
-        // Preserve the method's receiver in case its needed for jvmpi method entry
-        Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
-        __ st_ptr(G5, vm_result_addr);
-        __ call_RT(G5, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);
-
-        // if the exception handling frame was deoptimized, the instruction we would return to
-        // has been changed; check for that situation
-	Label no_deopt;
-	__ ld(I7, frame::pc_return_offset, L2);
-	__ cmp(L1, L2);
-        __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
-        __ delayed()->nop();
-        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, jvmpi_method_entry_after_deopt), G5);
-
-        __ bind(no_deopt);
-        __ ret();
-        __ delayed()->restore();
-      }
-      break;
-#endif // JVMPI_SUPPORT
-
     case monitorexit_nofpu_id:
     case monitorexit_id:
       { // G4: lock address
@@ -857,44 +812,10 @@
     case jvmti_exception_throw_id:
       { // Oexception : exception
         __ set_info("jvmti_exception_throw", dont_gc_arguments);
-        generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
-      }
-      break;
-
-#ifdef JVMPI_SUPPORT
-    case jvmpi_method_entry_id:
-      { // O0: methodOop
-        // O1: receiver or NULL
-        __ set_info("jvmpi_method_entry", dont_gc_arguments);
-        generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_entry), I0, I1);
+        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
       }
       break;
 
-    case jvmpi_method_exit_id:
-      { // O0: methodOop
-        // 
-        // This stub acts like a trampoline from the return to the
-        // caller.  Because of deopt the caller could go away while we
-        // are in the method exit notification so we have to directly
-        // return from here instead of returning to the nmethod.  All
-        // registers are dead at this point except result registers so
-        // save them.
-        __ set_info("jvmpi_method_exit", dont_gc_arguments);
-        int framesize = __ total_frame_size_in_bytes(sizeof(double) / BytesPerWord);
-
-        // Preserve any floating-point result.
-        Address saved_result_addr(FP, 0, -sizeof(double) + STACK_BIAS);
-        __ stf(FloatRegisterImpl::D, F0, saved_result_addr);
-        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), O0);
-        // Restore potential floating-point result and the return address.
-        __ ldf(FloatRegisterImpl::D, saved_result_addr, F0);
-
-        __ ret();
-        __ delayed()->restore();
-      }
-      break;
-#endif // JVMPI_SUPPORT
-
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/cpu/sparc/vm/disassembler_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/disassembler_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)disassembler_sparc.cpp	1.50 07/05/05 17:04:28 JVM"
+#pragma ident "@(#)disassembler_sparc.cpp	1.51 07/05/17 15:48:03 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -164,7 +164,12 @@
   }
   sparc_env env(NULL, st);
   unsigned char*  p = (unsigned char*) begin;
+  CodeBlob* cb = CodeCache::find_blob_unsafe(begin);
   while (p < (unsigned char*) end && p) {
+    if (cb != NULL) {
+      cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin()));
+    }
+
     unsigned char* p0 = p;
     st->print(INTPTR_FORMAT ": ", p);
     p = decode_instruction(p, &env);
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interp_masm_sparc.cpp	1.197 07/05/05 17:04:29 JVM"
+#pragma ident "@(#)interp_masm_sparc.cpp	1.198 07/05/17 15:48:06 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1209,11 +1209,7 @@
 
   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
 
-#ifdef JVMPI_SUPPORT
-  // save result (push state before jvmti/jvmpi call and pop it afterwards) and notify jvmti/jvmpi
-#else // !JVMPI_SUPPORT
   // save result (push state before jvmti call and pop it afterwards) and notify jvmti
-#endif // JVMPI_SUPPORT
   notify_method_exit(false, state, NotifyJVMTI); 
 
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
@@ -2434,12 +2430,6 @@
 // if (DTraceMethodProbes) {
 //   SharedRuntime::dtrace_method_entry(method, reciever);
 // }
-#ifdef JVMPI_SUPPORT
-// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
-//     *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2)   ) {
-//   SharedRuntime::jvmpi_method_entry(method, receiver);
-// }
-#endif // JVMPI_SUPPORT
 
 void InterpreterMacroAssembler::notify_method_entry() {
   // Whenever JVMTI puts a thread in interp_only_mode, method
@@ -2459,44 +2449,6 @@
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
     bind(L);
   }
-#ifdef JVMPI_SUPPORT
-  Label E;
-  Register temp_reg = O5;
-  Label S;
-  Address event0(
-      temp_reg, 
-      (address)jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ), 
-      relocInfo::none);
-  load_contents(event0, temp_reg);
-  cmp(temp_reg, (int)JVMPI_EVENT_ENABLED);
-  br(equal, false, pn, S);
-  delayed()->nop();
-  Address event1(
-      temp_reg, 
-      (address)jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2), 
-      relocInfo::none);
-  load_contents(event1, temp_reg);
-  cmp(temp_reg, (int)JVMPI_EVENT_ENABLED);
-  br(notEqual, false, pt, E);
-  delayed()->nop();
-  bind(S);
-
-  // notify method entry
-  Label L;
-  const Address access_flags(Lmethod, 0, 
-      in_bytes(methodOopDesc::access_flags_offset()));
-  ld(access_flags, temp_reg);
-  and3(temp_reg, JVM_ACC_STATIC, temp_reg);
-  cmp(temp_reg, JVM_ACC_STATIC); // check if method is static
-  br(equal, true, pn, L);        // if static we're done
-  delayed()->mov(G0, temp_reg);  // receiver = NULL for a static method
-  ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), temp_reg);  // otherwise get receiver
-  bind(L);
-  call_VM(noreg, 
-      CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_entry), 
-      Lmethod, temp_reg);
-  bind(E);
-#endif // JVMPI_SUPPORT
 
   {
     Register temp_reg = O5;
@@ -2515,13 +2467,6 @@
 //   InterpreterRuntime::post_method_exit();
 //   // restore result
 // }
-#ifdef JVMPI_SUPPORT
-// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
-//   // save result
-//   SharedRuntime::jvmpi_method_exit();
-//   // restore result
-// }
-#endif // JVMPI_SUPPORT
 // if (DTraceMethodProbes) {
 //   SharedRuntime::dtrace_method_exit(thread, method);
 // }
@@ -2559,25 +2504,6 @@
     bind(L);
   }
 
-#ifdef JVMPI_SUPPORT
-  Label E;
-  Register temp_reg = O5;
-  Address event0(temp_reg, 
-      (address)jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT), 
-      relocInfo::none);
-  load_contents(event0, temp_reg);
-  cmp(temp_reg, (int)JVMPI_EVENT_ENABLED);
-  br(notEqual, false, pn, E);
-  delayed()->nop();
-
-  save_return_value(state, is_native_method);
-  call_VM(noreg, 
-          CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), 
-          Lmethod);
-  restore_return_value(state, is_native_method);
-  bind(E);
-#endif // JVMPI_SUPPORT
-
   {
     Register temp_reg = O5;
     // Dtrace notification
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)interp_masm_sparc.hpp	1.103 07/05/05 17:04:29 JVM"
+#pragma ident "@(#)interp_masm_sparc.hpp	1.104 07/05/17 15:48:09 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -303,11 +303,7 @@
   void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
   void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU  && (state == ftos || state == dtos)
 
-#ifdef JVMPI_SUPPORT
-  // support for JVMTI/JVMPI/Dtrace
-#else // !JVMPI_SUPPORT
   // support for JVMTI/Dtrace
-#endif // JVMPI_SUPPORT
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
   void notify_method_entry();
   void notify_method_exit(
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interpreter_sparc.cpp	1.260 07/05/05 17:04:30 JVM"
+#pragma ident "@(#)interpreter_sparc.cpp	1.261 07/05/17 15:48:13 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -991,11 +991,7 @@
   // start execution
   __ verify_thread();
 
-#ifdef JVMPI_SUPPORT
-  // JVMTI/JVMPI support 
-#else // !JVMPI_SUPPORT
   // JVMTI support 
-#endif // JVMPI_SUPPORT
   __ notify_method_entry();
 
   // native call
@@ -1280,11 +1276,7 @@
     __ bind(L);
   }
 
-#ifdef JVMPI_SUPPORT
-  // JVMTI/JVMPI support (preserves thread register) 
-#else // !JVMPI_SUPPORT
   // JVMTI support (preserves thread register) 
-#endif // JVMPI_SUPPORT
   __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);  
 
   if (synchronized) {
@@ -1486,11 +1478,7 @@
 
   __ verify_thread();
 
-#ifdef JVMPI_SUPPORT
-  // jvmti/jvmpi support 
-#else // !JVMPI_SUPPORT
   // jvmti support 
-#endif // JVMPI_SUPPORT
   __ notify_method_entry();
 
   // start executing instructions
@@ -2046,9 +2034,6 @@
   __ set_vm_result(Oexception);
   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi support (preserves thread register)
-#endif // JVMPI_SUPPORT
   __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
 
   __ get_vm_result(Oexception);
@@ -2146,10 +2131,8 @@
 
 // --------------------------------------------------------------------------------
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this method can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+// Non-product code
+#ifndef PRODUCT
 address AbstractInterpreterGenerator::generate_trace_code(TosState state) {
   address entry = __ pc();
 
@@ -2166,11 +2149,8 @@
 
   return entry;
 }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 
 
-// Non-product code
-#ifndef PRODUCT
 // helpers for generate_and_dispatch
 
 void AbstractInterpreterGenerator::count_bytecode() { 
@@ -2217,14 +2197,9 @@
   __ inc (G4_scratch);
   __ st (G4_scratch, 0, G3_scratch);
 }
-#endif // not PRODUCT
 
 
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this method can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 void AbstractInterpreterGenerator::trace_bytecode(Template* t) {
   // Call a little run-time stub to avoid blow-up for each bytecode.
   // The run-time runtime saves the right registers, depending on
@@ -2234,11 +2209,8 @@
   __ call(entry, relocInfo::none);
   __ delayed()->nop();
 }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
 
 
-// Non-product code
-#ifndef PRODUCT
 void AbstractInterpreterGenerator::stop_interpreter_at() {
   Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value);
   __ load_contents    (counter, G3_scratch );
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)sharedRuntime_sparc.cpp	1.50 07/05/05 17:04:32 JVM"
+#pragma ident "@(#)sharedRuntime_sparc.cpp	1.51 07/05/17 15:48:17 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1837,7 +1837,7 @@
   int vep_offset = ((intptr_t)__ pc()) - start;
 
 #ifdef COMPILER1
-  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hash) {
+  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
     // Object.hashCode can pull the hashCode from the header word
     // instead of doing a full VM transition once it's been computed.
     // Since hashCode is usually polymorphic at call sites we can't do
@@ -1944,13 +1944,6 @@
   int lock_slot_offset = 0;
   bool is_static = false;
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    oop_temp_slot_offset = stack_slots;
-    stack_slots += VMRegImpl::slots_per_word;
-  }
-#endif // JVMPI_SUPPORT
-
   if (method->is_static()) {
     klass_slot_offset = stack_slots;
     stack_slots += VMRegImpl::slots_per_word;
@@ -1979,10 +1972,6 @@
   //      |---------------------| <- lock_slot_offset
   //      | klass (if static)   |
   //      |---------------------| <- klass_slot_offset
-#ifdef JVMPI_SUPPORT
-  //      | oop_temp            |
-  //      |---------------------| <- oop_temp_slot_offset (jvmpi notify exit only)
-#endif // JVMPI_SUPPORT
   //      | oopHandle area      |
   //      |---------------------| <- oop_handle_offset
   //      | outbound memory     |
@@ -2016,11 +2005,7 @@
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
-#ifdef JVMPI_SUPPORT
-  // make from here on out (sync slow path, jvmpi, etc.) we will have
-#else // !JVMPI_SUPPORT
   // make from here on out (sync slow path, jvmti, etc.) we will have
-#endif // JVMPI_SUPPORT
   // captured the oops from our caller and have a valid oopMap for
   // them.
 
@@ -2123,14 +2108,6 @@
     __ add(SP, klass_offset + STACK_BIAS, O1);
   }
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    // NULL the slot we use to store a pending exception around notify exit call
-    __ st_ptr(G0, SP, (oop_temp_slot_offset * VMRegImpl::stack_slot_size) + STACK_BIAS);
-    map->set_oop(VMRegImpl::stack2reg(oop_temp_slot_offset));
-  }
-#endif // JVMPI_SUPPORT
-
 
   const Register L6_handle = L6;
 
@@ -2169,37 +2146,6 @@
   // when we create it we must also save G2_thread
   bool inner_frame_created = false; 
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi support
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY) ||
-      jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY2)) {
-    create_inner_frame(masm, &inner_frame_created);
-
-    if (method()->is_static()) {
-      __ mov(G0, O2);
-    } else {
-      __ ld_ptr(I1, 0, O2);
-    }
-
-    // We only need to walk the outer frame
-    __ set_last_Java_frame(FP, I7);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
-    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_entry), 
-            relocInfo::runtime_call_type);
-    __ delayed()->mov(G2_thread, O0);
-    __ restore_thread(L7_thread_cache); // restore G2_thread
-    __ reset_last_Java_frame();
-    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
-    Label L;
-    __ br_null(G3_scratch, false, Assembler::pt, L);
-    __ delayed()->nop();
-    // pop the inner frame
-    __ restore();
-    check_forward_pending_exception(masm, G3_scratch);
-    __ bind(L);
-  }
-#endif // JVMPI_SUPPORT
-
   // dtrace method entry support
   {
     SkipIfEqual skip_if(
@@ -2218,11 +2164,7 @@
   // we are in one frame deeper (the "inner" frame). If we are in the 
   // "inner" frames the args are in the Iregs and if the jni frame then
   // they are in the Oregs.
-#ifdef JVMPI_SUPPORT
-  // If we ever need to go to the VM (for locking, jvmti/jvmpi) then
-#else // !JVMPI_SUPPORT
   // If we ever need to go to the VM (for locking, jvmti) then
-#endif // JVMPI_SUPPORT
   // we will always be in the "inner" frame.
 
   // Lock a synchronized method
@@ -2492,33 +2434,6 @@
     __ bind(done);
   }
 
-#ifdef JVMPI_SUPPORT
-  // Tell jvmpi about this method exit
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    save_native_result(masm, ret_type, stack_slots);
-    // save any pending exception
-    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
-    __ st_ptr(G3_scratch, SP, (oop_temp_slot_offset * VMRegImpl::stack_slot_size) + STACK_BIAS);
-    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
-    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit), relocInfo::runtime_call_type);
-    __ delayed()->mov(G2_thread, O0);              // Need thread in O0
-
-    __ restore_thread(L7_thread_cache);
-    // ignore any exception from jvmpi (which really shouldn't post one anyway) if we already had one.
-    restore_native_result(masm, ret_type, stack_slots);
-    __ ld_ptr(SP, (oop_temp_slot_offset * VMRegImpl::stack_slot_size) + STACK_BIAS, G3_scratch);
-    Label L;
-    // did we save a pending exception?
-    __ addcc(G0, G3_scratch, G0);
-    __ brx(Assembler::zero, true, Assembler::pt, L);
-    __ delayed()->nop();
-    // yes it gets priority over any jvmpi exception
-    __ st_ptr(G3_scratch, G2_thread, in_bytes(Thread::pending_exception_offset()));
-    __ bind(L);
-  }
-#endif // JVMPI_SUPPORT
-
   // Tell dtrace about this method exit
   {
     SkipIfEqual skip_if(
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Fri May 25 00:49:14 2007 +0000
@@ -468,7 +468,7 @@
 %}
 
 source %{
-#pragma ident "@(#)sparc.ad	1.453 07/05/05 17:04:21 JVM"
+#pragma ident "@(#)sparc.ad	1.454 07/05/17 15:48:27 JVM"
 
 #define __ _masm.
 
@@ -2642,7 +2642,7 @@
     assert(Rbox  != Rscratch, "");
     assert(Rbox  != Rmark, "");
 
-    __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch);
+    __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters);
 %}
 
 enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stubGenerator_sparc.cpp	1.228 07/05/05 17:04:32 JVM"
+#pragma ident "@(#)stubGenerator_sparc.cpp	1.229 07/05/17 15:48:38 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -34,6 +34,13 @@
 
 #define __ _masm->
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
 // Note:  The register L7 is used as L7_thread_cache, and may not be used
 //        any other way within this module.
@@ -64,13 +71,16 @@
 #ifdef PRODUCT
 #define inc_counter_np(a,b,c) (0)
 #else
-  void inc_counter_np(int& counter, Register t1, Register t2) {
+  void inc_counter_np_(int& counter, Register t1, Register t2) {
     Address counter_addr(t2, (address) &counter);
     __ sethi(counter_addr);
     __ ld(counter_addr, t1);
     __ inc(t1);
     __ st(t1, counter_addr);
   }
+#define inc_counter_np(counter, t1, t2) \
+  BLOCK_COMMENT("inc_counter " #counter); \
+  inc_counter_np_(counter, t1, t2);
 #endif
 
   //----------------------------------------------------------------------------------------------------
@@ -173,6 +183,7 @@
     // |               |
 
     // pass parameters if any
+    BLOCK_COMMENT("pass parameters if any");
     { const Register src = parameters.as_in().as_register();
       const Register dst = Lesp;
       const Register tmp = G3_scratch;
@@ -188,7 +199,7 @@
 
       // copy parameters if any
       Label loop;
-      __ bind(loop);
+      __ BIND(loop);
       // Store tag first.
       if (TaggedStackInterpreter) {
         __ ld_ptr(src, 0, tmp);
@@ -204,7 +215,7 @@
       __ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
 
       // done
-      __ bind(exit);
+      __ BIND(exit);
     }
 
     // setup parameters, method & call Java function
@@ -233,8 +244,11 @@
     // G2_thread
     // G5_method
     // Gargs
+    BLOCK_COMMENT("call Java function");
     __ jmpl(entry_point.as_in().as_register(), G0, O7);
     __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method
+
+    BLOCK_COMMENT("call_stub_return_address:");
     return_pc = __ pc();
 
     // The callee, if it wasn't interpreted, can return with SP changed so
@@ -255,23 +269,23 @@
       // store int result 
       __ st(O0, addr, G0);
 
-      __ bind(exit);
+      __ BIND(exit);
       __ ret();
       __ delayed()->restore();
   
-      __ bind(is_object);
+      __ BIND(is_object);
       __ ba(false, exit);
       __ delayed()->st_ptr(O0, addr, G0);
   
-      __ bind(is_float);
+      __ BIND(is_float);
       __ ba(false, exit);
       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
   
-      __ bind(is_double);
+      __ BIND(is_double);
       __ ba(false, exit);
       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
   
-      __ bind(is_long);
+      __ BIND(is_long);
 #ifdef _LP64
       __ ba(false, exit);
       __ delayed()->st_long(O0, addr, G0);	// store entire long
@@ -373,6 +387,7 @@
     __ verify_oop(Oexception);
     __ save_frame(0);             // compensates for compiler weakness
     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
+    BLOCK_COMMENT("call exception_handler_for_return_address");
     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch);
     __ mov(O0, handler_reg);
     __ restore();                 // compensates for compiler weakness
@@ -455,6 +470,7 @@
     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
     __ save_thread(noreg);
     // do the call
+    BLOCK_COMMENT("call runtime_entry");
     __ call(runtime_entry, relocInfo::runtime_call_type);
     if (!VerifyThread)
       __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
@@ -475,6 +491,7 @@
     __ should_not_reach_here();
     __ bind(L);
 #endif // ASSERT
+    BLOCK_COMMENT("call forward_exception_entry");
     __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
     // we use O7 linkage so that forward_exception_entry has the issuing PC
     __ delayed()->restore();
@@ -581,7 +598,7 @@
     // Initialize yield counter
     __ mov(G0,yield_reg);
 
-    __ bind(retry);
+    __ BIND(retry);
     __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
     __ br(Assembler::less, false, Assembler::pt, dontyield);
     __ delayed()->nop();
@@ -594,6 +611,7 @@
     // Save the regs and make space for a C call
     __ save(SP, -96, SP);
     __ save_all_globals_into_locals();
+    BLOCK_COMMENT("call os::naked_sleep");
     __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
     __ delayed()->nop();
     __ restore_globals_from_locals();
@@ -601,7 +619,7 @@
     // reset the counter
     __ mov(G0,yield_reg);
 
-    __ bind(dontyield);
+    __ BIND(dontyield);
 
     // try to get lock
     __ swap(lock_ptr_reg, 0, lock_reg);
@@ -637,7 +655,7 @@
       // Use CAS instead of swap, just in case the MP hardware
       // prefers to work with just one kind of synch. instruction.
       Label retry;
-      __ bind(retry);
+      __ BIND(retry);
       __ mov(O0, O3);       // scratch copy of exchange value
       __ ld(O1, 0, O2);     // observe the previous value
       // try to replace O2 with O3
@@ -758,11 +776,11 @@
   address generate_atomic_add() {
     StubCodeMark mark(this, "StubRoutines", "atomic_add");
     address start = __ pc();
-    __ bind(_atomic_add_stub);
+    __ BIND(_atomic_add_stub);
 
     if (VM_Version::v9_instructions_work()) {
       Label(retry);
-      __ bind(retry);
+      __ BIND(retry);
 
       __ lduw(O1, 0, O2);
       __ add(O0,   O2, O3);
@@ -850,6 +868,7 @@
     }
     
     address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
+    BLOCK_COMMENT("call handle_unsafe_access");
     __ call(entry_point, relocInfo::runtime_call_type);
     __ delayed()->nop();
 
@@ -875,7 +894,7 @@
   // Arguments :
   //
   //      ret  : O0, returned
-  //      icc/xcc: set as O0
+  //      icc/xcc: set as O0 (depending on wordSize)
   //      sub  : O1, argument, not changed
   //      super: O2, argument, not changed
   //      raddr: O7, blown by call
@@ -930,7 +949,7 @@
     __ align(CodeEntryAlignment);
 
     // The scan loop
-    __ bind(loop);
+    __ BIND(loop);
     __ add(L1_ary_ptr,wordSize,L1_ary_ptr); // Bump by OOP size
     __ cmp(L3_index,L0_ary_len); 
     __ br(Assembler::equal,false,Assembler::pn,miss);
@@ -948,15 +967,15 @@
     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
-    __ retl();			// Result in Rret is ok; flags set to Z
+    __ retl();			// Result in Rret is zero; flags set to Z
     __ delayed()->add(SP,4*wordSize,SP);
 #else
-    __ ret();			// Result in Rret is ok; flags set to Z
+    __ ret();			// Result in Rret is zero; flags set to Z
     __ delayed()->restore();
 #endif
 
     // Hit or miss falls through here
-    __ bind(miss);
+    __ BIND(miss);
     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
 
 #if defined(COMPILER2) && !defined(_LP64)
@@ -964,10 +983,10 @@
     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
-    __ retl();			// Result in O0 is ok; flags set to NZ
+    __ retl();			// Result in Rret is != 0; flags set to NZ
     __ delayed()->add(SP,4*wordSize,SP);
 #else
-    __ ret();			// Result in Rret is ok; flags set to Z
+    __ ret();			// Result in Rret is != 0; flags set to NZ
     __ delayed()->restore();
 #endif
 
@@ -987,6 +1006,20 @@
     return start;
   }
 
+  static address disjoint_byte_copy_entry;
+  static address disjoint_short_copy_entry;
+  static address disjoint_int_copy_entry;
+  static address disjoint_long_copy_entry;
+  static address disjoint_oop_copy_entry;
+
+  static address byte_copy_entry;
+  static address short_copy_entry;
+  static address int_copy_entry;
+  static address long_copy_entry;
+  static address oop_copy_entry;
+
+  static address checkcast_copy_entry;
+
   //
   // Verify that a register contains clean 32-bits positive value
   // (high 32-bits are 0) so it could be used in 64-bits shifts (sllx, srax).
@@ -1011,7 +1044,16 @@
   //    O1    -  array2
   //    O2    -  element count
   //
+  //  Kills temps:  O3, O4
+  //
   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
+    assert(no_overlap_target != NULL, "must be generated");
+    array_overlap_test(no_overlap_target, NULL, log2_elem_size);
+  }
+  void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
+    array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
+  }
+  void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
     const Register from       = O0;
     const Register to         = O1;
     const Register count      = O2;
@@ -1020,9 +1062,15 @@
 
       __ subcc(to, from, to_from);
       __ sll_ptr(count, log2_elem_size, byte_count);
-      __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
+      if (NOLp == NULL)
+        __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
+      else
+        __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
       __ delayed()->cmp(to_from, byte_count);
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
+      if (NOLp == NULL)
+        __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
+      else
+        __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
       __ delayed()->nop();
   }
 
@@ -1054,10 +1102,10 @@
     assert( tmp != addr, "need separate temp reg");
     Address rs(tmp, (address)ct->byte_map_base);
       __ load_address(rs);
-    __ bind(L_loop);
+    __ BIND(L_loop);
       __ stb(G0, rs.base(), addr);
       __ subcc(count, 1, count);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
       __ delayed()->add(addr, 1, addr);
   }
 
@@ -1096,7 +1144,7 @@
       __ ldx(from, 0, O3);
       __ inc(from, 8);
       __ align(16);
-    __ bind(L_loop);
+    __ BIND(L_loop);
       __ ldx(from, 0, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one? 
       __ ldx(from, 8, G4);
@@ -1110,11 +1158,11 @@
       __ srlx(G4, right_shift, G3);
       __ bset(G3, O4);
       __ stx(O4, to, -8);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
       __ delayed()->mov(G4, O3);
 
       __ inccc(count, count_dec>>1 ); // + 8 bytes
-      __ br(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
+      __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
       __ delayed()->inc(count, count_dec>>1); // restore 'count'
 
       // copy 8 bytes, part of them already loaded in O3
@@ -1126,12 +1174,12 @@
       __ bset(O3, G3);
       __ stx(G3, to, -8);
 
-    __ bind(L_copy_last_bytes);
+    __ BIND(L_copy_last_bytes);
       __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
       __ delayed()->sub(from, right_shift, from);       // restore address
 
-    __ bind(L_aligned_copy);
+    __ BIND(L_aligned_copy);
   }
 
   // Copy big chunks backward with shift
@@ -1168,7 +1216,7 @@
       __ andn(end_from, 7, end_from);     // Align address
       __ ldx(end_from, 0, O3);
       __ align(16);
-    __ bind(L_loop);
+    __ BIND(L_loop);
       __ ldx(end_from, -8, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one? 
       __ ldx(end_from, -16, G4);
@@ -1182,11 +1230,11 @@
       __ sllx(G4, left_shift,  G3);
       __ bset(G3, O4);
       __ stx(O4, end_to, 0);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
       __ delayed()->mov(G4, O3);
 
       __ inccc(count, count_dec>>1 ); // + 8 bytes
-      __ br(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
+      __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
       __ delayed()->inc(count, count_dec>>1); // restore 'count'
 
       // copy 8 bytes, part of them already loaded in O3
@@ -1198,7 +1246,7 @@
       __ bset(O3, G3);
       __ stx(G3, end_to, 0);
 
-    __ bind(L_copy_last_bytes);
+    __ BIND(L_copy_last_bytes);
       __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
       __ delayed()->add(end_from, left_shift, end_from); // restore address
@@ -1229,6 +1277,10 @@
 
     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
 
+    if (!aligned)  disjoint_byte_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
     if (aligned) {
       // 'aligned' == true when it is known statically during compilation
       // of this arraycopy call site that both 'from' and 'to' addresses
@@ -1243,23 +1295,24 @@
 
       // For short arrays, just do single element copy
       __ cmp(count, 12); // 8 + 4
-      __ br(Assembler::less, false, Assembler::pn, L_copy_byte);
+      __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
       __ delayed()->mov(G0, offset);
 
-      // copy a 4-bytes word if necessary to align 'from' to 8 bytes
-      __ andcc(from, 7, G0);
+      // copy a 4-bytes word if necessary to align 'to' to 8 bytes
+      __ andcc(to, 7, G0);
       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
       __ delayed()->ld(from, 0, O3);
       __ inc(from, 4);
       __ inc(to, 4);
       __ dec(count, 4);
       __ st(O3, to, -4);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
 #endif
     } else {
+
       // for short arrays, just do single element copy
       __ cmp(count, 23); // 16 + 7
-      __ br(Assembler::less, false, Assembler::pn, L_copy_byte);
+      __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
       __ delayed()->mov(G0, offset);
 
       // copy bytes to align 'to' on 8 byte boundary
@@ -1268,15 +1321,19 @@
       __ delayed()->neg(G1);
       __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
       __ sub(count, G1, count);
-    __ bind(L_align);
+    __ BIND(L_align);
       __ ldub(from, 0, O3);
       __ deccc(G1);
       __ inc(from);
       __ stb(O3, to, 0);
       __ br(Assembler::notZero, false, Assembler::pt, L_align);
       __ delayed()->inc(to);
-    __ bind(L_skip_alignment);
-
+    __ BIND(L_skip_alignment);
+    }
+#ifdef _LP64
+    if (!aligned)
+#endif
+    {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise fall through to the next
       // code for aligned copy.
@@ -1288,23 +1345,23 @@
 
     // Both array are 8 bytes aligned, copy 16 bytes at a time
       __ and3(count, 7, G4); // Save count
-      __ srl(count, 3, count);
+      __ srlx(count, 3, count);
      generate_disjoint_long_copy_core(aligned);
       __ mov(G4, count);     // Restore count
 
     // copy tailing bytes
-    __ bind(L_copy_byte);
+    __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
       __ align(16);
-    __ bind(L_copy_byte_loop);
+    __ BIND(L_copy_byte_loop);
       __ ldub(from, offset, O3);
       __ deccc(count);
       __ stb(O3, to, offset);
-      __ br(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
+      __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
       __ delayed()->inc(offset);
 
-    __ bind(L_exit);
+    __ BIND(L_exit);
       // O3, O4 are used as temp registers
       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
       __ retl();
@@ -1329,9 +1386,7 @@
     address start = __ pc();
     address nooverlap_target = aligned ?
         StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
-        StubRoutines::jbyte_disjoint_arraycopy();
-
-    array_overlap_test(nooverlap_target, 0);
+        disjoint_byte_copy_entry;
 
     Label L_skip_alignment, L_align, L_aligned_copy;
     Label L_copy_byte, L_copy_byte_loop, L_exit;
@@ -1344,11 +1399,17 @@
 
     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
 
+    if (!aligned)  byte_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
+    array_overlap_test(nooverlap_target, 0);
+
     __ add(to, count, end_to);       // offset after last copied element
 
     // for short arrays, just do single element copy
     __ cmp(count, 23); // 16 + 7
-    __ br(Assembler::less, false, Assembler::pn, L_copy_byte);
+    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
     __ delayed()->add(from, count, end_from);
 
     if (aligned) {
@@ -1368,26 +1429,35 @@
       __ dec(end_to,   4);
       __ dec(count, 4);
       __ st(O4, end_to, 0);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
+#else
+      // The next code is generated in copy_16_bytes_backward_with_shift()
+      // in other cases.
+      __ dec(count, 16);
 #endif
     } else {
+
       // copy bytes to align 'to' on 8 byte boundary
       __ andcc(end_to, 7, G1); // misaligned bytes
       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
       __ delayed()->nop();
       __ sub(count, G1, count);
-    __ bind(L_align);
+    __ BIND(L_align);
       __ dec(end_from);
       __ dec(end_to);
       __ ldub(end_from, 0, O3);
       __ deccc(G1);
-      __ br(Assembler::notZero, false, Assembler::pt, L_align);
+      __ brx(Assembler::notZero, false, Assembler::pt, L_align);
       __ delayed()->stb(O3, end_to, 0);
-    __ bind(L_skip_alignment);
-
+    __ BIND(L_skip_alignment);
+    }
+#ifdef _LP64
+    if (!aligned)
+#endif
+    {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise jump to the next
-      // code for aligned copy.
+      // code for aligned copy (and subtracting 16 from 'count' before jump).
       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
       // Also jump over aligned copy after the copy with shift completed.
 
@@ -1396,33 +1466,33 @@
     }
     // copy 4 elements (16 bytes) at a time
       __ align(16);
-    __ bind(L_aligned_copy);
+    __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
       __ ldx(end_from, 0, O4);
       __ dec(end_to, 16);
       __ deccc(count, 16);
       __ stx(O3, end_to, 8);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
       __ delayed()->stx(O4, end_to, 0);
       __ inc(count, 16);
 
     // copy 1 element (2 bytes) at a time
-    __ bind(L_copy_byte);
+    __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
       __ align(16);
-    __ bind(L_copy_byte_loop);
+    __ BIND(L_copy_byte_loop);
       __ dec(end_from);
       __ dec(end_to);
       __ ldub(end_from, 0, O4);
       __ deccc(count);
-      __ br(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
+      __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
       __ delayed()->stb(O4, end_to, 0);
 
-    __ bind(L_exit);
+    __ BIND(L_exit);
     // O3, O4 are used as temp registers
-    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
     __ retl();
     __ delayed()->mov(G0, O0); // return 0
     return start;
@@ -1453,6 +1523,10 @@
 
     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
 
+    if (!aligned)  disjoint_short_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
     if (aligned) {
       // 'aligned' == true when it is known statically during compilation
       // of this arraycopy call site that both 'from' and 'to' addresses
@@ -1467,23 +1541,23 @@
 
       // for short arrays, just do single element copy
       __ cmp(count, 6); // 4 + 2  (12 bytes)
-      __ br(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
+      __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
       __ delayed()->mov(G0, offset);
 
-      // copy a 2-elements word if necessary to align to 8 bytes
-      __ andcc(from, 7, G0);
+      // copy a 2-elements word if necessary to align 'to' to 8 bytes
+      __ andcc(to, 7, G0);
       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
       __ delayed()->ld(from, 0, O3);
       __ inc(from, 4);
       __ inc(to, 4);
       __ dec(count, 2);
       __ st(O3, to, -4);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
 #endif
     } else {
       // for short arrays, just do single element copy
       __ cmp(count, 11); // 8 + 3  (22 bytes)
-      __ br(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
+      __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
       __ delayed()->mov(G0, offset);
 
       // copy 1 element if necessary to align 'to' on an 4 bytes
@@ -1494,7 +1568,7 @@
       __ inc(to, 2);
       __ dec(count);
       __ sth(O3, to, -2);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
 
       // copy 2 elements to align 'to' on an 8 byte boundary
       __ andcc(to, 7, G0);
@@ -1506,8 +1580,12 @@
       __ inc(to, 4);
       __ sth(O3, to, -4);
       __ sth(O4, to, -2);
-    __ bind(L_skip_alignment2);
-
+    __ BIND(L_skip_alignment2);
+    }
+#ifdef _LP64
+    if (!aligned)
+#endif
+    {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise fall through to the next
       // code for aligned copy.
@@ -1524,18 +1602,18 @@
       __ mov(G4, count); // restore
 
     // copy 1 element at a time
-    __ bind(L_copy_2_bytes);
+    __ BIND(L_copy_2_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
       __ align(16);
-    __ bind(L_copy_2_bytes_loop);
+    __ BIND(L_copy_2_bytes_loop);
       __ lduh(from, offset, O3);
       __ deccc(count);
       __ sth(O3, to, offset);
-      __ br(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
+      __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
       __ delayed()->inc(offset, 2);
 
-    __ bind(L_exit);
+    __ BIND(L_exit);
       // O3, O4 are used as temp registers
       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
       __ retl();
@@ -1560,9 +1638,7 @@
     address start = __ pc();
     address nooverlap_target = aligned ?
         StubRoutines::arrayof_jshort_disjoint_arraycopy() :
-        StubRoutines::jshort_disjoint_arraycopy();
-
-    array_overlap_test(nooverlap_target, 1);
+        disjoint_short_copy_entry;
 
     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
@@ -1573,16 +1649,22 @@
     const Register end_from  = from; // source array end address
     const Register end_to    = to;   // destination array end address
 
+    const Register byte_count = O3;  // bytes count to copy
+
     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
 
-    const Register byte_count = O3;  // bytes count to copy
+    if (!aligned)  short_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
+    array_overlap_test(nooverlap_target, 1);
 
     __ sllx(count, LogBytesPerShort, byte_count);
     __ add(to, byte_count, end_to);  // offset after last copied element
 
     // for short arrays, just do single element copy
     __ cmp(count, 11); // 8 + 3  (22 bytes)
-    __ br(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
+    __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
     __ delayed()->add(from, byte_count, end_from);
 
     if (aligned) {
@@ -1602,7 +1684,11 @@
       __ dec(end_to,   4);
       __ dec(count, 2);
       __ st(O4, end_to, 0);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
+#else
+      // The next code is generated in copy_16_bytes_backward_with_shift()
+      // in other cases.
+      __ dec(count, 8);
 #endif
     } else {
       // copy 1 element if necessary to align 'to' on an 4 bytes
@@ -1613,7 +1699,7 @@
       __ dec(end_to, 2);
       __ dec(count);
       __ sth(O3, end_to, 0);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
 
       // copy 2 elements to align 'to' on an 8 byte boundary
       __ andcc(end_to, 7, G0);
@@ -1625,11 +1711,15 @@
       __ dec(end_to, 4);
       __ sth(O3, end_to, 2);
       __ sth(O4, end_to, 0);
-    __ bind(L_skip_alignment2);
-
+    __ BIND(L_skip_alignment2);
+    }
+#ifdef _LP64
+    if (!aligned)
+#endif
+    {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise jump to the next
-      // code for aligned copy.
+      // code for aligned copy (and subtracting 8 from 'count' before jump).
       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
       // Also jump over aligned copy after the copy with shift completed.
 
@@ -1638,30 +1728,30 @@
     }
     // copy 4 elements (16 bytes) at a time
       __ align(16);
-    __ bind(L_aligned_copy);
+    __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
       __ ldx(end_from, 0, O4);
       __ dec(end_to, 16);
       __ deccc(count, 8);
       __ stx(O3, end_to, 8);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
       __ delayed()->stx(O4, end_to, 0);
       __ inc(count, 8);
 
     // copy 1 element (2 bytes) at a time
-    __ bind(L_copy_2_bytes);
+    __ BIND(L_copy_2_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-    __ bind(L_copy_2_bytes_loop);
+    __ BIND(L_copy_2_bytes_loop);
       __ dec(end_from, 2);
       __ dec(end_to, 2);
       __ lduh(end_from, 0, O4);
       __ deccc(count);
-      __ br(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
+      __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
       __ delayed()->sth(O4, end_to, 0);
 
-    __ bind(L_exit);
+    __ BIND(L_exit);
     // O3, O4 are used as temp registers
     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
     __ retl();
@@ -1690,8 +1780,6 @@
     const Register offset    = O5;   // offset from start of arrays
     // O3, O4, G3, G4 are used as temp registers
 
-    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
-
     // 'aligned' == true when it is known statically during compilation
     // of this arraycopy call site that both 'from' and 'to' addresses
     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
@@ -1708,7 +1796,7 @@
 
       // for short arrays, just do single element copy
       __ cmp(count, 5); // 4 + 1 (20 bytes)
-      __ br(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
+      __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
       __ delayed()->mov(G0, offset);
 
       // copy 1 element to align 'to' on an 8 byte boundary
@@ -1719,10 +1807,7 @@
       __ inc(to, 4);
       __ dec(count);
       __ st(O3, to, -4);
-    __ bind(L_skip_alignment);
-    }
-
-    if (!aligned) {
+    __ BIND(L_skip_alignment);
 
     // if arrays have same alignment mod 8, do 4 elements copy
       __ andcc(from, 7, G0);
@@ -1740,7 +1825,7 @@
       __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4
 
       __ align(16);
-    __ bind(L_copy_16_bytes);
+    __ BIND(L_copy_16_bytes);
       __ ldx(from, 4, O4);
       __ deccc(count, 4); // Can we do next iteration after this one?
       __ ldx(from, 12, G4);
@@ -1754,13 +1839,13 @@
       __ srlx(G4, 32, G3);
       __ bset(G3, O4);
       __ stx(O4, to, -8);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
       __ delayed()->mov(G4, O3);
 
       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
       __ delayed()->inc(count, 4); // restore 'count'
 
-    __ bind(L_aligned_copy);
+    __ BIND(L_aligned_copy);
     }
     // copy 4 elements (16 bytes) at a time
       __ and3(count, 1, G4); // Save
@@ -1769,16 +1854,16 @@
       __ mov(G4, count);     // Restore
 
     // copy 1 element at a time
-    __ bind(L_copy_4_bytes);
+    __ BIND(L_copy_4_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-    __ bind(L_copy_4_bytes_loop);
+    __ BIND(L_copy_4_bytes_loop);
       __ ld(from, offset, O3);
       __ deccc(count);
       __ st(O3, to, offset);
-      __ br(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
+      __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
       __ delayed()->inc(offset, 4);
-    __ bind(L_exit);
+    __ BIND(L_exit);
   }
 
   //
@@ -1795,6 +1880,13 @@
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
 
+    const Register count = O2;
+    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
+
+    if (!aligned)  disjoint_int_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
     generate_disjoint_int_copy_core(aligned);
 
     // O3, O4 are used as temp registers
@@ -1827,15 +1919,13 @@
     const Register end_to    = to;   // destination array end address
     // O3, O4, O5, G3 are used as temp registers
 
-    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
-
     const Register byte_count = O3;  // bytes count to copy
 
       __ sllx(count, LogBytesPerInt, byte_count);
       __ add(to, byte_count, end_to); // offset after last copied element
 
       __ cmp(count, 5); // for short arrays, just do single element copy
-      __ br(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
+      __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
       __ delayed()->add(from, byte_count, end_from);
 
     // copy 1 element to align 'to' on an 8 byte boundary
@@ -1847,7 +1937,7 @@
       __ dec(end_to,   4);
       __ ld(end_from, 0, O4);
       __ st(O4, end_to, 0);
-    __ bind(L_skip_alignment);
+    __ BIND(L_skip_alignment);
 
     // Check if 'end_from' and 'end_to' has the same alignment.
       __ andcc(end_from, 7, G0);
@@ -1861,7 +1951,7 @@
     //
       __ ldx(end_from, -4, O3);
       __ align(16);
-    __ bind(L_copy_16_bytes);
+    __ BIND(L_copy_16_bytes);
       __ ldx(end_from, -12, O4);
       __ deccc(count, 4);
       __ ldx(end_from, -20, O5);
@@ -1875,7 +1965,7 @@
       __ sllx(O5, 32, G3);
       __ bset(O4, G3);
       __ stx(G3, end_to, 0);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
       __ delayed()->mov(O5, O3);
 
       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
@@ -1883,29 +1973,29 @@
 
     // copy 4 elements (16 bytes) at a time
       __ align(16);
-    __ bind(L_aligned_copy);
+    __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
       __ ldx(end_from, 0, O4);
       __ dec(end_to, 16);
       __ deccc(count, 4);
       __ stx(O3, end_to, 8);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
       __ delayed()->stx(O4, end_to, 0);
       __ inc(count, 4);
 
     // copy 1 element (4 bytes) at a time
-    __ bind(L_copy_4_bytes);
+    __ BIND(L_copy_4_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-    __ bind(L_copy_4_bytes_loop);
+    __ BIND(L_copy_4_bytes_loop);
       __ dec(end_from, 4);
       __ dec(end_to, 4);
       __ ld(end_from, 0, O4);
       __ deccc(count);
-      __ br(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
+      __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
       __ delayed()->st(O4, end_to, 0);
-    __ bind(L_exit);
+    __ BIND(L_exit);
   }
 
   //
@@ -1924,7 +2014,13 @@
 
     address nooverlap_target = aligned ?
         StubRoutines::arrayof_jint_disjoint_arraycopy() :
-        StubRoutines::jint_disjoint_arraycopy();
+        disjoint_int_copy_entry;
+
+    assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
+
+    if (!aligned)  int_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
 
     array_overlap_test(nooverlap_target, 2);
 
@@ -1957,26 +2053,26 @@
 
       __ deccc(count, 2);
       __ mov(G0, offset0);   // offset from start of arrays (0)
-      __ br(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
+      __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->add(offset0, 8, offset8);
       __ align(16);
-    __ bind(L_copy_16_bytes);
+    __ BIND(L_copy_16_bytes);
       __ ldx(from, offset0, O3);
       __ ldx(from, offset8, G3);
       __ deccc(count, 2);
       __ stx(O3, to, offset0);
       __ inc(offset0, 16);
       __ stx(G3, to, offset8);
-      __ br(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
       __ delayed()->inc(offset8, 16);
 
-    __ bind(L_copy_8_bytes);
+    __ BIND(L_copy_8_bytes);
       __ inccc(count, 2);
-      __ br(Assembler::zero, true, Assembler::pn, L_exit );
+      __ brx(Assembler::zero, true, Assembler::pn, L_exit );
       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
       __ ldx(from, offset0, O3);
       __ stx(O3, to, offset0);
-    __ bind(L_exit);
+    __ BIND(L_exit);
   }
 
   //
@@ -1996,6 +2092,10 @@
 
     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
 
+    if (!aligned)  disjoint_long_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
     generate_disjoint_long_copy_core(aligned);
 
     // O3, O4 are used as temp registers
@@ -2025,25 +2125,25 @@
     const Register offset0 = O5;  // previous element offset
 
       __ subcc(count, 1, count);
-      __ br(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
+      __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->sllx(count, LogBytesPerLong, offset8);
       __ sub(offset8, 8, offset0);
       __ align(16);
-    __ bind(L_copy_16_bytes);
+    __ BIND(L_copy_16_bytes);
       __ ldx(from, offset8, O2);
       __ ldx(from, offset0, O3);
       __ stx(O2, to, offset8);
       __ deccc(offset8, 16);      // use offset8 as counter
       __ stx(O3, to, offset0);
-      __ br(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
+      __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
       __ delayed()->dec(offset0, 16);
 
-    __ bind(L_copy_8_bytes);
-      __ br(Assembler::negative, false, Assembler::pn, L_exit );
+    __ BIND(L_copy_8_bytes);
+      __ brx(Assembler::negative, false, Assembler::pn, L_exit );
       __ delayed()->nop();
       __ ldx(from, 0, O3);
       __ stx(O3, to, 0);
-    __ bind(L_exit);
+    __ BIND(L_exit);
   }
 
   //  Generate stub for conjoint long copy.  
@@ -2059,10 +2159,17 @@
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
-    address nooverlap_target = StubRoutines::arrayof_jlong_disjoint_arraycopy();
+
+    assert(!aligned, "usage");
+    address nooverlap_target = disjoint_long_copy_entry;
+
+    assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
+
+    if (!aligned)  long_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
+    if (!aligned)  BLOCK_COMMENT("Entry:");
 
     array_overlap_test(nooverlap_target, 3);
-    assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
 
     generate_conjoint_long_copy_core(aligned);
 
@@ -2091,11 +2198,16 @@
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
 
+    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
+
+    if (!aligned)  disjoint_oop_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
     // save arguments for barrier generation
     __ mov(to, G1);
     __ mov(count, G5);
   #ifdef _LP64
-    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
     generate_disjoint_long_copy_core(aligned);
   #else
     generate_disjoint_int_copy_core(aligned);
@@ -2128,21 +2240,28 @@
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
 
+    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
+
+    if (!aligned)  oop_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here
+    if (!aligned)  BLOCK_COMMENT("Entry:");
+
     // save arguments for barrier generation
     __ mov(to, G1);
     __ mov(count, G5);
 
     address nooverlap_target = aligned ?
         StubRoutines::arrayof_oop_disjoint_arraycopy() :
-        StubRoutines::oop_disjoint_arraycopy();
+        disjoint_oop_copy_entry;
+
+    array_overlap_test(nooverlap_target, LogBytesPerWord);
+
   #ifdef _LP64
-    array_overlap_test(nooverlap_target, 3);
-    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
     generate_conjoint_long_copy_core(aligned);
   #else
-    array_overlap_test(nooverlap_target, 2);
     generate_conjoint_int_copy_core(aligned);
   #endif
+
     // O0 is used as temp register
     array_store_check(G1, G5, O0);
 
@@ -2154,6 +2273,274 @@
   }
 
 
+  // Helper for generating a dynamic type check.
+  // Smashes only the given temp registers.
+  void generate_type_check(Register sub_klass,
+                           Register super_check_offset,
+                           Register super_klass,
+                           Register temp,
+                           Label& L_success,
+                           Register deccc_hack = noreg) {
+    assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
+
+    BLOCK_COMMENT("type_check:");
+
+    Label L_miss;
+
+    assert_clean_int(super_check_offset, temp);
+
+    // maybe decrement caller's trip count:
+#define DELAY_SLOT delayed();   \
+    { if (deccc_hack == noreg) __ nop(); else __ deccc(deccc_hack); }
+
+    // if the pointers are equal, we are done (e.g., String[] elements)
+    __ cmp(sub_klass, super_klass);
+    __ brx(Assembler::equal, true, Assembler::pt, L_success);
+    __ DELAY_SLOT;
+
+    // check the supertype display:
+    __ ld_ptr(sub_klass, super_check_offset, temp); // query the super type
+    __ cmp(super_klass,                      temp); // test the super type
+    __ brx(Assembler::equal, true, Assembler::pt, L_success);
+    __ DELAY_SLOT;
+
+    int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
+                     Klass::secondary_super_cache_offset_in_bytes());
+    __ cmp(super_klass, sc_offset);
+    __ brx(Assembler::notEqual, true, Assembler::pt, L_miss);
+    __ delayed()->nop();
+
+    __ save_frame(0);
+    __ mov(sub_klass->after_save(), O1);
+    // mov(super_klass->after_save(), O2); //fill delay slot
+    assert(StubRoutines::Sparc::_partial_subtype_check != NULL, "order of generation");
+    __ call(StubRoutines::Sparc::_partial_subtype_check);
+    __ delayed()->mov(super_klass->after_save(), O2);
+    __ restore();
+
+    // Upon return, the condition codes are already set.
+    __ brx(Assembler::equal, true, Assembler::pt, L_success);
+    __ DELAY_SLOT;
+
+#undef DELAY_SLOT
+
+    // Fall through on failure!
+    __ BIND(L_miss);
+  }
+
+
+  //  Generate stub for checked oop copy.
+  //
+  // Arguments for generated stub:
+  //      from:  O0
+  //      to:    O1
+  //      count: O2 treated as signed
+  //      ckoff: O3 (super_check_offset)
+  //      ckval: O4 (super_klass)
+  //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
+  //
+  address generate_checkcast_copy(const char* name) {
+
+    const Register O0_from   = O0;      // source array address
+    const Register O1_to     = O1;      // destination array address
+    const Register O2_count  = O2;      // elements count
+    const Register O3_ckoff  = O3;      // super_check_offset
+    const Register O4_ckval  = O4;      // super_klass
+
+    const Register O5_offset = O5;      // loop var, with stride wordSize
+    const Register G1_remain = G1;      // loop var, with stride -1
+    const Register G3_oop    = G3;      // actual oop copied
+    const Register G4_klass  = G4;      // oop._klass
+    const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    int klass_off = oopDesc::klass_offset_in_bytes();
+
+#ifdef ASSERT
+    // We sometimes save a frame (see partial_subtype_check below).
+    // If this will cause trouble, let's fail now instead of later.
+    __ save_frame(0);
+    __ restore();
+#endif
+
+#ifdef ASSERT
+    // caller guarantees that the arrays really are different
+    // otherwise, we would have to make conjoint checks
+    { Label L;
+      __ mov(O3, G1);           // spill: overlap test smashes O3
+      __ mov(O4, G4);           // spill: overlap test smashes O4
+      array_overlap_test(L, LogBytesPerWord);
+      __ stop("checkcast_copy within a single array");
+      __ bind(L);
+      __ mov(G1, O3);
+      __ mov(G4, O4);
+    }
+#endif //ASSERT
+
+    assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
+
+    checkcast_copy_entry = __ pc();
+    // caller can pass a 64-bit byte count here (from generic stub)
+    BLOCK_COMMENT("Entry:");
+
+    Label load_element, store_element, do_card_marks, fail, done;
+    __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
+    __ brx(Assembler::notZero, false, Assembler::pt, load_element);
+    __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
+
+    // Empty array:  Nothing to do.
+    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
+    __ retl();
+    __ delayed()->set(0, O0);           // return 0 on (trivial) success
+
+    // ======== begin loop ========
+    // (Loop is rotated; its entry is load_element.)
+    // Loop variables:
+    //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
+    //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
+    //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
+    __ align(16);
+
+    __ bind(store_element);
+    // deccc(G1_remain);                // decrement the count (hoisted)
+    __ st_ptr(G3_oop, O1_to, O5_offset); // store the oop
+    __ inc(O5_offset, wordSize);        // step to next offset
+    __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
+    __ delayed()->set(0, O0);           // return 0 on success
+
+    // ======== loop entry is here ========
+    __ bind(load_element);
+    __ ld_ptr(O0_from, O5_offset, G3_oop);  // load the oop
+    __ br_null(G3_oop, true, Assembler::pt, store_element);
+    __ delayed()->deccc(G1_remain);     // decrement the count
+
+    __ ld_ptr(G3_oop, klass_off, G4_klass); // query the object klass
+
+    generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
+                        // branch to this on success:
+                        store_element,
+                        // decrement this on success:
+                        G1_remain);
+    // ======== end loop ========
+
+    // It was a real error; we must depend on the caller to finish the job.
+    // Register G1 has number of *remaining* oops, O2 number of *total* oops.
+    // Emit GC store barriers for the oops we have copied (O2 minus G1),
+    // and report their number to the caller.
+    __ bind(fail);
+    __ subcc(O2_count, G1_remain, O2_count);
+    __ brx(Assembler::zero, false, Assembler::pt, done);
+    __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
+
+    __ bind(do_card_marks);
+    array_store_check(O1_to, O2_count, O2);   // store check on O1[0..O2]
+
+    __ bind(done);
+    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
+    __ retl();
+    __ delayed()->nop();             // return value in O0
+
+    return start;
+  }
+
+
+  //  Generate 'unsafe' array copy stub
+  //  Though just as safe as the other stubs, it takes an unscaled
+  //  size_t argument instead of an element count.
+  //
+  // Arguments for generated stub:
+  //      from:  O0
+  //      to:    O1
+  //      count: O2 byte count, treated as ssize_t, can be zero
+  //
+  // Examines the alignment of the operands and dispatches
+  // to a long, int, short, or byte copy loop.
+  //
+  address generate_unsafe_copy(const char* name) {
+
+    const Register O0_from   = O0;      // source array address
+    const Register O1_to     = O1;      // destination array address
+    const Register O2_count  = O2;      // elements count
+
+    const Register G1_bits   = G1;      // test copy of low bits
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    // bump this on entry, not on exit:
+    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
+
+    __ or3(O0_from, O1_to, G1_bits);
+    __ or3(O2_count,       G1_bits, G1_bits);
+
+    __ btst(BytesPerLong-1, G1_bits);
+    __ br(Assembler::zero, true, Assembler::pt,
+          long_copy_entry, relocInfo::runtime_call_type);
+    // scale the count on the way out:
+    __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
+
+    __ btst(BytesPerInt-1, G1_bits);
+    __ br(Assembler::zero, true, Assembler::pt,
+          int_copy_entry, relocInfo::runtime_call_type);
+    // scale the count on the way out:
+    __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
+
+    __ btst(BytesPerShort-1, G1_bits);
+    __ br(Assembler::zero, true, Assembler::pt,
+          short_copy_entry, relocInfo::runtime_call_type);
+    // scale the count on the way out:
+    __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
+
+    __ br(Assembler::always, false, Assembler::pt,
+          byte_copy_entry, relocInfo::runtime_call_type);
+    __ delayed()->nop();
+
+    return start;
+  }
+
+
+  // Perform range checks on the proposed arraycopy.
+  // Kills the two temps, but nothing else.
+  // Also, clean the sign bits of src_pos and dst_pos.
+  void arraycopy_range_checks(Register src,     // source array oop (O0)
+                              Register src_pos, // source position (O1)
+                              Register dst,     // destination array oop (O2)
+                              Register dst_pos, // destination position (O3)
+                              Register length,  // length of copy (O4)
+                              Register temp1, Register temp2,
+                              Label& L_failed) {
+    BLOCK_COMMENT("arraycopy_range_checks:");
+
+    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
+
+    const Register array_length = temp1;  // scratch
+    const Register end_pos      = temp2;  // scratch
+
+    // Note:  This next instruction may be in the delay slot of a branch:
+    __ add(length, src_pos, end_pos);  // src_pos + length
+    __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length); 
+    __ cmp(end_pos, array_length);
+    __ br(Assembler::greater, false, Assembler::pn, L_failed);
+
+    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
+    __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
+    __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length); 
+    __ cmp(end_pos, array_length);
+    __ br(Assembler::greater, false, Assembler::pn, L_failed);
+
+    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
+    // Move with sign extension can be used since they are positive.
+    __ delayed()->signx(src_pos, src_pos);
+    __ signx(dst_pos, dst_pos);
+
+    BLOCK_COMMENT("arraycopy_range_checks done");
+  }
+
+
   //
   //  Generate generic array copy stubs
   //
@@ -2180,12 +2567,23 @@
     const Register length   = O4;  // elements count
 
     // registers used as temp
-    const Register G3_klass = G3;  // array klass
+    const Register G3_src_klass = G3; // source array klass
+    const Register G4_dst_klass = G4; // destination array klass
+    const Register G5_lh        = G5; // layout helper
+    const Register O5_temp      = O5;
 
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
 
+    // bump this on entry, not on exit:
+    inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
+
+    // In principle, the int arguments could be dirty.
+    //assert_clean_int(src_pos, G1);
+    //assert_clean_int(dst_pos, G1);
+    //assert_clean_int(length, G1);
+
     //-----------------------------------------------------------------------
     // Assembler stubs will be used for this call to arraycopy 
     // if the following conditions are met:
@@ -2198,6 +2596,7 @@
     // (6) src and dst should be arrays.
     // (7) src_pos + length must not exceed length of src.
     // (8) dst_pos + length must not exceed length of dst.
+    BLOCK_COMMENT("arraycopy initial argument checks");
 
     //  if (src == NULL) return -1;
     __ br_null(src, false, Assembler::pn, L_failed);
@@ -2218,60 +2617,73 @@
     __ delayed()->tst(length);
     __ br(Assembler::negative, false, Assembler::pn, L_failed);
 
-    //  if (src->klass() == NULL) return -1;
-    __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_klass);
-    __ br_null(G3_klass, false, Assembler::pn, L_failed);
-
-    //  if (src->klass() != dst->klass()) return -1;
-    __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4);
-    __ cmp(G3_klass, G4);
-    __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
+    BLOCK_COMMENT("arraycopy argument klass checks");
+    //  get src->klass()
+    __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
+
+#ifdef ASSERT
+    //  assert(src->klass() != NULL);
+    BLOCK_COMMENT("assert klasses not null");
+    { Label L_a, L_b;
+      __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
+      __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+      __ bind(L_a);
+      __ stop("broken null klass");
+      __ bind(L_b);
+      __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
+      __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
+      BLOCK_COMMENT("assert done");
+    }
+#endif
 
     // Load layout helper
     //
     //  |array_tag|     | header_size | element_type |     |log2_element_size|
     // 32        30    24            16              8     2                 0
     //
-    //   array_tag: typeArray = 0x3, objArray = 0x2
+    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
     //
 
     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
                     Klass::layout_helper_offset_in_bytes();
 
-    const Register G3_lh = G3;  // layout helper
-
     // Load 32-bits signed value. Use br() instruction with it to check icc.
-    __ delayed()->lduw(G3_klass, lh_offset, G3_lh);
+    __ lduw(G3_src_klass, lh_offset, G5_lh);
+
+    // Handle objArrays completely differently...
+    juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
+    __ set(objArray_lh, O5_temp);
+    __ cmp(G5_lh,       O5_temp);
+    __ br(Assembler::equal, false, Assembler::pt, L_objArray);
+    __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+
+    //  if (src->klass() != dst->klass()) return -1;
+    __ cmp(G3_src_klass, G4_dst_klass);
+    __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
+    __ delayed()->nop();
 
     //  if (!src->is_Array()) return -1;
-    __ cmp(G3_lh, Klass::_lh_neutral_value); // < 0
+    __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
 
-    //  if (src_pos + length > arrayOop(src)->length() ) return ac_failed;
-
-    const Register G4_array_length = G4;  // scratch
-    const Register G5_end_pos      = G5;  // scratch
-
-    __ delayed()->add(length, src_pos, G5_end_pos);  // src_pos + length
-    __ lduw(src, arrayOopDesc::length_offset_in_bytes(), G4_array_length); 
-    __ cmp(G5_end_pos, G4_array_length);
-    __ br(Assembler::greater, false, Assembler::pn, L_failed);
-
-    //  if (dst_pos + length > arrayOop(dst)->length() ) return ac_failed;
-    __ delayed()->add(length, dst_pos, G5_end_pos); // dst_pos + length
-    __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), G4_array_length); 
-    __ cmp(G5_end_pos, G4_array_length);
-    __ br(Assembler::greater, false, Assembler::pn, L_failed);
-
-    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
-    // Move with sign extension can be used since they are positive.
-    __ delayed()->signx(src_pos, src_pos);
-    __ signx(dst_pos, dst_pos);
-
-    __ srl(G3_lh, Klass::_lh_array_tag_shift, G4);
-    __ cmp(G4, Klass::_lh_array_tag_obj_value);
-    __ br(Assembler::equal, false, Assembler::pn, L_objArray);
+    // At this point, it is known to be a typeArray (array_tag 0x3).
+#ifdef ASSERT
     __ delayed()->nop();
+    { Label L;
+      jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
+      __ set(lh_prim_tag_in_place, O5_temp);
+      __ cmp(G5_lh,                O5_temp);
+      __ br(Assembler::greaterEqual, false, Assembler::pt, L);
+      __ delayed()->nop();
+      __ stop("must be a primitive array");
+      __ bind(L);
+    }
+#else
+    __ delayed();                               // match next insn to prev branch
+#endif
+
+    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
+                           O5_temp, G4_dst_klass, L_failed);
 
     // typeArrayKlass
     //
@@ -2279,14 +2691,14 @@
     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
     //
 
-    const Register G4_offset = G4;     // array offset
-    const Register G3_elsize = G3_lh;  // log2 element size
-
-    __ srl(G3_lh, Klass::_lh_header_size_shift, G4_offset);
+    const Register G4_offset = G4_dst_klass;    // array offset
+    const Register G3_elsize = G3_src_klass;    // log2 element size
+
+    __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
     __ add(src, G4_offset, src);       // src array offset
     __ add(dst, G4_offset, dst);       // dst array offset
-    __ and3(G3_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
+    __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
 
     // next registers should be set before the jump to corresponding stub
     const Register from     = O0;  // source array address
@@ -2296,11 +2708,13 @@
     // 'from', 'to', 'count' registers should be set in this order
     // since they are the same as 'src', 'src_pos', 'dst'.
 
+    BLOCK_COMMENT("scale indexes to element size");
     __ sll_ptr(src_pos, G3_elsize, src_pos);
     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
     __ add(src, src_pos, from);       // src_addr
     __ add(dst, dst_pos, to);         // dst_addr
 
+    BLOCK_COMMENT("choose copy loop based on element size");
     __ cmp(G3_elsize, 0);
     __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jbyte_arraycopy);
     __ delayed()->signx(length, count); // length
@@ -2313,25 +2727,87 @@
     __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jint_arraycopy);
     __ delayed()->signx(length, count); // length
 #ifdef ASSERT
-    __ cmp(G3_elsize, LogBytesPerLong);
-    __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
-    __ delayed()->nop();
+    { Label L;
+      __ cmp(G3_elsize, LogBytesPerLong);
+      __ br(Assembler::equal, false, Assembler::pt, L);
+      __ delayed()->nop();
+      __ stop("must be long copy, but elsize is wrong");
+      __ bind(L);
+    }
 #endif
     __ br(Assembler::always,false,Assembler::pt,StubRoutines::_jlong_arraycopy);
     __ delayed()->signx(length, count); // length
 
     // objArrayKlass
-  __ bind(L_objArray); 
+  __ BIND(L_objArray); 
+    // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
+
+    Label L_plain_copy, L_checkcast_copy;
+    //  test array classes for subtyping
+    __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
+    __ br(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
+    __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
+
+    // Identically typed arrays can be copied without element-wise checks.
+    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
+                           O5_temp, G5_lh, L_failed);
+
     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
     __ sll_ptr(src_pos, LogBytesPerOop, src_pos);
     __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
     __ add(src, src_pos, from);       // src_addr
     __ add(dst, dst_pos, to);         // dst_addr
+  __ BIND(L_plain_copy);
     __ br(Assembler::always, false, Assembler::pt,StubRoutines::_oop_arraycopy);
     __ delayed()->signx(length, count); // length
 
-  __ bind(L_failed);
+  __ BIND(L_checkcast_copy);
+    // live at this point:  G3_src_klass, G4_dst_klass
+    {
+      // Before looking at dst.length, make sure dst is also an objArray.
+      // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
+      __ cmp(G5_lh,                    O5_temp);
+      __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
+
+      // It is safe to examine both src.length and dst.length.
+      __ delayed();                             // match next insn to prev branch
+      arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
+                             O5_temp, G5_lh, L_failed);
+
+      // Marshal the base address arguments now, freeing registers.
+      __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
+      __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
+      __ sll_ptr(src_pos, LogBytesPerOop, src_pos);
+      __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
+      __ add(src, src_pos, from);               // src_addr
+      __ add(dst, dst_pos, to);                 // dst_addr
+      __ signx(length, count);                  // length (reloaded)
+
+      Register sco_temp = O3;                   // this register is free now
+      assert_different_registers(from, to, count, sco_temp,
+                                 G4_dst_klass, G3_src_klass);
+
+      // Generate the type check.
+      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
+                        Klass::super_check_offset_offset_in_bytes());
+      __ lduw(G4_dst_klass, sco_offset, sco_temp);
+      generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
+                          O5_temp, L_plain_copy);
+
+      // Fetch destination element klass from the objArrayKlass header.
+      int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
+                       objArrayKlass::element_klass_offset_in_bytes());
+
+      // the checkcast_copy loop needs two extra arguments:
+      __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
+      // lduw(O4, sco_offset, O3);              // sco of elem klass
+
+      __ br(Assembler::always, false, Assembler::pt, checkcast_copy_entry);
+      __ delayed()->lduw(O4, sco_offset, O3);
+    }
+
+  __ BIND(L_failed);
     __ retl();
     __ delayed()->sub(G0, 1, O0); // return -1
     return start;
@@ -2368,7 +2844,9 @@
     StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;
     StubRoutines::_arrayof_oop_arraycopy      = StubRoutines::_oop_arraycopy;
 
-    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
+    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
+    StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
+    StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
   }
 
   void generate_initial() {
@@ -2482,6 +2960,20 @@
 }; // end class declaration
 
 
+address StubGenerator::disjoint_byte_copy_entry  = NULL;
+address StubGenerator::disjoint_short_copy_entry = NULL;
+address StubGenerator::disjoint_int_copy_entry   = NULL;
+address StubGenerator::disjoint_long_copy_entry  = NULL;
+address StubGenerator::disjoint_oop_copy_entry   = NULL;
+
+address StubGenerator::byte_copy_entry  = NULL;
+address StubGenerator::short_copy_entry = NULL;
+address StubGenerator::int_copy_entry   = NULL;
+address StubGenerator::long_copy_entry  = NULL;
+address StubGenerator::oop_copy_entry   = NULL;
+
+address StubGenerator::checkcast_copy_entry = NULL;
+
 void StubGenerator_generate(CodeBuffer* code, bool all) {
   StubGenerator g(code, all);
 }
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)os_linux.cpp	1.256 07/05/05 17:04:37 JVM"
+#pragma ident "@(#)os_linux.cpp	1.257 07/05/17 15:48:43 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -4003,13 +4003,6 @@
   return true;
 }
 
-#ifdef JVMPI_SUPPORT
-bool os::thread_is_running(JavaThread* tp) {
-  Unimplemented();
-  return false;
-}
-#endif // JVMPI_SUPPORT
-
 // System loadavg support.  Returns -1 if load average cannot be obtained.
 // Linux doesn't yet have a (official) notion of processor sets,
 // so just return the system wide load average.
--- a/hotspot/src/os/win32/vm/os_win32.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os/win32/vm/os_win32.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)os_win32.cpp	1.525 07/05/05 17:04:44 JVM"
+#pragma ident "@(#)os_win32.cpp	1.526 07/05/17 15:48:49 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -3205,33 +3205,6 @@
   return -1;
 }
 
-#ifdef JVMPI_SUPPORT
-bool os::thread_is_running(JavaThread* tp) {
-#ifdef _WIN64
-  assert(0, "Fix thread_is_running");
-  return  false;
-#else
-  // this code is a copy from classic VM -> hpi::sysThreadIsRunning
-  uintptr_t sum = 0;
-  uintptr_t *p;
-  CONTEXT context;
-  
-  context.ContextFlags = CONTEXT_FULL;
-  GetThreadContext(tp->osthread()->thread_handle(), &context);
-  p = (uintptr_t*)&context.SegGs;
-  while (p <= (uintptr_t*)&context.SegSs) {
-    sum += *p;
-    p++;
-  }
-  if (sum == tp->last_sum()) {
-    return false;
-  }
-  tp->set_last_sum(sum);
-  return true;
-#endif
-}
-#endif // JVMPI_SUPPORT
-
 
 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
 bool os::dont_yield() {
--- a/hotspot/src/os_cpu/linux_amd64/vm/atomic_linux_amd64.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/linux_amd64/vm/atomic_linux_amd64.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)atomic_linux_amd64.inline.hpp	1.9 07/05/05 17:04:47 JVM"
+#pragma ident "@(#)atomic_linux_amd64.inline.hpp	1.10 07/05/17 15:48:52 JVM"
 #endif
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -70,7 +70,7 @@
 
 inline void Atomic::inc(volatile jint* dest) {
   bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "incl (%0)"
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addl $1,(%0)"
 			:
 			: "r" (dest), "r" (mp)
 			: "cc", "memory");
@@ -78,7 +78,7 @@
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "incq (%0)"
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
 			:
 			: "r" (dest), "r" (mp)
 			: "cc", "memory");
@@ -90,7 +90,7 @@
 
 inline void Atomic::dec(volatile jint* dest) {
   bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "decl (%0)"
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subl $1,(%0)"
 			:
 			: "r" (dest), "r" (mp)
 			: "cc", "memory");
@@ -98,7 +98,7 @@
 
 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
   bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "decq (%0)"
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
 			:
 			: "r" (dest), "r" (mp)
 			: "cc", "memory");
--- a/hotspot/src/os_cpu/linux_i486/vm/atomic_linux_i486.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/linux_i486/vm/atomic_linux_i486.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)atomic_linux_i486.inline.hpp	1.29 07/05/05 17:04:48 JVM"
+#pragma ident "@(#)atomic_linux_i486.inline.hpp	1.30 07/05/17 15:48:55 JVM"
 #endif
 /*
  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -66,7 +66,7 @@
 
 inline void Atomic::inc    (volatile jint*     dest) {
   int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "incl (%0)" :
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
                     : "r" (dest), "r" (mp) : "cc", "memory");
 }
 
@@ -81,7 +81,7 @@
 
 inline void Atomic::dec    (volatile jint*     dest) {
   int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "decl (%0)" :
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
                     : "r" (dest), "r" (mp) : "cc", "memory");
 }
 
--- a/hotspot/src/os_cpu/linux_i486/vm/copy_linux_i486.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/linux_i486/vm/copy_linux_i486.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)copy_linux_i486.inline.hpp	1.11 07/05/05 17:04:49 JVM"
+#pragma ident "@(#)copy_linux_i486.inline.hpp	1.12 07/05/17 15:48:57 JVM"
 #endif
 /*
  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
@@ -41,7 +41,7 @@
                    "2:      movl    (%4),%3       ;"
                    "        movl    %7,(%5,%4,1)  ;"
                    "        addl    $4,%0         ;"
-                   "        decl    %2            ;"
+                   "        subl    $1,%2          ;"
                    "        jnz     2b            ;"
                    "        jmp     7f            ;"
                    "3:      rep;    smovl         ;"
@@ -54,7 +54,7 @@
                    "5:      movl    (%4),%3       ;"
                    "        movl    %7,(%5,%4,1)  ;"
                    "        subl    $4,%0         ;"
-                   "        decl    %2            ;"
+                   "        subl    $1,%2          ;"
                    "        jnz     5b            ;"
                    "        jmp     7f            ;"
                    "6:      std                   ;"
@@ -77,7 +77,7 @@
                    "1:      movl    (%4),%3     ;"
                    "        movl    %7,(%5,%4,1);"
                    "        addl    $4,%0       ;"
-                   "        decl    %2          ;"
+                   "        subl    $1,%2        ;"
                    "        jnz     1b          ;"
                    "        jmp     3f          ;"
                    "2:      rep;    smovl       ;"
@@ -106,7 +106,7 @@
                    "2:      movl    (%4),%3       ;"
                    "        movl    %7,(%5,%4,1)  ;"
                    "        addl    $4,%0         ;"
-                   "        decl    %2            ;"
+                   "        subl    $1,%2          ;"
                    "        jnz     2b            ;"
                    "        jmp     7f            ;"
                    "3:      rep;    smovl         ;"
@@ -119,7 +119,7 @@
                    "5:      movl    (%4),%3       ;"
                    "        movl    %7,(%5,%4,1)  ;"
                    "        subl    $4,%0         ;"
-                   "        decl    %2            ;"
+                   "        subl    $1,%2          ;"
                    "        jnz     5b            ;"
                    "        jmp     7f            ;"
                    "6:      std                   ;"
@@ -140,7 +140,7 @@
                    "1:      movl    (%4),%3     ;"
                    "        movl    %7,(%5,%4,1);"
                    "        addl    $4,%0       ;"
-                   "        decl    %2          ;"
+                   "        subl    $1,%2        ;"
                    "        jnz     1b          ;"
                    "        jmp     3f          ;"
                    "2:      rep;    smovl       ;"
@@ -177,7 +177,7 @@
                    "3:      movl    (%4),%%edx     ;"
                    "        movl    %%edx,(%5,%4,1);"
                    "        addl    $4,%0          ;"
-                   "        decl    %2             ;"
+                   "        subl    $1,%2           ;"
                    "        jnz     3b             ;"
                    "        addl    %4,%1          ;"
                    "        jmp     5f             ;"
@@ -188,8 +188,8 @@
                    "6:      xorl    %7,%3          ;"
                    "7:      movb    (%4,%7,1),%%dl ;"
                    "        movb    %%dl,(%5,%7,1) ;"
-                   "        incl    %3             ;"
-                   "        decl    %2             ;"
+                   "        addl    $1,%3          ;"
+                   "        subl    $1,%2           ;"
                    "        jnz     7b             ;"
                    "        jmp     13f            ;"
                    "8:      std                    ;"
@@ -200,7 +200,7 @@
                    "        jmp     11f            ;"
                    "9:      xchgl   %3,%2          ;"
                    "        movl    %6,%0          ;"
-                   "        incl    %2             ;"
+                   "        addl    $1,%2          ;"
                    "        leal    -1(%7,%5),%1   ;"
                    "        andl    $3,%2          ;"
                    "        jz      10f            ;"
--- a/hotspot/src/os_cpu/linux_i486/vm/linux_i486.s	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/linux_i486/vm/linux_i486.s	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
 # have any questions.
 # 
 
-.ident	"@(#)linux_i486.s	1.9 07/05/05 17:04:48 JVM"
+.ident	"@(#)linux_i486.s	1.10 07/05/17 15:48:59 JVM"
 	
         # NOTE WELL!  The _Copy functions are called directly
 	# from server-compiler-generated code via CallLeafNoFP,
@@ -100,8 +100,8 @@
         subl     %esi,%edi
 0:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
-        incl     %esi
-        decl     %ecx
+        addl     $1,%esi
+        subl     $1,%ecx
         jnz      0b
         addl     %esi,%edi
 1:      movl     %eax,%ecx            # byte count less prefix
@@ -118,7 +118,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx            # byte count less prefix
@@ -128,8 +128,8 @@
 5:      xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
-        incl     %eax
-        decl     %ecx
+        addl     $1,%eax
+        subl     $1,%ecx
         jnz      6b
 7:      popl     %edi
         popl     %esi
@@ -153,7 +153,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -167,8 +167,8 @@
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
-	decl     %esi
-        decl     %ecx
+	subl     $1,%esi
+        subl     $1,%ecx
         jnz      6b
 7:      cld
         popl     %edi
@@ -208,7 +208,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -221,8 +221,8 @@
 5:      xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
-        incl     %eax
-        decl     %ecx
+        addl     $1,%eax
+        subl     $1,%ecx
         jnz      6b
 7:      popl     %edi
         popl     %esi
@@ -247,7 +247,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx
@@ -257,8 +257,8 @@
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
-	decl     %esi
-        decl     %ecx
+	subl     $1,%esi
+        subl     $1,%ecx
         jnz      6b
 7:      cld
         popl     %edi
@@ -292,7 +292,7 @@
         movw     %dx,(%edi)
         addl     %eax,%esi            # %eax == 2
         addl     %eax,%edi
-        decl     %ecx
+        subl     $1,%ecx
 1:      movl     %ecx,%eax            # word count less prefix
         sarl     %ecx                 # dword count
         jz       4f                   # no dwords to move
@@ -307,7 +307,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      andl     $1,%eax              # suffix count
@@ -334,7 +334,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -383,7 +383,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      andl     $1,%eax              # suffix count
@@ -409,7 +409,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -459,7 +459,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         popl     %edi
         popl     %esi
@@ -474,7 +474,7 @@
 2:      movl     (%eax),%edx
         movl     %edx,(%edi,%eax,1)
         subl     $4,%eax
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         cld
         popl     %edi
@@ -519,14 +519,14 @@
 1:      fildll   (%eax)
         fistpll  (%edx,%eax,1)
         addl     $8,%eax
-2:      decl     %ecx
+2:      subl     $1,%ecx
         jge      1b
         ret
         .p2align 4,,15
 3:      fildll   (%eax,%ecx,8)
         fistpll  (%edx,%ecx,8)
 cla_CopyLeft:
-        decl     %ecx
+        subl     $1,%ecx
         jge      3b
         ret
 
@@ -557,12 +557,12 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      5f 
 3:      smovl # align to 8 bytes, we know we are 4 byte aligned to start
-        decl     %ecx
+        subl     $1,%ecx
 4:      .p2align 4,,15
         movq     0(%esi),%mm0
         addl     $64,%edi
@@ -610,7 +610,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
--- a/hotspot/src/os_cpu/solaris_amd64/vm/os_solaris_amd64.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/solaris_amd64/vm/os_solaris_amd64.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)os_solaris_amd64.cpp	1.16 07/05/05 17:04:50 JVM"
+#pragma ident "@(#)os_solaris_amd64.cpp	1.17 07/05/17 15:49:01 JVM"
 #endif
 /*
  * Copyright 2004-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -580,29 +580,3 @@
 void os::Solaris::init_thread_fpu_state(void) {
   // Nothing to do 
 }
-
-#ifdef JVMPI_SUPPORT
-// JVMPI code
-bool os::thread_is_running(JavaThread* tp) {
-  int         flag;
-  lwpid_t     lwpid;
-  gregset_t   reg;
-  lwpstatus_t lwpstatus;
-  int         res;
-  thread_t    tid = tp->osthread()->thread_id();
-  res = threadgetstate(tid, &flag, &lwpid, NULL, reg, &lwpstatus);
-  assert(res == 0, "threadgetstate() failure");
-  if(res != 0) return false; // Safe return value
-
-  uintptr_t sum = 0;
-  // given TRS_NONVOLATILE doesn't trust any other registers, just use these
-  sum += reg[REG_RSP];  sum += reg[REG_RIP];  sum += reg[REG_RBP];
-
-  if (tp->last_sum() == sum) {
-    return false;
-  } else {
-    tp->set_last_sum(sum);
-    return true;
-  }
-}
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/os_cpu/solaris_i486/vm/os_solaris_i486.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/solaris_i486/vm/os_solaris_i486.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)os_solaris_i486.cpp	1.117 07/05/05 17:04:51 JVM"
+#pragma ident "@(#)os_solaris_i486.cpp	1.118 07/05/17 15:49:03 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -716,32 +716,6 @@
   fixcw();
 }
 
-#ifdef JVMPI_SUPPORT
-// JVMPI code
-bool os::thread_is_running(JavaThread* tp) {
-  int         flag;
-  lwpid_t     lwpid;
-  gregset_t   reg;
-  lwpstatus_t lwpstatus;
-  int         res;
-  thread_t    tid = tp->osthread()->thread_id();
-  res = threadgetstate(tid, &flag, &lwpid, NULL, reg, &lwpstatus);
-  assert(res == 0, "threadgetstate() failure");
-  if(res != 0) return false; // Safe return value
-
-  uintptr_t sum = 0;
-  // give TRS_NONVOLATILE doesn't trust any other registers, just use these
-  sum += reg[UESP];  sum += reg[EIP];  sum += reg[EBP];
-
-  if (tp->last_sum() == sum) {
-    return false;
-  } else {
-    tp->set_last_sum(sum);
-    return true;
-  }
-}
-#endif // JVMPI_SUPPORT
-
 // These routines are the initial value of atomic_xchg_entry(),
 // atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
 // until initialization is complete.
--- a/hotspot/src/os_cpu/solaris_i486/vm/solaris_i486.s	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/solaris_i486/vm/solaris_i486.s	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-.ident	"@(#)solaris_i486.s	1.13 07/05/05 17:04:52 JVM"
+.ident	"@(#)solaris_i486.s	1.14 07/05/17 15:49:05 JVM"
 //
 // Copyright 2004-2007 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -133,8 +133,8 @@
         subl     %esi,%edi
 0:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
-        incl     %esi
-        decl     %ecx
+        addl     $1,%esi
+        subl     $1,%ecx
         jnz      0b
         addl     %esi,%edi
 1:      movl     %eax,%ecx            / byte count less prefix
@@ -151,7 +151,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx            / byte count less prefix
@@ -161,8 +161,8 @@
 5:      xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
-        incl     %eax
-        decl     %ecx
+        addl     $1,%eax
+        subl     $1,%ecx
         jnz      6b
 7:      popl     %edi
         popl     %esi
@@ -186,7 +186,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -200,8 +200,8 @@
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
-	decl     %esi
-        decl     %ecx
+	subl     $1,%esi
+        subl     $1,%ecx
         jnz      6b
 7:      cld
         popl     %edi
@@ -240,7 +240,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -253,8 +253,8 @@
 5:      xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
-        incl     %eax
-        decl     %ecx
+        addl     $1,%eax
+        subl     $1,%ecx
         jnz      6b
 7:      popl     %edi
         popl     %esi
@@ -279,7 +279,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx
@@ -289,8 +289,8 @@
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
-	decl     %esi
-        decl     %ecx
+	subl     $1,%esi
+        subl     $1,%ecx
         jnz      6b
 7:      cld
         popl     %edi
@@ -323,7 +323,7 @@
         movw     %dx,(%edi)
         addl     %eax,%esi            / %eax == 2
         addl     %eax,%edi
-        decl     %ecx
+        subl     $1,%ecx
 1:      movl     %ecx,%eax            / word count less prefix
         sarl     %ecx                 / dword count
         jz       4f                   / no dwords to move
@@ -338,7 +338,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      andl     $1,%eax              / suffix count
@@ -365,7 +365,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -413,7 +413,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         addl     %esi,%edi
 4:      andl     $1,%eax              / suffix count
@@ -439,7 +439,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
@@ -487,7 +487,7 @@
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      3b
         popl     %edi
         popl     %esi
@@ -502,7 +502,7 @@
 2:      movl     (%eax),%edx
         movl     %edx,(%edi,%eax,1)
         subl     $4,%eax
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         cld
         popl     %edi
@@ -546,14 +546,14 @@
 1:      fildll   (%eax)
         fistpll  (%edx,%eax,1)
         addl     $8,%eax
-2:      decl     %ecx
+2:      subl     $1,%ecx
         jge      1b
         ret
         .align   16
 3:      fildll   (%eax,%ecx,8)
         fistpll  (%edx,%ecx,8)
 cla_CopyLeft:
-        decl     %ecx
+        subl     $1,%ecx
         jge      3b
         ret
 
@@ -583,12 +583,12 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      5f 
 3:      smovl / align to 8 bytes, we know we are 4 byte aligned to start
-        decl     %ecx
+        subl     $1,%ecx
 4:      .align   16
         movq     0(%esi),%mm0
         addl     $64,%edi
@@ -636,7 +636,7 @@
 2:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         subl     $4,%esi
-        decl     %ecx
+        subl     $1,%ecx
         jnz      2b
         addl     %esi,%edi
         jmp      4f
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)os_solaris_sparc.cpp	1.118 07/05/05 17:04:53 JVM"
+#pragma ident "@(#)os_solaris_sparc.cpp	1.119 07/05/17 15:49:08 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -609,42 +609,6 @@
     // Nothing needed on Sparc.
 }
 
-#ifdef JVMPI_SUPPORT
-// JVMPI code
-bool os::thread_is_running(JavaThread* tp) {
-  int         flag;
-  lwpid_t     lwpid;
-  gregset_t   reg;
-  lwpstatus_t lwpstatus;
-  int         res;
-  thread_t    tid = tp->osthread()->thread_id();
-  res = threadgetstate(tid, &flag, &lwpid, NULL, reg, &lwpstatus);
-  assert(res == 0, "threadgetstate() failure");
-  if(res != 0) return false; // Safe return value
-
-  uintptr_t sum = 0;
-  sum += reg[R_SP];  sum += reg[R_PC];
-
-  sum += reg[R_G1];  sum += reg[R_G2];  sum += reg[R_G3];  sum += reg[R_G4];
-    
-  sum += reg[R_O0];  sum += reg[R_O1];  sum += reg[R_O2];  sum += reg[R_O3];
-  sum += reg[R_O4];  sum += reg[R_O5];
-
-  sum += reg[R_I0];  sum += reg[R_I1];  sum += reg[R_I2];  sum += reg[R_I3];
-  sum += reg[R_I4];  sum += reg[R_I5];  sum += reg[R_I6];  sum += reg[R_I7];
-
-  sum += reg[R_L0];  sum += reg[R_L1];  sum += reg[R_L2];  sum += reg[R_L3];
-  sum += reg[R_L4];  sum += reg[R_L5];  sum += reg[R_L6];  sum += reg[R_L7];
-
-  if (tp->last_sum() == sum) {
-    return false;
-  } else {
-    tp->set_last_sum(sum);
-    return true;
-  }
-}
-#endif // JVMPI_SUPPORT
-
 #if !defined(COMPILER2) && !defined(_LP64)
 
 // These routines are the initial value of atomic_xchg_entry(),
--- a/hotspot/src/os_cpu/win32_i486/vm/atomic_win32_i486.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/win32_i486/vm/atomic_win32_i486.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)atomic_win32_i486.inline.hpp	1.19 07/05/05 17:04:56 JVM"
+#pragma ident "@(#)atomic_win32_i486.inline.hpp	1.20 07/05/17 15:49:10 JVM"
 #endif
 /*
  * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -90,7 +90,7 @@
   __asm {
     mov edx, dest;
     LOCK_IF_MP(mp)
-    inc dword ptr [edx];
+    add dword ptr [edx], 1;
   }
 }
 
@@ -109,7 +109,7 @@
   __asm {
     mov edx, dest;
     LOCK_IF_MP(mp)
-    dec dword ptr [edx];
+    sub dword ptr [edx], 1;
   }
 }
 
--- a/hotspot/src/os_cpu/win32_i486/vm/copy_win32_i486.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/os_cpu/win32_i486/vm/copy_win32_i486.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)copy_win32_i486.inline.hpp	1.10 07/05/05 17:04:56 JVM"
+#pragma ident "@(#)copy_win32_i486.inline.hpp	1.11 07/05/17 15:49:12 JVM"
 #endif
 /*
  * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
@@ -92,14 +92,14 @@
     add    eax, 8;
     add    edx, 8;
   uptest:
-    dec    ecx;
+    sub    ecx, 1;
     jge    up;
     jmp    done;
   down:
     fild   qword ptr [eax][ecx*8];
     fistp  qword ptr [edx][ecx*8];
   downtest:
-    dec    ecx;
+    sub    ecx, 1;
     jge    down;
   done:;
   }
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)formssel.cpp	1.181 07/05/05 17:05:02 JVM"
+#pragma ident "@(#)formssel.cpp	1.182 07/05/17 15:49:16 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -269,6 +269,13 @@
   return _matrule->is_ideal_if();
 }
 
+// Return 'true' if this instruction matches an ideal 'FastLock' node
+bool InstructForm::is_ideal_fastlock() const {
+  if( _matrule == NULL ) return false;
+
+  return _matrule->is_ideal_fastlock();
+}
+
 // Return 'true' if this instruction matches an ideal 'MemBarXXX' node
 bool InstructForm::is_ideal_membar() const {
   if( _matrule == NULL ) return false;
@@ -1030,6 +1037,9 @@
   else if (is_ideal_if()) {
     return "MachIfNode";
   }
+  else if (is_ideal_fastlock()) {
+    return "MachFastLockNode";
+  }
   else if (is_ideal_nop()) {
     return "MachNopNode";
   }
@@ -3801,6 +3811,13 @@
     !strcmp(_opType,"CountedLoopEnd");
 }
 
+bool MatchRule::is_ideal_fastlock() const {
+  if ( _opType && (strcmp(_opType,"Set") == 0) && _rChild ) {
+    return (strcmp(_rChild->_opType,"FastLock") == 0);
+  }
+  return false;
+}
+
 bool MatchRule::is_ideal_membar() const {
   if( !_opType ) return false;
   return 
--- a/hotspot/src/share/vm/adlc/formssel.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/adlc/formssel.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)formssel.hpp	1.75 07/05/05 17:05:02 JVM"
+#pragma ident "@(#)formssel.hpp	1.76 07/05/17 15:49:19 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -143,6 +143,7 @@
   virtual bool        is_ideal_unlock() const;  // node matches ideal 'Unlock'
   virtual bool        is_ideal_call_leaf() const; // node matches ideal 'CallLeaf'
   virtual bool        is_ideal_if()   const;    // node matches ideal 'If'
+  virtual bool        is_ideal_fastlock() const; // node matches 'FastLock'
   virtual bool        is_ideal_membar() const;  // node matches ideal 'MemBarXXX'
   virtual bool        is_ideal_loadPC() const;  // node matches ideal 'LoadPC'
   virtual bool        is_ideal_box() const;     // node matches ideal 'Box'
@@ -981,6 +982,7 @@
   bool       is_ideal_unlock() const;
   bool       is_ideal_call_leaf() const;
   bool       is_ideal_if()   const;    // node matches ideal 'If'
+  bool       is_ideal_fastlock() const; // node matches ideal 'FastLock'
   bool       is_ideal_jump()   const;  // node matches ideal 'Jump'
   bool       is_ideal_membar() const;  // node matches ideal 'MemBarXXX'
   bool       is_ideal_loadPC() const;  // node matches ideal 'LoadPC'
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)output_c.cpp	1.183 07/05/05 17:05:02 JVM"
+#pragma ident "@(#)output_c.cpp	1.184 07/05/17 15:49:23 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1585,6 +1585,10 @@
 	fprintf(fp, "  ((MachIfNode*)n%d)->_fcnt = _fcnt;\n",cnt);
       }
 
+      if( node->is_ideal_fastlock() && new_inst->is_ideal_fastlock() ) {
+	fprintf(fp, "  ((MachFastLockNode*)n%d)->_counters = _counters;\n",cnt);
+      }
+
       const char *resultOper = new_inst->reduce_result();
       fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n", 
               cnt, machOperEnum(resultOper));
@@ -3618,6 +3622,9 @@
     fprintf(fp_cpp, "%s node->_prob = _leaf->as_If()->_prob;\n", indent);
     fprintf(fp_cpp, "%s node->_fcnt = _leaf->as_If()->_fcnt;\n", indent);
   }
+  if( inst->is_ideal_fastlock() ) {
+    fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
+  }
   
 }
 
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)codeBuffer.hpp	1.62 07/05/05 17:05:04 JVM"
+#pragma ident "@(#)codeBuffer.hpp	1.63 07/05/17 15:49:26 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -489,6 +489,7 @@
   void initialize_oop_recorder(OopRecorder* r);
 
   OopRecorder* oop_recorder() const   { return _oop_recorder; }
+  CodeComments& comments()            { return _comments; }
 
   // Code generation
   void relocate(address at, RelocationHolder const& rspec, int format = 0) {
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_Compilation.cpp	1.158 07/05/05 17:05:06 JVM"
+#pragma ident "@(#)c1_Compilation.cpp	1.159 07/05/17 15:49:28 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -425,11 +425,6 @@
 , _allocator(NULL)
 , _code(Runtime1::get_buffer_blob()->instructions_begin(),
         Runtime1::get_buffer_blob()->instructions_size())
-#ifdef JVMPI_SUPPORT
-, _jvmpi_event_method_entry_enabled   (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY) ||
-                                       jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY2))
-, _jvmpi_event_method_exit_enabled    (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT))
-#endif // JVMPI_SUPPORT
 , _current_instruction(NULL)
 #ifndef PRODUCT
 , _last_instruction_printed(NULL)
@@ -440,11 +435,7 @@
   assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time");
   _arena = Thread::current()->resource_area();
   _compilation = this;
-#ifdef JVMPI_SUPPORT
-  _needs_debug_information = jvmpi::enabled() || JvmtiExport::can_examine_or_deopt_anywhere() || 
-#else // !JVMPI_SUPPORT
   _needs_debug_information = JvmtiExport::can_examine_or_deopt_anywhere() || 
-#endif // JVMPI_SUPPORT
                                JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot;
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)c1_Compilation.hpp	1.87 07/05/05 17:05:05 JVM"
+#pragma ident "@(#)c1_Compilation.hpp	1.88 07/05/17 15:49:31 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -85,12 +85,6 @@
   CodeOffsets        _offsets;
   CodeBuffer         _code;
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI specifics
-  bool _jvmpi_event_method_entry_enabled;
-  bool _jvmpi_event_method_exit_enabled;
-#endif // JVMPI_SUPPORT
-
   // compilation helpers
   void initialize();
   void build_hir();
@@ -154,17 +148,6 @@
   Dependencies* dependency_recorder() const; // = _env->dependencies()
   ImplicitExceptionTable* implicit_exception_table()     { return &_implicit_exception_table; }
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi flags
-  bool jvmpi_event_method_entry_enabled() const  { return _jvmpi_event_method_entry_enabled;}
-  bool jvmpi_event_method_exit_enabled() const   { return _jvmpi_event_method_exit_enabled;}
-  bool jvmpi_event_method_enabled() const {
-    return
-      jvmpi_event_method_entry_enabled()  ||
-      jvmpi_event_method_exit_enabled();
-  }
-#endif // JVMPI_SUPPORT
-
   Instruction* current_instruction() const       { return _current_instruction; }
   Instruction* set_current_instruction(Instruction* instr) {
     Instruction* previous = _current_instruction;
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_GraphBuilder.cpp	1.254 07/05/05 17:05:08 JVM"
+#pragma ident "@(#)c1_GraphBuilder.cpp	1.255 07/05/17 15:49:34 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1704,6 +1704,9 @@
   Value recv = is_static ? NULL : apop();
   bool is_loaded = target->is_loaded();
   int vtable_index = methodOopDesc::invalid_vtable_index;
+
+#ifdef SPARC
+  // Currently only supported on Sparc.
   // The UseInlineCaches only controls dispatch to invokevirtuals for
   // loaded classes which we weren't able to statically bind.
   if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
@@ -1711,6 +1714,7 @@
     // Find a vtable index if one is available
     vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
   }
+#endif
   
   if (recv != NULL &&
       (code == Bytecodes::_invokespecial ||
@@ -2879,11 +2883,7 @@
   start_block->merge(_initial_state);
 
   BlockBegin* sync_handler = NULL;
-  if (method()->is_synchronized() ||
-#ifdef JVMPI_SUPPORT
-      compilation->jvmpi_event_method_enabled() ||
-#endif // JVMPI_SUPPORT
-      DTraceMethodProbes) {
+  if (method()->is_synchronized() || DTraceMethodProbes) {
     // setup an exception handler to do the unlocking and/or notification
     sync_handler = new BlockBegin(-1);
     sync_handler->set(BlockBegin::exception_entry_flag);
@@ -3004,12 +3004,6 @@
   // Clear out any existing inline bailout condition
   clear_inline_bailout();
 
-#ifdef JVMPI_SUPPORT
-  if (compilation()->jvmpi_event_method_enabled()) {
-    // do not inline at all
-    INLINE_BAILOUT("jvmpi event method enabled")
-  } else
-#endif // JVMPI_SUPPORT
   if (callee->should_exclude()) {
     // callee is excluded
     INLINE_BAILOUT("excluded by CompilerOracle")
@@ -3088,68 +3082,47 @@
       preserves_state = true;
       break;
 
-    // %%% the following xxx_obj32 are temporary until the 1.4.0 sun.misc.Unsafe goes away
-    case vmIntrinsics::_getObject_obj32 : return append_unsafe_get_obj32(callee, T_OBJECT);  return true;
-    case vmIntrinsics::_getBoolean_obj32: return append_unsafe_get_obj32(callee, T_BOOLEAN); return true;
-    case vmIntrinsics::_getByte_obj32   : return append_unsafe_get_obj32(callee, T_BYTE);    return true;
-    case vmIntrinsics::_getShort_obj32  : return append_unsafe_get_obj32(callee, T_SHORT);   return true;
-    case vmIntrinsics::_getChar_obj32   : return append_unsafe_get_obj32(callee, T_CHAR);    return true;
-    case vmIntrinsics::_getInt_obj32    : return append_unsafe_get_obj32(callee, T_INT);     return true;
-    case vmIntrinsics::_getLong_obj32   : return append_unsafe_get_obj32(callee, T_LONG);    return true;
-    case vmIntrinsics::_getFloat_obj32  : return append_unsafe_get_obj32(callee, T_FLOAT);   return true;
-    case vmIntrinsics::_getDouble_obj32 : return append_unsafe_get_obj32(callee, T_DOUBLE);  return true;
-
-    case vmIntrinsics::_putObject_obj32 : return append_unsafe_put_obj32(callee, T_OBJECT);  return true;
-    case vmIntrinsics::_putBoolean_obj32: return append_unsafe_put_obj32(callee, T_BOOLEAN); return true;
-    case vmIntrinsics::_putByte_obj32   : return append_unsafe_put_obj32(callee, T_BYTE);    return true;
-    case vmIntrinsics::_putShort_obj32  : return append_unsafe_put_obj32(callee, T_SHORT);   return true;
-    case vmIntrinsics::_putChar_obj32   : return append_unsafe_put_obj32(callee, T_CHAR);    return true;
-    case vmIntrinsics::_putInt_obj32    : return append_unsafe_put_obj32(callee, T_INT);     return true;
-    case vmIntrinsics::_putLong_obj32   : return append_unsafe_put_obj32(callee, T_LONG);    return true;
-    case vmIntrinsics::_putFloat_obj32  : return append_unsafe_put_obj32(callee, T_FLOAT);   return true;
-    case vmIntrinsics::_putDouble_obj32 : return append_unsafe_put_obj32(callee, T_DOUBLE);  return true;
-
     // Use special nodes for Unsafe instructions so we can more easily
     // perform an address-mode optimization on the raw variants
-    case vmIntrinsics::_getObject_obj : return append_unsafe_get_obj(callee, T_OBJECT,  false);
-    case vmIntrinsics::_getBoolean_obj: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
-    case vmIntrinsics::_getByte_obj   : return append_unsafe_get_obj(callee, T_BYTE,    false);
-    case vmIntrinsics::_getShort_obj  : return append_unsafe_get_obj(callee, T_SHORT,   false);
-    case vmIntrinsics::_getChar_obj   : return append_unsafe_get_obj(callee, T_CHAR,    false);
-    case vmIntrinsics::_getInt_obj    : return append_unsafe_get_obj(callee, T_INT,     false);
-    case vmIntrinsics::_getLong_obj   : return append_unsafe_get_obj(callee, T_LONG,    false);
-    case vmIntrinsics::_getFloat_obj  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
-    case vmIntrinsics::_getDouble_obj : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
-
-    case vmIntrinsics::_putObject_obj : return append_unsafe_put_obj(callee, T_OBJECT,  false);
-    case vmIntrinsics::_putBoolean_obj: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
-    case vmIntrinsics::_putByte_obj   : return append_unsafe_put_obj(callee, T_BYTE,    false);
-    case vmIntrinsics::_putShort_obj  : return append_unsafe_put_obj(callee, T_SHORT,   false);
-    case vmIntrinsics::_putChar_obj   : return append_unsafe_put_obj(callee, T_CHAR,    false);
-    case vmIntrinsics::_putInt_obj    : return append_unsafe_put_obj(callee, T_INT,     false);
-    case vmIntrinsics::_putLong_obj   : return append_unsafe_put_obj(callee, T_LONG,    false);
-    case vmIntrinsics::_putFloat_obj  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
-    case vmIntrinsics::_putDouble_obj : return append_unsafe_put_obj(callee, T_DOUBLE,  false); 
-
-    case vmIntrinsics::_getObjectVolatile_obj : return append_unsafe_get_obj(callee, T_OBJECT,  true); 
-    case vmIntrinsics::_getBooleanVolatile_obj: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
-    case vmIntrinsics::_getByteVolatile_obj   : return append_unsafe_get_obj(callee, T_BYTE,    true);
-    case vmIntrinsics::_getShortVolatile_obj  : return append_unsafe_get_obj(callee, T_SHORT,   true);
-    case vmIntrinsics::_getCharVolatile_obj   : return append_unsafe_get_obj(callee, T_CHAR,    true);
-    case vmIntrinsics::_getIntVolatile_obj    : return append_unsafe_get_obj(callee, T_INT,     true);
-    case vmIntrinsics::_getLongVolatile_obj   : return append_unsafe_get_obj(callee, T_LONG,    true);
-    case vmIntrinsics::_getFloatVolatile_obj  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
-    case vmIntrinsics::_getDoubleVolatile_obj : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
-
-    case vmIntrinsics::_putObjectVolatile_obj : return append_unsafe_put_obj(callee, T_OBJECT,  true);
-    case vmIntrinsics::_putBooleanVolatile_obj: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
-    case vmIntrinsics::_putByteVolatile_obj   : return append_unsafe_put_obj(callee, T_BYTE,    true);
-    case vmIntrinsics::_putShortVolatile_obj  : return append_unsafe_put_obj(callee, T_SHORT,   true);
-    case vmIntrinsics::_putCharVolatile_obj   : return append_unsafe_put_obj(callee, T_CHAR,    true);
-    case vmIntrinsics::_putIntVolatile_obj    : return append_unsafe_put_obj(callee, T_INT,     true);
-    case vmIntrinsics::_putLongVolatile_obj   : return append_unsafe_put_obj(callee, T_LONG,    true);
-    case vmIntrinsics::_putFloatVolatile_obj  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
-    case vmIntrinsics::_putDoubleVolatile_obj : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
+    case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
+    case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
+    case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE,    false);
+    case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT,   false);
+    case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR,    false);
+    case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT,     false);
+    case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG,    false);
+    case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
+    case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
+
+    case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT,  false);
+    case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
+    case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE,    false);
+    case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT,   false);
+    case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR,    false);
+    case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
+    case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
+    case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
+    case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false); 
+
+    case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true); 
+    case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
+    case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
+    case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
+    case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR,    true);
+    case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT,     true);
+    case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG,    true);
+    case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
+    case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
+
+    case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT,  true);
+    case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
+    case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE,    true);
+    case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT,   true);
+    case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR,    true);
+    case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT,     true);
+    case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG,    true);
+    case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
+    case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
 
     case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
     case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
@@ -3176,15 +3149,15 @@
       if (!InlineNIOCheckIndex) return false;
       preserves_state = true;
       break;
-    case vmIntrinsics::_putOrderedObject_obj : return append_unsafe_put_obj(callee, T_OBJECT,  true);
-    case vmIntrinsics::_putOrderedInt_obj    : return append_unsafe_put_obj(callee, T_INT,     true);
-    case vmIntrinsics::_putOrderedLong_obj   : return append_unsafe_put_obj(callee, T_LONG,    true);
-
-    case vmIntrinsics::_compareAndSwapLong_obj: 
+    case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT,  true);
+    case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
+    case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
+
+    case vmIntrinsics::_compareAndSwapLong: 
       if (!VM_Version::supports_cx8()) return false;
       // fall through
-    case vmIntrinsics::_compareAndSwapInt_obj: 
-    case vmIntrinsics::_compareAndSwapObject_obj: 
+    case vmIntrinsics::_compareAndSwapInt: 
+    case vmIntrinsics::_compareAndSwapObject: 
       append_unsafe_CAS(callee);
       return true; 
 
@@ -3694,30 +3667,6 @@
 }
 
 
-bool GraphBuilder::append_unsafe_get_obj32(ciMethod* callee, BasicType t) {
-  if (InlineUnsafeOps) {
-    Values* args = state()->pop_arguments(callee->arg_size());
-    null_check(args->at(0));
-    Instruction* op = append(new UnsafeGetObject(t, args->at(1), args->at(2), false));
-    push(op->type(), op);
-    compilation()->set_has_unsafe_access(true);
-  }
-  return InlineUnsafeOps;
-}
-
-
-bool GraphBuilder::append_unsafe_put_obj32(ciMethod* callee, BasicType t) {
-  if (InlineUnsafeOps) {
-    Values* args = state()->pop_arguments(callee->arg_size());
-    null_check(args->at(0));
-    Instruction* op = append(new UnsafePutObject(t, args->at(1), args->at(2), args->at(3), false));
-    compilation()->set_has_unsafe_access(true);
-    kill_all();
-  }
-  return InlineUnsafeOps;
-}
-
-
 bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
   if (InlineUnsafeOps) {
     Values* args = state()->pop_arguments(callee->arg_size());
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)c1_GraphBuilder.hpp	1.74 07/05/05 17:05:07 JVM"
+#pragma ident "@(#)c1_GraphBuilder.hpp	1.75 07/05/17 15:49:37 JVM"
 #endif
 /*
  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -340,8 +340,6 @@
   void pop_scope();
   void pop_scope_for_jsr();
 
-  bool append_unsafe_get_obj32(ciMethod* callee, BasicType t);
-  bool append_unsafe_put_obj32(ciMethod* callee, BasicType t);
   bool append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile);
   bool append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile);
   bool append_unsafe_get_raw(ciMethod* callee, BasicType t);
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_InstructionPrinter.cpp	1.124 07/05/05 17:05:07 JVM"
+#pragma ident "@(#)c1_InstructionPrinter.cpp	1.125 07/05/17 15:49:39 JVM"
 #endif
 /*
  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -511,115 +511,19 @@
 
 
 void InstructionPrinter::do_Intrinsic(Intrinsic* x) {
-  const char* name = "<unknown intrinsic>";
-  switch (x->id()) {
-    case vmIntrinsics::_Object_init   : name = "RegisterFinalizer";   break;
-    case vmIntrinsics::_getClass      : name = "Class.getClass";      break;
-    case vmIntrinsics::_currentThread : name = "Thread.currentThread";break;
-    case vmIntrinsics::_dlog          : name = "Math.dlog";           break;
-    case vmIntrinsics::_dlog10        : name = "Math.dlog10";         break;
-    case vmIntrinsics::_dabs          : name = "Math.dabs";           break;
-    case vmIntrinsics::_dsin          : name = "Math.dsin";           break;
-    case vmIntrinsics::_dcos          : name = "Math.dcos";           break;
-    case vmIntrinsics::_dsqrt         : name = "Math.dsqrt";          break;
-    case vmIntrinsics::_arraycopy     : name = "System.arraycopy";    break;
-    case vmIntrinsics::_compareTo     : name = "String.compareTo";    break;
-
-    case vmIntrinsics::_currentTimeMillis   : name = "System.currentTimeMillis";   break;
-    case vmIntrinsics::_nanoTime            : name = "System.nanoTime";            break;
-
-    case vmIntrinsics::_intBitsToFloat      : name = "Float.intBitsToFloat";       break;
-    case vmIntrinsics::_floatToRawIntBits   : name = "Float.floatToRawIntBits";    break;
-    case vmIntrinsics::_longBitsToDouble    : name = "Double.longBitsToDouble";    break;
-    case vmIntrinsics::_doubleToRawLongBits : name = "Double.doubleToRawLongBits"; break;
-
-    // %%% the following xxx_obj32 are temporary until the 1.4.0 sun.misc.Unsafe goes away
-    case vmIntrinsics::_getObject_obj32: name = "getObject_obj32"; break;
-    case vmIntrinsics::_getBoolean_obj32:name = "getBoolean_obj32";break;
-    case vmIntrinsics::_getByte_obj32  : name = "getByte_obj32";   break;
-    case vmIntrinsics::_getShort_obj32 : name = "getShort_obj32";  break;
-    case vmIntrinsics::_getChar_obj32  : name = "getChar_obj32";   break;
-    case vmIntrinsics::_getInt_obj32   : name = "getInt_obj32";    break;
-    case vmIntrinsics::_getLong_obj32  : name = "getLong_obj32";   break;
-    case vmIntrinsics::_getFloat_obj32 : name = "getFloat_obj32";  break;
-    case vmIntrinsics::_getDouble_obj32: name = "getDouble_obj32"; break;
-
-    case vmIntrinsics::_putObject_obj32: name = "putObject_obj32"; break;
-    case vmIntrinsics::_putBoolean_obj32:name = "putBoolean_obj32";break;
-    case vmIntrinsics::_putByte_obj32  : name = "putByte_obj32";   break;
-    case vmIntrinsics::_putShort_obj32 : name = "putShort_obj32";  break;
-    case vmIntrinsics::_putChar_obj32  : name = "putChar_obj32";   break;
-    case vmIntrinsics::_putInt_obj32   : name = "putInt_obj32";    break;
-    case vmIntrinsics::_putLong_obj32  : name = "putLong_obj32";   break;
-    case vmIntrinsics::_putFloat_obj32 : name = "putFloat_obj32";  break;
-    case vmIntrinsics::_putDouble_obj32: name = "putDouble_obj32"; break;
-
-    case vmIntrinsics::_getObject_obj : name = "getObject_obj";  break;
-    case vmIntrinsics::_getBoolean_obj: name = "getBoolean_obj"; break;
-    case vmIntrinsics::_getByte_obj   : name = "getByte_obj";    break;
-    case vmIntrinsics::_getShort_obj  : name = "getShort_obj";   break;
-    case vmIntrinsics::_getChar_obj   : name = "getChar_obj";    break;
-    case vmIntrinsics::_getInt_obj    : name = "getInt_obj";     break;
-    case vmIntrinsics::_getLong_obj   : name = "getLong_obj";    break;
-    case vmIntrinsics::_getFloat_obj  : name = "getFloat_obj";   break;
-    case vmIntrinsics::_getDouble_obj : name = "getDouble_obj";  break;
-
-    case vmIntrinsics::_putObject_obj : name = "putObject_obj";  break;
-    case vmIntrinsics::_putBoolean_obj: name = "putBoolean_obj"; break;
-    case vmIntrinsics::_putByte_obj   : name = "putByte_obj";    break;
-    case vmIntrinsics::_putShort_obj  : name = "putShort_obj";   break;
-    case vmIntrinsics::_putChar_obj   : name = "putChar_obj";    break;
-    case vmIntrinsics::_putInt_obj    : name = "putInt_obj";     break;
-    case vmIntrinsics::_putLong_obj   : name = "putLong_obj";    break;
-    case vmIntrinsics::_putFloat_obj  : name = "putFloat_obj";   break;
-    case vmIntrinsics::_putDouble_obj : name = "putDouble_obj";  break;
-
-    case vmIntrinsics::_getObjectVolatile_obj : name = "getObjectVolatile_obj";  break;
-    case vmIntrinsics::_getBooleanVolatile_obj: name = "getBooleanVolatile_obj"; break;
-    case vmIntrinsics::_getByteVolatile_obj   : name = "getByteVolatile_obj";    break;
-    case vmIntrinsics::_getShortVolatile_obj  : name = "getShortVolatile_obj";   break;
-    case vmIntrinsics::_getCharVolatile_obj   : name = "getCharVolatile_obj";    break;
-    case vmIntrinsics::_getIntVolatile_obj    : name = "getIntVolatile_obj";     break;
-    case vmIntrinsics::_getLongVolatile_obj   : name = "getLongVolatile_obj";    break;
-    case vmIntrinsics::_getFloatVolatile_obj  : name = "getFloatVolatile_obj";   break;
-    case vmIntrinsics::_getDoubleVolatile_obj : name = "getDoubleVolatile_obj";  break;
-
-    case vmIntrinsics::_putObjectVolatile_obj : name = "putObjectVolatile_obj";  break;
-    case vmIntrinsics::_putBooleanVolatile_obj: name = "putBooleanVolatile_obj"; break;
-    case vmIntrinsics::_putByteVolatile_obj   : name = "putByteVolatile_obj";    break;
-    case vmIntrinsics::_putShortVolatile_obj  : name = "putShortVolatile_obj";   break;
-    case vmIntrinsics::_putCharVolatile_obj   : name = "putCharVolatile_obj";    break;
-    case vmIntrinsics::_putIntVolatile_obj    : name = "putIntVolatile_obj";     break;
-    case vmIntrinsics::_putLongVolatile_obj   : name = "putLongVolatile_obj";    break;
-    case vmIntrinsics::_putFloatVolatile_obj  : name = "putFloatVolatile_obj";   break;
-    case vmIntrinsics::_putDoubleVolatile_obj : name = "putDoubleVolatile_obj";  break;
-
-    case vmIntrinsics::_getByte_raw   : name = "getByte_raw";    break;
-    case vmIntrinsics::_getShort_raw  : name = "getShort_raw";   break;
-    case vmIntrinsics::_getChar_raw   : name = "getChar_raw";    break;
-    case vmIntrinsics::_getInt_raw    : name = "getInt_raw";     break;
-    case vmIntrinsics::_getLong_raw   : name = "getLong_raw";    break;
-    case vmIntrinsics::_getFloat_raw  : name = "getFloat_raw";   break;
-    case vmIntrinsics::_getDouble_raw : name = "getDouble_raw";  break;
-
-    case vmIntrinsics::_putByte_raw   : name = "putByte_raw";    break;
-    case vmIntrinsics::_putShort_raw  : name = "putShort_raw";   break;
-    case vmIntrinsics::_putChar_raw   : name = "putChar_raw";    break;
-    case vmIntrinsics::_putInt_raw    : name = "putInt_raw";     break;
-    case vmIntrinsics::_putLong_raw   : name = "putLong_raw";    break;
-    case vmIntrinsics::_putFloat_raw  : name = "putFloat_raw";   break;
-    case vmIntrinsics::_putDouble_raw : name = "putDouble_raw";  break;
-
-    case vmIntrinsics::_checkIndex    : name = "NIO_checkIndex";  break;
-    case vmIntrinsics::_attemptUpdate : name = "AtomicLong_attemptUpdate";  break;
-    case vmIntrinsics::_compareAndSwapLong_obj:   name = "compareAndSwapLong_obj";   break;
-    case vmIntrinsics::_compareAndSwapObject_obj: name = "compareAndSwapObject_obj"; break;
-    case vmIntrinsics::_compareAndSwapInt_obj:    name = "compareAndSwapInt_obj";    break;
-    case vmIntrinsics::_putOrderedLong_obj:       name = "putOrderedLong_obj";       break;
-    case vmIntrinsics::_putOrderedObject_obj:     name = "putOrderedObject_obj";     break;
-    case vmIntrinsics::_putOrderedInt_obj:        name = "putOrderedInt_obj";        break;
+  const char* name = vmIntrinsics::name_at(x->id());
+  if (name[0] == '_')  name++;  // strip leading bug from _hashCode, etc.
+  const char* kname = vmSymbols::name_for(vmIntrinsics::class_for(x->id()));
+  if (strchr(name, '_') == NULL) {
+    kname = NULL;
+  } else {
+    const char* kptr = strrchr(kname, '/');
+    if (kptr != NULL)  kname = kptr + 1;
   }
-  output()->print("%s(", name);
+  if (kname == NULL)
+    output()->print("%s(", name);
+  else
+    output()->print("%s.%s(", kname, name);
   for (int i = 0; i < x->number_of_arguments(); i++) {
     if (i > 0) output()->print(", ");
     print_value(x->argument_at(i));
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_LIRGenerator.cpp	1.21 07/05/05 17:05:06 JVM"
+#pragma ident "@(#)c1_LIRGenerator.cpp	1.22 07/05/17 15:49:41 JVM"
 #endif
 /*
  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1629,38 +1629,14 @@
     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
   }
 
-#ifdef JVMPI_SUPPORT
-  if (compilation()->jvmpi_event_method_exit_enabled() &&
-      block()->is_set(BlockBegin::default_exception_handler_flag)) {
-    // This throw terminates the default exception handler block.
-    // Notify that runtime of the unwind of this frame.
+  // move exception oop into fixed register
+  __ move(exception_opr, exceptionOopOpr());
     
-    BasicTypeList signature;
-    signature.append(T_OBJECT); // exception
-    signature.append(T_OBJECT); // method
-
-    LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->encoding(), meth);
-
-    LIR_OprList* args = new LIR_OprList();
-    args->append(exception_opr);
-    args->append(meth);
-
-    // notify that we are exiting this method
-    call_runtime(&signature, args, Runtime1::entry_for(Runtime1::jvmpi_unwind_exception_id), voidType, info);
+  if (unwind) {
+    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
   } else {
-#endif // JVMPI_SUPPORT
-    // move exception oop into fixed register
-    __ move(exception_opr, exceptionOopOpr());
-    
-    if (unwind) {
-      __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
-    } else {
-      __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
-    }
-#ifdef JVMPI_SUPPORT
+    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
   }
-#endif // JVMPI_SUPPORT
 }
 
 
@@ -2067,12 +2043,7 @@
     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
   }
 
-#ifdef JVMPI_SUPPORT
-  if (method()->is_synchronized() ||
-      compilation()->jvmpi_event_method_entry_enabled()) {
-#else // !JVMPI_SUPPORT
   if (method()->is_synchronized()) {
-#endif // JVMPI_SUPPORT
     LIR_Opr obj;
     if (method()->is_static()) {
       obj = new_register(T_OBJECT);
@@ -2094,32 +2065,6 @@
       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
     }
-
-#ifdef JVMPI_SUPPORT
-    // If the method is synchronized we do the notification from the
-    // monitorenter path since we have to handle that path specially in
-    // case we deopt.  If we deoptimized in the monitorenter path and we
-    // wouldn't return to the nmethod so the notification would be
-    // skipped.  So for synchronized methods call into the runtime for
-    // the enter and perform the JVMPI notification there.
-    if (compilation()->jvmpi_event_method_entry_enabled()) {
-      BasicTypeList signature;
-      signature.append(T_OBJECT); // methodOop
-      signature.append(T_OBJECT); // receiver
-      LIR_OprList* args = new LIR_OprList();
-      LIR_Opr meth = new_register(T_OBJECT);
-      __ oop2reg(method()->encoding(), meth);
-      args->append(meth);
-      if (method()->is_static()) {
-        args->append(LIR_OprFact::oopConst(NULL));
-      } else {
-        args->append(obj);
-      }
-      
-      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
-      call_runtime(&signature, args, Runtime1::entry_for(Runtime1::jvmpi_method_entry_id), voidType, info);
-    }
-#endif // JVMPI_SUPPORT
   }
 
   // increment invocation counters if needed
@@ -2376,13 +2321,13 @@
   // java.nio.Buffer.checkIndex
   case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
 
-  case vmIntrinsics::_compareAndSwapObject_obj: 
+  case vmIntrinsics::_compareAndSwapObject: 
     do_CompareAndSwap(x, objectType); 
     break;
-  case vmIntrinsics::_compareAndSwapInt_obj: 
+  case vmIntrinsics::_compareAndSwapInt: 
     do_CompareAndSwap(x, intType); 
     break;
-  case vmIntrinsics::_compareAndSwapLong_obj: 
+  case vmIntrinsics::_compareAndSwapLong: 
     do_CompareAndSwap(x, longType); 
     break;
 
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c1_Runtime1.cpp	1.242 07/05/05 17:05:09 JVM"
+#pragma ident "@(#)c1_Runtime1.cpp	1.243 07/05/17 15:49:45 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -66,11 +66,7 @@
 bool      Runtime1::_is_initialized = false;
 CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
 const char *Runtime1::_blob_names[] = {
-  RUNTIME1_STUBS(STUB_NAME)
-#ifdef JVMPI_SUPPORT
-  RUNTIME1_JVMPI_STUBS(STUB_NAME)
-#endif // JVMPI_SUPPORT
-  LAST_STUB_NAME(number_of_ids)
+  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
 };
 
 #ifndef PRODUCT
@@ -169,6 +165,29 @@
   oop_maps = generate_code_for(id, sasm);
   assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
          "if stub has an oop map it must have a valid frame size");
+
+#ifdef ASSERT
+  // Make sure that stubs that need oopmaps have them
+  switch (id) {
+    // These stubs don't need to have an oopmap
+    case dtrace_object_alloc_id:
+    case slow_subtype_check_id:
+    case fpu2long_stub_id:
+    case unwind_exception_id:
+#ifndef TIERED
+    case counter_overflow_id: // Not generated outside the tiered world
+#endif
+#ifdef SPARC
+    case handle_exception_nofpu_id:  // Unused on sparc
+#endif
+      break;
+
+    // All other stubs should have oopmaps
+    default:
+      assert(oop_maps != NULL, "must have an oopmap");
+  }
+#endif
+
   // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
   sasm->align(BytesPerWord);
   // make sure all code is in code buffer
@@ -1140,22 +1159,6 @@
 JRT_END
 
 
-#ifdef JVMPI_SUPPORT
-JRT_ENTRY(void, Runtime1::jvmpi_method_entry_after_deopt(JavaThread* thread, oopDesc* receiver))
-  // Perform JVMPI method entry notification for a synchronized method activation
-  // that was deoptimized after its monitorenter operation.  First, get the method:
-  vframeStream vfst(thread, true);  // Do not skip and javaCalls
-  assert(!vfst.at_end(), "Java frame must exist");
-  methodHandle method (THREAD, vfst.method());
-  if (method()->is_static()) {
-    // clear the "receiver", which is really the method's class that was passed to monitorenter
-    receiver = NULL;
-  }
-  SharedRuntime::jvmpi_method_entry_work(thread, method(), receiver);
-JRT_END
-#endif // JVMPI_SUPPORT
-
-
 #ifndef PRODUCT
 void Runtime1::print_statistics() {
   tty->print_cr("C1 Runtime statistics:");
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)c1_Runtime1.hpp	1.139 07/05/05 17:05:09 JVM"
+#pragma ident "@(#)c1_Runtime1.hpp	1.140 07/05/17 15:49:48 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -31,7 +31,7 @@
 // runtime routines needed by code code generated
 // by the Compiler1.
 
-#define RUNTIME1_STUBS(stub) \
+#define RUNTIME1_STUBS(stub, last_entry) \
   stub(dtrace_object_alloc)          \
   stub(unwind_exception)             \
   stub(forward_exception)            \
@@ -60,15 +60,8 @@
   stub(load_klass_patching)          \
   stub(jvmti_exception_throw)        \
   stub(fpu2long_stub)                \
-  stub(counter_overflow)
-
-#ifdef JVMPI_SUPPORT
-#define RUNTIME1_JVMPI_STUBS(stub) \
-  stub(monitorenter_with_jvmpi) \
-  stub(jvmpi_method_entry)      \
-  stub(jvmpi_method_exit)       \
-  stub(jvmpi_unwind_exception)
-#endif // JVMPI_SUPPORT
+  stub(counter_overflow)             \
+  last_entry(number_of_ids)
 
 #define DECLARE_STUB_ID(x)       x ## _id ,
 #define DECLARE_LAST_STUB_ID(x)  x
@@ -93,11 +86,7 @@
 
  public:
   enum StubID {
-    RUNTIME1_STUBS(DECLARE_STUB_ID)
-#ifdef JVMPI_SUPPORT
-    RUNTIME1_JVMPI_STUBS(DECLARE_STUB_ID)
-#endif // JVMPI_SUPPORT
-    DECLARE_LAST_STUB_ID(number_of_ids)
+    RUNTIME1_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
   };
 
   // statistics
@@ -173,10 +162,6 @@
 
   static void patch_code(JavaThread* thread, StubID stub_id);
 
-#ifdef JVMPI_SUPPORT
-  static void jvmpi_method_entry_after_deopt(JavaThread* thread, oopDesc* receiver);
-#endif // JVMPI_SUPPORT
-
  public:
   static BufferBlob* get_buffer_blob();
   static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)bcEscapeAnalyzer.cpp	1.6 07/05/05 17:05:11 JVM"
+#pragma ident "@(#)bcEscapeAnalyzer.cpp	1.7 07/05/17 15:49:50 JVM"
 #endif
 /*
  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1056,7 +1056,7 @@
   vmIntrinsics::ID iid = method()->intrinsic_id();
 
   if (iid == vmIntrinsics::_getClass ||
-      iid == vmIntrinsics::_hash )
+      iid == vmIntrinsics::_hashCode)
     return iid;
   else
     return vmIntrinsics::_none;
@@ -1069,7 +1069,7 @@
   case vmIntrinsics::_getClass:
     _return_local = false;
     break;
-  case vmIntrinsics::_hash:
+  case vmIntrinsics::_hashCode:
     // initialized state is correct
     break;
   default:
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)ciEnv.cpp	1.127 07/05/05 17:05:12 JVM"
+#pragma ident "@(#)ciEnv.cpp	1.128 07/05/17 15:49:53 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -184,11 +184,7 @@
     if (!HAS_PENDING_EXCEPTION && k != NULL) {
       oop obj = instanceKlass::cast(k)->allocate_permanent_instance(THREAD);
       if (!HAS_PENDING_EXCEPTION)
-#ifdef JVMPI_SUPPORT
-        objh = JNIHandles::make_global(obj, false);
-#else // !JVMPI_SUPPORT
         objh = JNIHandles::make_global(obj);
-#endif // JVMPI_SUPPORT
     }
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
@@ -927,11 +923,7 @@
       }
     }
   }
-#ifdef JVMPI_SUPPORT
-  // JVMTI/JVMPI -- compiled method notification (must be done outside lock)
-#else // !JVMPI_SUPPORT
   // JVMTI -- compiled method notification (must be done outside lock)
-#endif // JVMPI_SUPPORT
   if (nm != NULL) {
     nm->post_compiled_method_load_event();
   }
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)ciInstanceKlass.cpp	1.43 07/05/05 17:05:13 JVM"
+#pragma ident "@(#)ciInstanceKlass.cpp	1.44 07/05/17 15:49:55 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -65,13 +65,8 @@
   } else {
     Handle h_loader(thread, ik->class_loader());
     Handle h_protection_domain(thread, ik->protection_domain());
-#ifdef JVMPI_SUPPORT
-    _loader = JNIHandles::make_global(h_loader, false);
-    _protection_domain = JNIHandles::make_global(h_protection_domain, false);
-#else // !JVMPI_SUPPORT
     _loader = JNIHandles::make_global(h_loader);
     _protection_domain = JNIHandles::make_global(h_protection_domain);
-#endif // JVMPI_SUPPORT
     _is_shared = true;
   }
   
--- a/hotspot/src/share/vm/ci/ciObject.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/ciObject.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)ciObject.cpp	1.27 07/05/05 17:05:14 JVM"
+#pragma ident "@(#)ciObject.cpp	1.28 07/05/17 15:49:59 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -54,11 +54,7 @@
   if (ciObjectFactory::is_initialized()) {
     _handle = JNIHandles::make_local(o);
   } else {
-#ifdef JVMPI_SUPPORT
-    _handle = JNIHandles::make_global(o, false);
-#else // !JVMPI_SUPPORT
     _handle = JNIHandles::make_global(o);
-#endif // JVMPI_SUPPORT
   }
   _klass = NULL;
   _ident = 0;
@@ -72,11 +68,7 @@
   if (ciObjectFactory::is_initialized()) {
     _handle = JNIHandles::make_local(h());
   } else {
-#ifdef JVMPI_SUPPORT
-    _handle = JNIHandles::make_global(h, false);
-#else // !JVMPI_SUPPORT
     _handle = JNIHandles::make_global(h);
-#endif // JVMPI_SUPPORT
   }
   _klass = NULL;
   _ident = 0;
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)ciObjectFactory.cpp	1.38 07/05/05 17:05:15 JVM"
+#pragma ident "@(#)ciObjectFactory.cpp	1.39 07/05/17 15:50:05 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -46,8 +46,7 @@
 // sort of balanced binary tree.
 
 GrowableArray<ciObject*>* ciObjectFactory::_shared_ci_objects = NULL;
-GrowableArray<ciObject*>* ciObjectFactory::_shared_ci_symbols = NULL;
-GrowableArray<int>*       ciObjectFactory::_shared_ci_symbol_map = NULL;
+ciSymbol*                 ciObjectFactory::_shared_ci_symbols[vmSymbols::SID_LIMIT];
 int                       ciObjectFactory::_shared_ident_limit = 0;
 volatile bool             ciObjectFactory::_initialized = false;
 
@@ -103,33 +102,19 @@
   
   {
     // Create the shared symbols, but not in _shared_ci_objects.
-    const int sym_count = (int) vmSymbolHandles::symbol_handle_count();
     int i;
-    GrowableArray<ciObject*>* syms = new (_arena) GrowableArray<ciObject*>(_arena, sym_count, 0, NULL);
-    for (i = 0; i < sym_count; i++) {
-      symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at(i);
+    for (i = vmSymbols::FIRST_SID; i < vmSymbols::SID_LIMIT; i++) {
+      symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at((vmSymbols::SID) i);
+      assert(vmSymbols::find_sid(sym_handle()) == i, "1-1 mapping");
       ciSymbol* sym = new (_arena) ciSymbol(sym_handle);
-      int index = find(sym_handle(), syms);
-      if (!is_found_at(index, sym_handle(), syms)) {
-        init_ident_of(sym);
-	insert(index, sym, syms);
-      }
+      init_ident_of(sym);
+      _shared_ci_symbols[i] = sym;
     }
-    GrowableArray<int>* map = new (_arena) GrowableArray<int>(_arena, sym_count, 0, 0);
-    for (i = 0; i < sym_count; i++) {
-      symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at(i);
-      int index = find(sym_handle(), syms);
-      assert(is_found_at(index, sym_handle(), syms), "must be valid index");
-      map->append(index);
-    }
-    _shared_ci_symbols = syms;
-    _shared_ci_symbol_map = map;
 #ifdef ASSERT
-    for (i = 0; i < sym_count; i++) {
-      symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at(i);
-      ciSymbol* sym = vm_symbol_at(i);
+    for (i = vmSymbols::FIRST_SID; i < vmSymbols::SID_LIMIT; i++) {
+      symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at((vmSymbols::SID) i);
+      ciSymbol* sym = vm_symbol_at((vmSymbols::SID) i);
       assert(sym->get_oop() == sym_handle(), "oop must match");
-      assert(map->at(i) == find(sym_handle(), syms), "index must match");
     }
     assert(ciSymbol::void_class_signature()->get_oop() == vmSymbols::void_class_signature(), "spot check");
 #endif
@@ -263,10 +248,10 @@
 
     // Check in the shared symbol area before putting it in the list.
     if (key->is_symbol()) {
-      int sym_index = find(key, _shared_ci_symbols);
-      if (is_found_at(sym_index, key, _shared_ci_symbols)) {
+      vmSymbols::SID sid = vmSymbols::find_sid((symbolOop)key);
+      if (sid != vmSymbols::NO_SID) {
 	// do not pollute the main cache with it
-	return _shared_ci_symbols->at(sym_index);
+        return vm_symbol_at(sid);
       }
     }
 
@@ -631,9 +616,8 @@
 // ciObjectFactory::vm_symbol_at
 // Get the ciSymbol corresponding to some index in vmSymbols.
 ciSymbol* ciObjectFactory::vm_symbol_at(int index) {
-  int map_index = _shared_ci_symbol_map->at(index);
-  ciObject* sym = _shared_ci_symbols->at(map_index);
-  return sym->as_symbol();
+  assert(index >= vmSymbols::FIRST_SID && index < vmSymbols::SID_LIMIT, "oob");
+  return _shared_ci_symbols[index];
 }
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)ciObjectFactory.hpp	1.19 07/05/05 17:05:16 JVM"
+#pragma ident "@(#)ciObjectFactory.hpp	1.20 07/05/17 15:50:07 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -35,8 +35,7 @@
 private:
   static volatile bool _initialized;
   static GrowableArray<ciObject*>* _shared_ci_objects;
-  static GrowableArray<ciObject*>* _shared_ci_symbols;
-  static GrowableArray<int>*       _shared_ci_symbol_map;
+  static ciSymbol*                 _shared_ci_symbols[];
   static int                       _shared_ident_limit;
 
   Arena*                    _arena;
--- a/hotspot/src/share/vm/ci/ciSymbol.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/ci/ciSymbol.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)ciSymbol.hpp	1.14 07/05/05 17:05:17 JVM"
+#pragma ident "@(#)ciSymbol.hpp	1.15 07/05/17 15:50:09 JVM"
 #endif
 /*
  * Copyright 1999-2001 Sun Microsystems, Inc.  All Rights Reserved.
@@ -71,8 +71,8 @@
   // (Your code will be less subject to typographical bugs.)
   static ciSymbol* make(const char* s);
 
-#define CI_SYMBOL_DECLARE(name, string) \
+#define CI_SYMBOL_DECLARE(name, ignore_def) \
   static ciSymbol* name() { return ciObjectFactory::vm_symbol_at(vmSymbols::VM_SYMBOL_ENUM_NAME(name)); }
-  VM_SYMBOLS_DO(CI_SYMBOL_DECLARE)
+  VM_SYMBOLS_DO(CI_SYMBOL_DECLARE, CI_SYMBOL_DECLARE)
 #undef CI_SYMBOL_DECLARE
 };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classFileError.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,64 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)classFileError.cpp	1.12 07/05/05 17:06:44 JVM"
+#endif
+/*
+ * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_classFileError.cpp.incl"
+
+// Keep these in a separate file to prevent inlining
+
+void ClassFileParser::classfile_parse_error(const char* msg, TRAPS) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
+                       msg, _class_name->as_C_string());
+}
+
+void ClassFileParser::classfile_parse_error(const char* msg, int index, TRAPS) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
+                       msg, index, _class_name->as_C_string());
+}
+
+void ClassFileParser::classfile_parse_error(const char* msg, const char *name, TRAPS) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
+                       msg, name, _class_name->as_C_string());
+}
+
+void ClassFileParser::classfile_parse_error(const char* msg, int index, const char *name, TRAPS) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
+                       msg, index, name, _class_name->as_C_string());
+}
+
+void StackMapStream::stackmap_format_error(const char* msg, TRAPS) {
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(
+    THREAD_AND_LOCATION,
+    vmSymbolHandles::java_lang_ClassFormatError(),
+    "StackMapTable format error: %s", msg
+  );
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,4019 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)classFileParser.cpp	1.278 07/05/17 15:50:13 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_classFileParser.cpp.incl"
+
+// We generally try to create the oops directly when parsing, rather than allocating
+// temporary data structures and copying the bytes twice. A temporary area is only
+// needed when parsing utf8 entries in the constant pool and when parsing line number
+// tables.
+
+// We add assert in debug mode when class format is not checked.
+
+#define JAVA_CLASSFILE_MAGIC              0xCAFEBABE
+#define JAVA_MIN_SUPPORTED_VERSION        45
+#define JAVA_MAX_SUPPORTED_VERSION        50
+#define JAVA_MAX_SUPPORTED_MINOR_VERSION  0
+
+// Used for two backward compatibility reasons:
+// - to check for new additions to the class file format in JDK1.5
+// - to check for bug fixes in the format checker in JDK1.5
+#define JAVA_1_5_VERSION                  49
+
+// Used for backward compatibility reasons:
+// - to check for javac bug fixes that happened after 1.5
+#define JAVA_6_VERSION                    50 
+
+
+// First pass over the constant pool: read the raw entries from the class
+// file stream into cp, checking only that each entry's bytes are present
+// (guarantee_more) and, when verification is enabled, that Utf8 entries are
+// legal UTF-8. Cross-references between entries are validated afterwards in
+// parse_constant_pool(); Class and String entries are stored temporarily as
+// ClassIndex/StringIndex tags and fixed up there. Utf8 symbols not already
+// interned are allocated in batches to reduce allocation overhead.
+void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) {
+  // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
+  // this function (_current can be allocated in a register, with scalar
+  // replacement of aggregates). The _current pointer is copied back to
+  // stream() when this function returns. DON'T call another method within
+  // this method that uses stream().
+  ClassFileStream* cfs0 = stream();
+  ClassFileStream cfs1 = *cfs0;
+  ClassFileStream* cfs = &cfs1;
+#ifdef ASSERT
+  u1* old_current = cfs0->current();
+#endif
+
+  // Used for batching symbol allocations.
+  const char* names[SymbolTable::symbol_alloc_batch_size];
+  int lengths[SymbolTable::symbol_alloc_batch_size];
+  int indices[SymbolTable::symbol_alloc_batch_size];
+  unsigned int hashValues[SymbolTable::symbol_alloc_batch_size];
+  int names_count = 0;
+    
+  // Parse the entries; index 0 is unused.
+  for (int index = 1; index < length; index++) {
+    // Each of the following case guarantees one more byte in the stream
+    // for the following tag or the access_flags following constant pool,
+    // so we don't need bounds-check for reading tag.
+    u1 tag = cfs->get_u1_fast();
+    switch (tag) {
+      case JVM_CONSTANT_Class :
+        {
+          cfs->guarantee_more(3, CHECK);  // name_index, tag/access_flags
+          u2 name_index = cfs->get_u2_fast();
+          cp->klass_index_at_put(index, name_index);
+        }
+        break;
+      case JVM_CONSTANT_Fieldref :
+        {
+          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
+          u2 class_index = cfs->get_u2_fast();
+          u2 name_and_type_index = cfs->get_u2_fast();
+          cp->field_at_put(index, class_index, name_and_type_index);
+        }
+        break;
+      case JVM_CONSTANT_Methodref :
+        {
+          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
+          u2 class_index = cfs->get_u2_fast();
+          u2 name_and_type_index = cfs->get_u2_fast();
+          cp->method_at_put(index, class_index, name_and_type_index);
+        }
+        break;
+      case JVM_CONSTANT_InterfaceMethodref :
+        {
+          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
+          u2 class_index = cfs->get_u2_fast();
+          u2 name_and_type_index = cfs->get_u2_fast();
+          cp->interface_method_at_put(index, class_index, name_and_type_index);
+        }
+        break;
+      case JVM_CONSTANT_String :
+        {
+          cfs->guarantee_more(3, CHECK);  // string_index, tag/access_flags
+          u2 string_index = cfs->get_u2_fast();
+          cp->string_index_at_put(index, string_index);
+        }
+        break;
+      case JVM_CONSTANT_Integer :
+        {
+          cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
+          u4 bytes = cfs->get_u4_fast();
+          cp->int_at_put(index, (jint) bytes);
+        }
+        break;
+      case JVM_CONSTANT_Float :
+        {
+          cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
+          u4 bytes = cfs->get_u4_fast();
+          cp->float_at_put(index, *(jfloat*)&bytes);
+        }
+        break;
+      case JVM_CONSTANT_Long :
+        // A mangled type might cause you to overrun allocated memory
+        guarantee_property(index+1 < length, 
+                           "Invalid constant pool entry %u in class file %s", 
+                           index, CHECK);
+        {
+          cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
+          u8 bytes = cfs->get_u8_fast();
+          cp->long_at_put(index, bytes);
+        }
+        index++;   // Skip entry following eighth-byte constant, see JVM book p. 98
+        break;
+      case JVM_CONSTANT_Double :
+        // A mangled type might cause you to overrun allocated memory
+        guarantee_property(index+1 < length, 
+                           "Invalid constant pool entry %u in class file %s", 
+                           index, CHECK);
+        {
+          cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
+          u8 bytes = cfs->get_u8_fast();
+          cp->double_at_put(index, *(jdouble*)&bytes);
+        }
+        index++;   // Skip entry following eighth-byte constant, see JVM book p. 98
+        break;
+      case JVM_CONSTANT_NameAndType :
+        {
+          cfs->guarantee_more(5, CHECK);  // name_index, signature_index, tag/access_flags
+          u2 name_index = cfs->get_u2_fast();
+          u2 signature_index = cfs->get_u2_fast();
+          cp->name_and_type_at_put(index, name_index, signature_index);
+        }
+        break;
+      case JVM_CONSTANT_Utf8 :
+        {
+          cfs->guarantee_more(2, CHECK);  // utf8_length
+          u2  utf8_length = cfs->get_u2_fast();
+          u1* utf8_buffer = cfs->get_u1_buffer();
+          assert(utf8_buffer != NULL, "null utf8 buffer");
+          // Got utf8 string, guarantee utf8_length+1 bytes, set stream position forward.
+          cfs->guarantee_more(utf8_length+1, CHECK);  // utf8 string, tag/access_flags
+          cfs->skip_u1_fast(utf8_length);
+          // Before storing the symbol, make sure it's legal
+          if (_need_verify) {
+            verify_legal_utf8((unsigned char*)utf8_buffer, utf8_length, CHECK);
+          }
+
+          // Intern the symbol; if not yet present, queue it for batched
+          // allocation instead of allocating one symbol at a time.
+          unsigned int hash;
+          symbolOop result = SymbolTable::lookup_only((char*)utf8_buffer, utf8_length, hash);
+          if (result == NULL) {
+            names[names_count] = (char*)utf8_buffer;
+            lengths[names_count] = utf8_length;
+            indices[names_count] = index;
+            hashValues[names_count++] = hash;
+            if (names_count == SymbolTable::symbol_alloc_batch_size) {
+              oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK);
+              names_count = 0;
+            }
+          } else {
+            cp->symbol_at_put(index, result);
+          }
+        }
+        break;
+      default:
+        classfile_parse_error(
+          "Unknown constant tag %u in class file %s", tag, CHECK);
+        break;
+    }
+  }
+
+  // Allocate the remaining symbols
+  if (names_count > 0) {
+    oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK);
+  }
+
+  // Copy _current pointer of local copy back to stream().
+#ifdef ASSERT
+  assert(cfs0->current() == old_current, "non-exclusive use of stream()");
+#endif
+  cfs0->set_current(cfs1.current());
+}
+
+// Returns true if index refers to a usable constant pool slot; slot 0 is
+// reserved, so valid indices are 1 .. length-1.
+inline bool valid_cp_range(int index, int length) { return (index > 0 && index < length); }
+
+// Parse the constant pool: allocate the constantPoolOop, read the raw
+// entries (parse_constant_pool_entries), then make two validation passes.
+// The first pass checks cross-references between entries and rewrites the
+// temporary ClassIndex/StringIndex tags into unresolved class/string
+// entries; the second pass (verification only) checks that class, field
+// and method names and signatures are well-formed.
+constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
+  ClassFileStream* cfs = stream();
+  constantPoolHandle nullHandle;
+
+  cfs->guarantee_more(3, CHECK_(nullHandle)); // length, first cp tag
+  u2 length = cfs->get_u2_fast();
+  guarantee_property(
+    length >= 1, "Illegal constant pool size %u in class file %s", 
+    length, CHECK_(nullHandle));
+  constantPoolOop constant_pool =
+                      oopFactory::new_constantPool(length, CHECK_(nullHandle));
+  constantPoolHandle cp (THREAD, constant_pool);
+  
+  cp->set_partially_loaded();    // Enables heap verify to work on partial constantPoolOops
+
+  // parsing constant pool entries
+  parse_constant_pool_entries(cp, length, CHECK_(nullHandle));
+
+  int index = 1;  // declared outside of loops for portability
+
+  // first verification pass - validate cross references and fixup class and string constants
+  for (index = 1; index < length; index++) {          // Index 0 is unused
+    switch (cp->tag_at(index).value()) {
+      case JVM_CONSTANT_Class :
+        ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
+        break;
+      case JVM_CONSTANT_Fieldref :
+        // fall through
+      case JVM_CONSTANT_Methodref :
+        // fall through
+      case JVM_CONSTANT_InterfaceMethodref : {
+        if (!_need_verify) break;
+        int klass_ref_index = cp->klass_ref_index_at(index);
+        int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
+        check_property(valid_cp_range(klass_ref_index, length) &&
+                       cp->tag_at(klass_ref_index).is_klass_reference(), 
+                       "Invalid constant pool index %u in class file %s", 
+                       klass_ref_index, 
+                       CHECK_(nullHandle));
+        check_property(valid_cp_range(name_and_type_ref_index, length) &&
+                       cp->tag_at(name_and_type_ref_index).is_name_and_type(), 
+                       "Invalid constant pool index %u in class file %s", 
+                       name_and_type_ref_index,
+                       CHECK_(nullHandle));
+        break;
+      }
+      case JVM_CONSTANT_String :
+        ShouldNotReachHere();     // Only JVM_CONSTANT_StringIndex should be present
+        break;
+      case JVM_CONSTANT_Integer :
+        break;
+      case JVM_CONSTANT_Float :
+        break;
+      case JVM_CONSTANT_Long :
+      case JVM_CONSTANT_Double :
+        // Longs/doubles occupy two slots; the second must carry the
+        // invalid tag left there by the first parsing pass.
+        index++;
+        check_property(
+          (index < length && cp->tag_at(index).is_invalid()), 
+          "Improper constant pool long/double index %u in class file %s", 
+          index, CHECK_(nullHandle));
+        break;
+      case JVM_CONSTANT_NameAndType : {
+        if (!_need_verify) break;
+        int name_ref_index = cp->name_ref_index_at(index);
+        int signature_ref_index = cp->signature_ref_index_at(index);
+        check_property(
+          valid_cp_range(name_ref_index, length) && 
+            cp->tag_at(name_ref_index).is_utf8(), 
+          "Invalid constant pool index %u in class file %s", 
+          name_ref_index, CHECK_(nullHandle));
+        check_property(
+          valid_cp_range(signature_ref_index, length) && 
+            cp->tag_at(signature_ref_index).is_utf8(), 
+          "Invalid constant pool index %u in class file %s", 
+          signature_ref_index, CHECK_(nullHandle));
+        break;
+      }
+      case JVM_CONSTANT_Utf8 :
+        break;
+      case JVM_CONSTANT_UnresolvedClass :	  // fall-through
+      case JVM_CONSTANT_UnresolvedClassInError:
+        ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
+        break;
+      case JVM_CONSTANT_ClassIndex :
+        {
+          // Fix up: replace the index placeholder with the named symbol.
+          int class_index = cp->klass_index_at(index);
+          check_property(
+            valid_cp_range(class_index, length) && 
+              cp->tag_at(class_index).is_utf8(), 
+            "Invalid constant pool index %u in class file %s", 
+            class_index, CHECK_(nullHandle));
+          cp->unresolved_klass_at_put(index, cp->symbol_at(class_index));
+        }
+        break;
+      case JVM_CONSTANT_UnresolvedString :
+        ShouldNotReachHere();     // Only JVM_CONSTANT_StringIndex should be present
+        break;
+      case JVM_CONSTANT_StringIndex :
+        {
+          // Fix up: replace the index placeholder with the string's symbol.
+          int string_index = cp->string_index_at(index);
+          check_property(
+            valid_cp_range(string_index, length) && 
+              cp->tag_at(string_index).is_utf8(), 
+            "Invalid constant pool index %u in class file %s", 
+            string_index, CHECK_(nullHandle));
+          symbolOop sym = cp->symbol_at(string_index);
+          cp->unresolved_string_at_put(index, sym);
+        }
+        break;
+      default:
+        fatal1("bad constant pool tag value %u", cp->tag_at(index).value());
+        ShouldNotReachHere();
+        break;
+    } // end of switch
+  } // end of for
+
+  if (!_need_verify) {
+    return cp;
+  }
+
+  // second verification pass - checks the strings are of the right format.
+  for (index = 1; index < length; index++) {
+    jbyte tag = cp->tag_at(index).value();
+    switch (tag) {
+      case JVM_CONSTANT_UnresolvedClass: {
+        symbolHandle class_name(THREAD, cp->unresolved_klass_at(index));
+        verify_legal_class_name(class_name, CHECK_(nullHandle));
+        break;
+      }
+      case JVM_CONSTANT_Fieldref:
+      case JVM_CONSTANT_Methodref:
+      case JVM_CONSTANT_InterfaceMethodref: {
+        int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
+        // already verified to be utf8
+        int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);  
+        // already verified to be utf8
+        int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index); 
+        symbolHandle name(THREAD, cp->symbol_at(name_ref_index));
+        symbolHandle signature(THREAD, cp->symbol_at(signature_ref_index));
+        if (tag == JVM_CONSTANT_Fieldref) {
+          verify_legal_field_name(name, CHECK_(nullHandle));
+          verify_legal_field_signature(name, signature, CHECK_(nullHandle));
+        } else {
+          verify_legal_method_name(name, CHECK_(nullHandle));
+          verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+          if (tag == JVM_CONSTANT_Methodref) {
+            // 4509014: If a class method name begins with '<', it must be "<init>".
+            assert(!name.is_null(), "method name in constant pool is null");
+            unsigned int name_len = name->utf8_length();
+            assert(name_len > 0, "bad method name");  // already verified as legal name
+            if (name->byte_at(0) == '<') {
+              if (name() != vmSymbols::object_initializer_name()) {
+                classfile_parse_error(
+                  "Bad method name at constant pool index %u in class file %s", 
+                  name_ref_index, CHECK_(nullHandle));
+              }
+            }
+          }
+        }
+        break;
+      }                                                  
+    }  // end of switch
+  }  // end of for
+  
+  return cp;
+}
+
+
+// Hash-table entry used while parsing to detect duplicate (name, signature)
+// pairs and duplicate interface names. Entries are resource-allocated and
+// chained from a HASH_ROW_SIZE-wide bucket array.
+class NameSigHash: public ResourceObj {
+ public:
+  symbolOop     _name;       // name
+  symbolOop     _sig;        // signature (NULL when only names are compared)
+  NameSigHash*  _next;       // Next entry in hash table
+};
+
+
+#define HASH_ROW_SIZE 256
+
+// Compute a NameSigHash bucket index from the raw addresses of the name and
+// signature symbols. Valid only while GC cannot move the symbols (callers
+// guard with No_Safepoint_Verifier); sig may be NULL when only names matter.
+unsigned int hash(symbolOop name, symbolOop sig) {
+  unsigned int raw_hash = 0;
+  // Shift out the low-order address bits, which carry little entropy for
+  // word-aligned pointers.
+  raw_hash += ((unsigned int)(uintptr_t)name) >> (LogHeapWordSize + 2);
+  raw_hash += ((unsigned int)(uintptr_t)sig) >> LogHeapWordSize;
+
+  return (raw_hash + (unsigned int)(uintptr_t)name) % HASH_ROW_SIZE;
+}
+
+
+// Reset every bucket head in the NameSigHash table to an empty chain.
+void initialize_hashtable(NameSigHash** table) {
+  for (int i = 0; i < HASH_ROW_SIZE; i++) {
+    table[i] = NULL;
+  }
+}
+
+// Probe table for the (name, sig) pair. Returns false when the pair is
+// already present (a duplicate). Otherwise records it in a fresh
+// resource-allocated entry at the head of its bucket chain and returns
+// true. The old format checker used heap sort to find duplicates instead.
+// NOTE: caller must guarantee that GC cannot happen while the table is
+// live, because buckets are derived from raw symbolOop addresses.
+bool put_after_lookup(symbolOop name, symbolOop sig, NameSigHash** table) {
+  assert(name != NULL, "name in constant pool is NULL");
+
+  int index = hash(name, sig);
+
+  // Scan the bucket chain for an existing (name, sig) pair.
+  for (NameSigHash* probe = table[index]; probe != NULL; probe = probe->_next) {
+    if (probe->_name == name && probe->_sig == sig) {
+      return false;   // duplicate found
+    }
+  }
+
+  // Not found: push a new entry onto the front of the chain.
+  NameSigHash* entry = new NameSigHash();
+  entry->_name = name;
+  entry->_sig  = sig;
+  entry->_next = table[index];
+  table[index] = entry;
+
+  return true;
+}
+
+
+// Parse the interfaces section of the class file: for each constant pool
+// index, resolve the named interface via resolve_super_or_fail (which also
+// checks class circularity), verify the resolved klass really is an
+// interface, and — when verification is on — reject duplicate interface
+// names. Returns an objArray of the resolved interface klasses; the
+// vmtimer is suspended around resolution so recursive loading is not
+// double-counted.
+objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+                                                 int length,
+                                                 Handle class_loader, 
+                                                 Handle protection_domain,
+                                                 PerfTraceTime* vmtimer,
+                                                 symbolHandle class_name,
+                                                 TRAPS) {  
+  ClassFileStream* cfs = stream();
+  assert(length > 0, "only called for length>0");
+  objArrayHandle nullHandle;
+  objArrayOop interface_oop = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+  objArrayHandle interfaces (THREAD, interface_oop);
+
+  int index;
+  for (index = 0; index < length; index++) {
+    u2 interface_index = cfs->get_u2(CHECK_(nullHandle));
+    check_property(
+      valid_cp_range(interface_index, cp->length()) && 
+        cp->tag_at(interface_index).is_unresolved_klass(), 
+      "Interface name has bad constant pool index %u in class file %s", 
+      interface_index, CHECK_(nullHandle));
+    symbolHandle unresolved_klass (THREAD, cp->klass_name_at(interface_index));
+
+    // Don't need to check legal name because it's checked when parsing constant pool.
+    // But need to make sure it's not an array type.
+    guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY, 
+                       "Bad interface name in class file %s", CHECK_(nullHandle));
+
+    vmtimer->suspend();  // do not count recursive loading twice
+    // NOTE(review): if resolve_super_or_fail throws, CHECK_ returns before
+    // resume() runs, leaving the timer suspended — confirm this is intended.
+    // Call resolve_super so classcircularity is checked
+    klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+                  unresolved_klass, class_loader, protection_domain, 
+                  false, CHECK_(nullHandle));
+    KlassHandle interf (THREAD, k);
+    vmtimer->resume();
+
+    if (!Klass::cast(interf())->is_interface()) {
+      THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", nullHandle);
+    }
+    interfaces->obj_at_put(index, interf());
+  }
+
+  if (!_need_verify || length <= 1) {
+    return interfaces;
+  }
+
+  // Check if there's any duplicates in interfaces
+  ResourceMark rm(THREAD);
+  NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(
+    THREAD, NameSigHash*, HASH_ROW_SIZE);
+  initialize_hashtable(interface_names);
+  bool dup = false;
+  {
+    // No GC allowed here: the hash table keys on raw symbolOop addresses.
+    debug_only(No_Safepoint_Verifier nsv;)
+    for (index = 0; index < length; index++) {
+      klassOop k = (klassOop)interfaces->obj_at(index);
+      symbolOop name = instanceKlass::cast(k)->name();
+      // If no duplicates, add (name, NULL) in hashtable interface_names.
+      if (!put_after_lookup(name, NULL, interface_names)) {
+        dup = true;
+        break;
+      }
+    }
+  }
+  if (dup) {
+    classfile_parse_error("Duplicate interface name in class file %s",
+                          CHECK_(nullHandle));
+  }
+
+  return interfaces;
+}
+
+
+// Check that a ConstantValue attribute's constant pool entry exists and
+// that its tag matches the field's declared type: long/float/double/int
+// families must match exactly; object fields must be java.lang.String with
+// a (possibly unresolved) string constant. Throws ClassFormatError on
+// mismatch.
+void ClassFileParser::verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS) {
+  // Make sure the constant pool entry is of a type appropriate to this field
+  guarantee_property(
+    (constantvalue_index > 0 && 
+      constantvalue_index < cp->length()), 
+    "Bad initial value index %u in ConstantValue attribute in class file %s", 
+    constantvalue_index, CHECK); 
+  constantTag value_type = cp->tag_at(constantvalue_index);
+  switch ( cp->basic_type_for_signature_at(signature_index) ) {
+    case T_LONG:
+      guarantee_property(value_type.is_long(), "Inconsistent constant value type in class file %s", CHECK);
+      break;
+    case T_FLOAT:
+      guarantee_property(value_type.is_float(), "Inconsistent constant value type in class file %s", CHECK);
+      break;
+    case T_DOUBLE:
+      guarantee_property(value_type.is_double(), "Inconsistent constant value type in class file %s", CHECK);
+      break;
+    case T_BYTE: case T_CHAR: case T_SHORT: case T_BOOLEAN: case T_INT:
+      guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
+      break;
+    case T_OBJECT: 
+      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18) 
+                         && (value_type.is_string() || value_type.is_unresolved_string())),
+                         "Bad string initial value in class file %s", CHECK);
+      break;
+    default:
+      classfile_parse_error(
+        "Unable to set initial value %u in class file %s", 
+        constantvalue_index, CHECK);
+  }
+}
+
+
+// Parse attributes for a field. Recognizes ConstantValue (static fields
+// only — the attribute is read but has no effect for non-statics),
+// Synthetic, Deprecated, and — for class files of version >= JDK 1.5 —
+// Signature plus runtime visible/invisible annotations. Unknown attributes
+// are skipped. Results are returned through the *_addr out-parameters and
+// *field_annotations.
+void ClassFileParser::parse_field_attributes(constantPoolHandle cp,
+                                             u2 attributes_count,
+                                             bool is_static, u2 signature_index,
+                                             u2* constantvalue_index_addr,
+                                             bool* is_synthetic_addr,
+                                             u2* generic_signature_index_addr,
+                                             typeArrayHandle* field_annotations,
+                                             TRAPS) {
+  ClassFileStream* cfs = stream();
+  assert(attributes_count > 0, "length should be greater than 0");
+  u2 constantvalue_index = 0;
+  u2 generic_signature_index = 0;
+  bool is_synthetic = false;
+  u1* runtime_visible_annotations = NULL;
+  int runtime_visible_annotations_length = 0;
+  u1* runtime_invisible_annotations = NULL;
+  int runtime_invisible_annotations_length = 0;
+  while (attributes_count--) {
+    cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
+    u2 attribute_name_index = cfs->get_u2_fast();
+    u4 attribute_length = cfs->get_u4_fast();
+    check_property(valid_cp_range(attribute_name_index, cp->length()) &&
+                   cp->tag_at(attribute_name_index).is_utf8(), 
+                   "Invalid field attribute index %u in class file %s", 
+                   attribute_name_index,
+                   CHECK);
+    symbolOop attribute_name = cp->symbol_at(attribute_name_index);
+    if (is_static && attribute_name == vmSymbols::tag_constant_value()) { 
+      // ignore if non-static   
+      if (constantvalue_index != 0) {
+        classfile_parse_error("Duplicate ConstantValue attribute in class file %s", CHECK);
+      }
+      check_property(
+        attribute_length == 2, 
+        "Invalid ConstantValue field attribute length %u in class file %s", 
+        attribute_length, CHECK);
+      constantvalue_index = cfs->get_u2(CHECK);
+      if (_need_verify) { 
+        verify_constantvalue(constantvalue_index, signature_index, cp, CHECK); 
+      }
+    } else if (attribute_name == vmSymbols::tag_synthetic()) {
+      if (attribute_length != 0) {
+        classfile_parse_error(
+          "Invalid Synthetic field attribute length %u in class file %s", 
+          attribute_length, CHECK);
+      }
+      is_synthetic = true;
+    } else if (attribute_name == vmSymbols::tag_deprecated()) { // 4276120
+      if (attribute_length != 0) {
+        classfile_parse_error(
+          "Invalid Deprecated field attribute length %u in class file %s", 
+          attribute_length, CHECK);
+      }
+    } else if (_major_version >= JAVA_1_5_VERSION) {
+      if (attribute_name == vmSymbols::tag_signature()) {
+        if (attribute_length != 2) {
+          classfile_parse_error(
+            "Wrong size %u for field's Signature attribute in class file %s", 
+            attribute_length, CHECK);
+        }
+        generic_signature_index = cfs->get_u2(CHECK);
+      } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
+        // Remember the raw annotation bytes; they are assembled below.
+        runtime_visible_annotations_length = attribute_length;
+        runtime_visible_annotations = cfs->get_u1_buffer();
+        assert(runtime_visible_annotations != NULL, "null visible annotations");
+        cfs->skip_u1(runtime_visible_annotations_length, CHECK);
+      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
+        runtime_invisible_annotations_length = attribute_length;
+        runtime_invisible_annotations = cfs->get_u1_buffer();
+        assert(runtime_invisible_annotations != NULL, "null invisible annotations");
+        cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
+      } else {
+        cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
+      }
+    } else {
+      cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
+    }
+  }
+
+  *constantvalue_index_addr = constantvalue_index;
+  *is_synthetic_addr = is_synthetic;
+  *generic_signature_index_addr = generic_signature_index;
+  *field_annotations = assemble_annotations(runtime_visible_annotations,
+                                            runtime_visible_annotations_length,
+                                            runtime_invisible_annotations,
+                                            runtime_invisible_annotations_length,
+                                            CHECK);
+  return;
+}
+  
+
+// Field allocation types. Used for computing field offsets: fields are
+// grouped by static-ness and storage width so same-size fields can be
+// laid out together.
+
+enum FieldAllocationType {
+  STATIC_OOP,           // oop-valued statics
+  STATIC_BYTE,          // booleans and bytes
+  STATIC_SHORT,         // shorts and chars
+  STATIC_WORD,          // ints and floats
+  STATIC_DOUBLE,        // longs and doubles
+  STATIC_ALIGNED_DOUBLE,// longs and doubles requiring 8-byte alignment
+  NONSTATIC_OOP,        // non-static counterparts of the groups above
+  NONSTATIC_BYTE,
+  NONSTATIC_SHORT,
+  NONSTATIC_WORD,
+  NONSTATIC_DOUBLE,
+  NONSTATIC_ALIGNED_DOUBLE
+};
+
+
+// Counters for the number of fields of each allocation width, filled in by
+// parse_fields(). Aligned and unaligned doubles share the double counters.
+struct FieldAllocationCount {
+  int static_oop_count;
+  int static_byte_count;
+  int static_short_count;
+  int static_word_count;
+  int static_double_count;
+  int nonstatic_oop_count;
+  int nonstatic_byte_count;
+  int nonstatic_short_count;
+  int nonstatic_word_count;
+  int nonstatic_double_count;
+};
+
+typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface, 
+                                              struct FieldAllocationCount *fac,
+                                              objArrayHandle* fields_annotations, TRAPS) {
+  ClassFileStream* cfs = stream();
+  typeArrayHandle nullHandle;
+  cfs->guarantee_more(2, CHECK_(nullHandle));  // length
+  u2 length = cfs->get_u2_fast();
+  // Tuples of shorts [access, name index, sig index, initial value index, byte offset, generic signature index]
+  typeArrayOop new_fields = oopFactory::new_permanent_shortArray(length*instanceKlass::next_offset, CHECK_(nullHandle));
+  typeArrayHandle fields(THREAD, new_fields);
+ 
+  int index = 0;
+  typeArrayHandle field_annotations;
+  for (int n = 0; n < length; n++) {
+    cfs->guarantee_more(8, CHECK_(nullHandle));  // access_flags, name_index, descriptor_index, attributes_count
+
+    AccessFlags access_flags;
+    jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS;
+    verify_legal_field_modifiers(flags, is_interface, CHECK_(nullHandle));
+    access_flags.set_flags(flags);
+
+    u2 name_index = cfs->get_u2_fast();
+    int cp_size = cp->length();
+    check_property(
+      valid_cp_range(name_index, cp_size) && cp->tag_at(name_index).is_utf8(), 
+      "Invalid constant pool index %u for field name in class file %s", 
+      name_index, CHECK_(nullHandle));
+    symbolHandle name(THREAD, cp->symbol_at(name_index));
+    verify_legal_field_name(name, CHECK_(nullHandle));
+
+    u2 signature_index = cfs->get_u2_fast();
+    check_property(
+      valid_cp_range(signature_index, cp_size) && 
+        cp->tag_at(signature_index).is_utf8(), 
+      "Invalid constant pool index %u for field signature in class file %s", 
+      signature_index, CHECK_(nullHandle));
+    symbolHandle sig(THREAD, cp->symbol_at(signature_index));
+    verify_legal_field_signature(name, sig, CHECK_(nullHandle));
+
+    u2 constantvalue_index = 0;
+    bool is_synthetic = false;
+    u2 generic_signature_index = 0;
+    bool is_static = access_flags.is_static();
+
+    u2 attributes_count = cfs->get_u2_fast();
+    if (attributes_count > 0) {
+      parse_field_attributes(cp, attributes_count, is_static, signature_index,
+                             &constantvalue_index, &is_synthetic,
+                             &generic_signature_index, &field_annotations,
+                             CHECK_(nullHandle));
+      if (field_annotations.not_null()) {
+        if (fields_annotations->is_null()) {
+          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+          *fields_annotations = objArrayHandle(THREAD, md);
+        }
+        (*fields_annotations)->obj_at_put(n, field_annotations());
+      }
+      if (is_synthetic) {
+        access_flags.set_is_synthetic();
+      }
+    }
+    
+    fields->short_at_put(index++, access_flags.as_short());
+    fields->short_at_put(index++, name_index);
+    fields->short_at_put(index++, signature_index);
+    fields->short_at_put(index++, constantvalue_index);	
+
+    // Remember how many oops we encountered and compute allocation type
+    BasicType type = cp->basic_type_for_signature_at(signature_index);
+    FieldAllocationType atype;
+    if ( is_static ) {
+      switch ( type ) {
+        case  T_BOOLEAN:
+        case  T_BYTE:
+          fac->static_byte_count++;
+          atype = STATIC_BYTE;
+          break;
+        case  T_LONG:
+        case  T_DOUBLE:
+          if (Universe::field_type_should_be_aligned(type)) {
+            atype = STATIC_ALIGNED_DOUBLE;
+          } else {
+            atype = STATIC_DOUBLE;
+          }
+          fac->static_double_count++;
+          break;
+        case  T_CHAR:     
+        case  T_SHORT: 
+          fac->static_short_count++;
+          atype = STATIC_SHORT;
+          break;
+        case  T_FLOAT:
+        case  T_INT:
+          fac->static_word_count++;
+          atype = STATIC_WORD;
+          break;
+        case  T_ARRAY: 
+        case  T_OBJECT:
+          fac->static_oop_count++;
+          atype = STATIC_OOP;
+          break;
+        case  T_ADDRESS: 
+        case  T_VOID:
+        default: 
+          assert(0, "bad field type");
+      }
+    } else {
+      switch ( type ) {
+        case  T_BOOLEAN:
+        case  T_BYTE:
+          fac->nonstatic_byte_count++;
+          atype = NONSTATIC_BYTE;
+          break;
+        case  T_LONG:
+        case  T_DOUBLE:
+          if (Universe::field_type_should_be_aligned(type)) {
+            atype = NONSTATIC_ALIGNED_DOUBLE;
+          } else {
+            atype = NONSTATIC_DOUBLE;
+          }
+          fac->nonstatic_double_count++;
+          break;
+        case  T_CHAR:     
+        case  T_SHORT: 
+          fac->nonstatic_short_count++;
+          atype = NONSTATIC_SHORT;
+          break;
+        case  T_FLOAT:
+        case  T_INT:
+          fac->nonstatic_word_count++;
+          atype = NONSTATIC_WORD;
+          break;
+        case  T_ARRAY: 
+        case  T_OBJECT:
+          fac->nonstatic_oop_count++;
+          atype = NONSTATIC_OOP;
+          break;
+        case  T_ADDRESS: 
+        case  T_VOID:
+        default: 
+          assert(0, "bad field type");
+      }
+    }
+
+    // The correct offset is computed later (all oop fields will be located together)
+    // We temporarily store the allocation type in the offset field
+    fields->short_at_put(index++, atype);
+    fields->short_at_put(index++, 0);  // Clear out high word of byte offset
+    fields->short_at_put(index++, generic_signature_index);
+  }
+
+  if (_need_verify && length > 1) {
+    // Check duplicated fields
+    ResourceMark rm(THREAD);
+    NameSigHash** names_and_sigs = NEW_RESOURCE_ARRAY_IN_THREAD(
+      THREAD, NameSigHash*, HASH_ROW_SIZE);
+    initialize_hashtable(names_and_sigs);
+    bool dup = false;
+    {
+      debug_only(No_Safepoint_Verifier nsv;)
+      for (int i = 0; i < length*instanceKlass::next_offset; i += instanceKlass::next_offset) {
+        int name_index = fields->ushort_at(i + instanceKlass::name_index_offset);
+        symbolOop name = cp->symbol_at(name_index);
+        int sig_index = fields->ushort_at(i + instanceKlass::signature_index_offset);
+        symbolOop sig = cp->symbol_at(sig_index);
+        // If no duplicates, add name/signature in hashtable names_and_sigs.
+        if (!put_after_lookup(name, sig, names_and_sigs)) {
+          dup = true;
+          break;
+        }
+      }
+    }
+    if (dup) {
+      classfile_parse_error("Duplicate field name&signature in class file %s",
+                            CHECK_(nullHandle));
+    }
+  }
+
+  return fields;
+}
+
+
+static void copy_u2_with_conversion(u2* dest, u2* src, int length) {
+  while (length-- > 0) {
+    *dest++ = Bytes::get_Java_u2((u1*) (src++));
+  }
+}
+
+
// Parses the exception_table of a Code attribute. Returns a permanent
// intArray holding flattened 4-tuples [start_pc, end_pc, handler_pc,
// catch_type_index], one per handler entry. When _need_verify is set,
// pc ranges are checked against code_length and a non-zero
// catch_type_index must reference a (possibly unresolved) klass entry.
typeArrayHandle ClassFileParser::parse_exception_table(u4 code_length, 
                                                       u4 exception_table_length, 
                                                       constantPoolHandle cp, 
                                                       TRAPS) {
  ClassFileStream* cfs = stream();
  typeArrayHandle nullHandle;   // returned (via CHECK_ macros) on error

  // 4-tuples of ints [start_pc, end_pc, handler_pc, catch_type index]
  typeArrayOop eh = oopFactory::new_permanent_intArray(exception_table_length*4, CHECK_(nullHandle));
  typeArrayHandle exception_handlers = typeArrayHandle(THREAD, eh);
  
  int index = 0;
  // Each table entry is four u2 values = 8 bytes of stream data.
  cfs->guarantee_more(8 * exception_table_length, CHECK_(nullHandle)); // start_pc, end_pc, handler_pc, catch_type_index
  for (unsigned int i = 0; i < exception_table_length; i++) {
    u2 start_pc = cfs->get_u2_fast();
    u2 end_pc = cfs->get_u2_fast();
    u2 handler_pc = cfs->get_u2_fast();
    u2 catch_type_index = cfs->get_u2_fast();
    // Will check legal target after parsing code array in verifier.
    if (_need_verify) {
      guarantee_property((start_pc < end_pc) && (end_pc <= code_length), 
                         "Illegal exception table range in class file %s", CHECK_(nullHandle)); 
      guarantee_property(handler_pc < code_length, 
                         "Illegal exception table handler in class file %s", CHECK_(nullHandle)); 
      // catch_type_index == 0 is not checked (per JVMS it denotes a
      // catch-all entry with no constant pool reference).
      if (catch_type_index != 0) {
        guarantee_property(valid_cp_range(catch_type_index, cp->length()) && 
                          (cp->tag_at(catch_type_index).is_klass() || 
                           cp->tag_at(catch_type_index).is_unresolved_klass()),
                           "Catch type in exception table has bad constant type in class file %s", CHECK_(nullHandle));
      }
    }	      
    exception_handlers->int_at_put(index++, start_pc); 
    exception_handlers->int_at_put(index++, end_pc);  
    exception_handlers->int_at_put(index++, handler_pc);  
    exception_handlers->int_at_put(index++, catch_type_index);  
  }
  return exception_handlers;
}
+
// Parses a LineNumberTable attribute of a Code attribute and compresses
// it into the VM-internal (bci, line) pair-stream format. Returns the
// compressed buffer (NULL when the table is empty) and stores its size
// through compressed_linenumber_table_size.
u_char* ClassFileParser::parse_linenumber_table(u4 code_attribute_length, 
                                                u4 code_length,
                                                int* compressed_linenumber_table_size, 
                                                TRAPS) {
  ClassFileStream* cfs = stream();
  cfs->guarantee_more(2, CHECK_NULL);  // linenumber_table_length
  unsigned int linenumber_table_length = cfs->get_u2_fast();

  // Verify line number attribute and table length
  if (_need_verify) {
    guarantee_property(code_attribute_length ==
                       (sizeof(u2) /* linenumber table length */ +
                        linenumber_table_length*(sizeof(u2) /* start_pc */ +
                        sizeof(u2) /* line_number */)),
                       "LineNumberTable attribute has wrong length in class file %s", CHECK_NULL);
  }          
  
  u_char* compressed_linenumber_table = NULL;
  if (linenumber_table_length > 0) {
    // initial_size large enough
    int initial_size = linenumber_table_length * sizeof(u2) * 2;
    // Reuse the parser's fixed buffer when it is big enough; otherwise
    // let the write stream allocate a buffer of initial_size.
    CompressedLineNumberWriteStream c_stream =
      (initial_size <= fixed_buffer_size) ? 
      CompressedLineNumberWriteStream(_fixed_buffer, fixed_buffer_size) :
      CompressedLineNumberWriteStream(initial_size);
    cfs->guarantee_more(4 * linenumber_table_length, CHECK_NULL);  // bci, line
    while (linenumber_table_length-- > 0) {
      u2 bci  = cfs->get_u2_fast(); // start_pc
      u2 line = cfs->get_u2_fast(); // line_number
      // Note: this bci bound is enforced even when _need_verify is false.
      guarantee_property(bci < code_length,
                         "Invalid pc in LineNumberTable in class file %s", CHECK_NULL);
      c_stream.write_pair(bci, line);
    }
    c_stream.write_terminator();
    *compressed_linenumber_table_size = c_stream.position();
    compressed_linenumber_table = c_stream.buffer();
  }
  return compressed_linenumber_table;
}
+
+
// Class file LocalVariableTable elements.
// Mirrors the on-disk layout of one LocalVariableTable (or
// LocalVariableTypeTable) entry. Field values remain in classfile
// (big-endian) byte order until converted via Bytes::get_Java_u2 in
// copy_lvt_element(), so the layout must not be changed.
class Classfile_LVT_Element VALUE_OBJ_CLASS_SPEC {
 public:
  u2 start_bci;            // first bci at which the variable is live
  u2 length;               // number of code bytes the entry covers
  u2 name_cp_index;        // constant pool index of the variable name
  u2 descriptor_cp_index;  // constant pool index of descriptor (LVT) or signature (LVTT)
  u2 slot;                 // local variable slot index
};
+
+
// Chained hash table node used to detect duplicate local variable table
// entries (see LVT_lookup / LVT_put_after_lookup below).
class LVT_Hash: public CHeapObj {
 public:
  LocalVariableTableElement  *_elem;  // element
  LVT_Hash*                   _next;  // Next entry in hash table
};
+
+unsigned int hash(LocalVariableTableElement *elem) {
+  unsigned int raw_hash = elem->start_bci;
+
+  raw_hash = elem->length        + raw_hash * 37;
+  raw_hash = elem->name_cp_index + raw_hash * 37;
+  raw_hash = elem->slot          + raw_hash * 37;
+
+  return raw_hash % HASH_ROW_SIZE;
+}
+
+void initialize_hashtable(LVT_Hash** table) {
+  for (int i = 0; i < HASH_ROW_SIZE; i++) {
+    table[i] = NULL;
+  }
+}
+
+void clear_hashtable(LVT_Hash** table) {
+  for (int i = 0; i < HASH_ROW_SIZE; i++) {
+    LVT_Hash* current = table[i];
+    LVT_Hash* next;
+    while (current != NULL) {
+      next = current->_next;
+      current->_next = NULL;
+      delete(current);
+      current = next;
+    }
+    table[i] = NULL;
+  }
+}
+
+LVT_Hash* LVT_lookup(LocalVariableTableElement *elem, int index, LVT_Hash** table) {
+  LVT_Hash* entry = table[index];
+
+  /*
+   * 3-tuple start_bci/length/slot has to be unique key,
+   * so the following comparison seems to be redundant:
+   *       && elem->name_cp_index == entry->_elem->name_cp_index
+   */
+  while (entry != NULL) {
+    if (elem->start_bci           == entry->_elem->start_bci
+     && elem->length              == entry->_elem->length 
+     && elem->name_cp_index       == entry->_elem->name_cp_index
+     && elem->slot                == entry->_elem->slot
+    ) {
+      return entry;
+    }
+    entry = entry->_next;
+  }
+  return NULL;
+}
+
+// Return false if the local variable is found in table.
+// Return true if no duplicate is found.
+// And local variable is added as a new entry in table.
+bool LVT_put_after_lookup(LocalVariableTableElement *elem, LVT_Hash** table) {
+  // First lookup for duplicates
+  int index = hash(elem);
+  LVT_Hash* entry = LVT_lookup(elem, index, table);
+
+  if (entry != NULL) {
+      return false;
+  }
+  // No duplicate is found, allocate a new entry and fill it.
+  if ((entry = new LVT_Hash()) == NULL) {
+    return false;
+  }
+  entry->_elem = elem;
+ 
+  // Insert into hash table
+  entry->_next = table[index];
+  table[index] = entry;
+
+  return true;
+}
+
+void copy_lvt_element(Classfile_LVT_Element *src, LocalVariableTableElement *lvt) {
+  lvt->start_bci           = Bytes::get_Java_u2((u1*) &src->start_bci);
+  lvt->length              = Bytes::get_Java_u2((u1*) &src->length);
+  lvt->name_cp_index       = Bytes::get_Java_u2((u1*) &src->name_cp_index);
+  lvt->descriptor_cp_index = Bytes::get_Java_u2((u1*) &src->descriptor_cp_index);
+  lvt->signature_cp_index  = 0;
+  lvt->slot                = Bytes::get_Java_u2((u1*) &src->slot);
+}
+
// Function is used to parse both attributes:
//       LocalVariableTable (LVT) and LocalVariableTypeTable (LVTT)
// Returns a pointer into the classfile stream at the first raw (still
// big-endian) table entry and stores the entry count through
// localvariable_table_length. With _need_verify, each entry's pc range,
// constant pool indices and slot index are validated while being read;
// otherwise the raw table bytes are simply skipped.
u2* ClassFileParser::parse_localvariable_table(u4 code_length,
                                               u2 max_locals,
                                               u4 code_attribute_length,
                                               constantPoolHandle cp,
                                               u2* localvariable_table_length,
                                               bool isLVTT,
                                               TRAPS) {
  ClassFileStream* cfs = stream();
  const char * tbl_name = (isLVTT) ? "LocalVariableTypeTable" : "LocalVariableTable";
  *localvariable_table_length = cfs->get_u2(CHECK_NULL);
  // Table size in u2 units: each entry has the five u2 fields of
  // Classfile_LVT_Element.
  unsigned int size = (*localvariable_table_length) * sizeof(Classfile_LVT_Element) / sizeof(u2);
  // Verify local variable table attribute has right length
  if (_need_verify) {
    guarantee_property(code_attribute_length == (sizeof(*localvariable_table_length) + size * sizeof(u2)),
                       "%s has wrong length in class file %s", tbl_name, CHECK_NULL);
  }
  u2* localvariable_table_start = cfs->get_u2_buffer();
  assert(localvariable_table_start != NULL, "null local variable table");
  if (!_need_verify) { 
    // No verification: just advance past the raw entries.
    cfs->skip_u2_fast(size);
  } else {
    // Validate each entry while consuming it from the stream.
    cfs->guarantee_more(size * 2, CHECK_NULL);
    for(int i = 0; i < (*localvariable_table_length); i++) {
      u2 start_pc = cfs->get_u2_fast();
      u2 length = cfs->get_u2_fast();
      u2 name_index = cfs->get_u2_fast();
      u2 descriptor_index = cfs->get_u2_fast();
      u2 index = cfs->get_u2_fast();
      // Assign to a u4 to avoid overflow
      u4 end_pc = (u4)start_pc + (u4)length;

      if (start_pc >= code_length) {
        classfile_parse_error(
          "Invalid start_pc %u in %s in class file %s", 
          start_pc, tbl_name, CHECK_NULL);
      }
      if (end_pc > code_length) {
        classfile_parse_error(
          "Invalid length %u in %s in class file %s", 
          length, tbl_name, CHECK_NULL);
      }
      int cp_size = cp->length();
      // Both the name and descriptor/signature indices must be Utf8 entries.
      guarantee_property(
        valid_cp_range(name_index, cp_size) && 
          cp->tag_at(name_index).is_utf8(),
        "Name index %u in %s has bad constant type in class file %s",
        name_index, tbl_name, CHECK_NULL);
      guarantee_property(
        valid_cp_range(descriptor_index, cp_size) &&
          cp->tag_at(descriptor_index).is_utf8(),
        "Signature index %u in %s has bad constant type in class file %s",
        descriptor_index, tbl_name, CHECK_NULL);

      symbolHandle name(THREAD, cp->symbol_at(name_index));
      symbolHandle sig(THREAD, cp->symbol_at(descriptor_index));
      verify_legal_field_name(name, CHECK_NULL);
      u2 extra_slot = 0;
      if (!isLVTT) {
        // The descriptor check applies only to the LVT; LVTT entries
        // carry a generic signature instead of a field descriptor.
        verify_legal_field_signature(name, sig, CHECK_NULL);

        // 4894874: check special cases for double and long local variables
        if (sig() == vmSymbols::type_signature(T_DOUBLE) || 
            sig() == vmSymbols::type_signature(T_LONG)) {
          extra_slot = 1;
        }
      }
      guarantee_property((index + extra_slot) < max_locals,
                          "Invalid index %u in %s in class file %s",
                          index, tbl_name, CHECK_NULL);
    }
  }
  return localvariable_table_start;
}
+
+
// Parses one verification type array of a StackMap frame. Tags are
// appended to u1_array; extra operands (class index for ITEM_Object,
// bytecode offset for ITEM_Uninitialized) are appended to u2_array.
// The slot count (long/double occupy two slots) is backfilled into the
// u2_array position that *u2_index referenced on entry; both indices are
// advanced past the consumed data on return.
void ClassFileParser::parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
                                      u1* u1_array, u2* u2_array, constantPoolHandle cp, TRAPS) {
  ClassFileStream* cfs = stream();
  u2 index = 0; // index in the array with long/double occupying two slots
  u4 i1 = *u1_index;
  u4 i2 = *u2_index + 1;  // slot at *u2_index is reserved for the count
  for(int i = 0; i < array_length; i++) {
    u1 tag = u1_array[i1++] = cfs->get_u1(CHECK);
    index++;
    if (tag == ITEM_Long || tag == ITEM_Double) {
      // These types take two local/stack slots.
      index++; 
    } else if (tag == ITEM_Object) {
      u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK);
      guarantee_property(valid_cp_range(class_index, cp->length()) &&
                         cp->tag_at(class_index).is_unresolved_klass(), 
                         "Bad class index %u in StackMap in class file %s", 
                         class_index, CHECK);
    } else if (tag == ITEM_Uninitialized) {
      u2 offset = u2_array[i2++] = cfs->get_u2(CHECK);
      guarantee_property(
        offset < code_length, 
        "Bad uninitialized type offset %u in StackMap in class file %s", 
        offset, CHECK);
    } else {
      // Remaining tags carry no operand; anything above the known tag
      // range is rejected.
      guarantee_property(
        tag <= (u1)ITEM_Uninitialized,
        "Unknown variable type %u in StackMap in class file %s", 
        tag, CHECK);
    }
  }
  u2_array[*u2_index] = index;  // backfill the slot count
  *u1_index = i1;
  *u2_index = i2;
}
+
// Parses a StackMapTable attribute by copying its raw bytes verbatim
// into a permanent byteArray for later use. Returns NULL for an empty
// attribute, or when neither the split verifier nor shared-space
// dumping needs the data (the bytes are consumed from the stream in
// every case).
typeArrayOop ClassFileParser::parse_stackmap_table(
    u4 code_attribute_length, TRAPS) {
  if (code_attribute_length == 0) 
    return NULL;
  
  ClassFileStream* cfs = stream();
  u1* stackmap_table_start = cfs->get_u1_buffer();
  assert(stackmap_table_start != NULL, "null stackmap table");

  // check code_attribute_length first
  stream()->skip_u1(code_attribute_length, CHECK_NULL);

  if (!_need_verify && !DumpSharedSpaces) {
    return NULL;
  }

  typeArrayOop stackmap_data = 
    oopFactory::new_permanent_byteArray(code_attribute_length, CHECK_NULL);

  // NOTE(review): set_length after the length-sized allocation above
  // looks redundant -- confirm against the oopFactory implementation.
  stackmap_data->set_length(code_attribute_length);
  memcpy((void*)stackmap_data->byte_at_addr(0), 
         (void*)stackmap_table_start, code_attribute_length);
  return stackmap_data;
}
+
// Parses a method's Exceptions attribute. Returns a pointer into the
// classfile stream at the first (still big-endian) table entry and
// stores the entry count through checked_exceptions_length. With
// _need_verify, every entry must be a klass reference in the constant
// pool and the attribute length must match the table size.
u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length, 
                                              u4 method_attribute_length,
                                              constantPoolHandle cp, TRAPS) {
  ClassFileStream* cfs = stream();
  cfs->guarantee_more(2, CHECK_NULL);  // checked_exceptions_length
  *checked_exceptions_length = cfs->get_u2_fast();
  // Table size in u2 units: one constant pool index per entry.
  unsigned int size = (*checked_exceptions_length) * sizeof(CheckedExceptionElement) / sizeof(u2);
  u2* checked_exceptions_start = cfs->get_u2_buffer();
  assert(checked_exceptions_start != NULL, "null checked exceptions");
  if (!_need_verify) { 
    // No verification: just advance past the raw table.
    cfs->skip_u2_fast(size);
  } else {
    // Verify each value in the checked exception table
    u2 checked_exception;
    u2 len = *checked_exceptions_length;
    cfs->guarantee_more(2 * len, CHECK_NULL);
    for (int i = 0; i < len; i++) {
      checked_exception = cfs->get_u2_fast();
      check_property(
        valid_cp_range(checked_exception, cp->length()) &&
        cp->tag_at(checked_exception).is_klass_reference(), 
        "Exception name has bad type at constant pool %u in class file %s", 
        checked_exception, CHECK_NULL);
    }
  }
  // check exceptions attribute length
  if (_need_verify) {
    guarantee_property(method_attribute_length == (sizeof(*checked_exceptions_length) +
                                                   sizeof(u2) * size),
                      "Exceptions attribute has wrong length in class file %s", CHECK_NULL);
  }
  return checked_exceptions_start;
}
+
+
+#define MAX_ARGS_SIZE 255
+#define MAX_CODE_SIZE 65535
+#define INITIAL_MAX_LVT_NUMBER 256
+
+// Note: the parse_method below is big and clunky because all parsing of the code and exceptions
+// attribute is inlined. This is curbersome to avoid since we inline most of the parts in the
+// methodOop to save footprint, so we only know the size of the resulting methodOop when the
+// entire method attribute is parsed.
+//
+// The promoted_flags parameter is used to pass relevant access_flags
+// from the method back up to the containing klass. These flag values
+// are added to klass's access_flags.
+
+methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interface,
+                                           AccessFlags *promoted_flags,
+                                           typeArrayHandle* method_annotations,
+                                           typeArrayHandle* method_parameter_annotations,
+                                           typeArrayHandle* method_default_annotations,
+                                           TRAPS) {
+  ClassFileStream* cfs = stream();
+  methodHandle nullHandle;
+  ResourceMark rm(THREAD);
+  // Parse fixed parts
+  cfs->guarantee_more(8, CHECK_(nullHandle)); // access_flags, name_index, descriptor_index, attributes_count
+
+  int flags = cfs->get_u2_fast();
+  u2 name_index = cfs->get_u2_fast();
+  int cp_size = cp->length();
+  check_property(
+    valid_cp_range(name_index, cp_size) && 
+      cp->tag_at(name_index).is_utf8(), 
+    "Illegal constant pool index %u for method name in class file %s", 
+    name_index, CHECK_(nullHandle));
+  symbolHandle name(THREAD, cp->symbol_at(name_index));
+  verify_legal_method_name(name, CHECK_(nullHandle));  
+
+  u2 signature_index = cfs->get_u2_fast();
+  check_property(
+    valid_cp_range(signature_index, cp_size) &&
+      cp->tag_at(signature_index).is_utf8(), 
+    "Illegal constant pool index %u for method signature in class file %s", 
+    signature_index, CHECK_(nullHandle));
+  symbolHandle signature(THREAD, cp->symbol_at(signature_index));
+
+  AccessFlags access_flags;  
+  if (name == vmSymbols::class_initializer_name()) {
+    // We ignore the access flags for a class initializer. (JVM Spec. p. 116)
+    flags = JVM_ACC_STATIC;
+  } else {
+    verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
+  }
+
+  int args_size = -1;  // only used when _need_verify is true
+  if (_need_verify) {
+    args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) + 
+                 verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+    if (args_size > MAX_ARGS_SIZE) {
+      classfile_parse_error("Too many arguments in method signature in class file %s", CHECK_(nullHandle));
+    }
+  }
+        
+  access_flags.set_flags(flags & JVM_RECOGNIZED_METHOD_MODIFIERS);
+  
+  // Default values for code and exceptions attribute elements
+  u2 max_stack = 0;
+  u2 max_locals = 0;
+  u4 code_length = 0;
+  u1* code_start = 0;
+  u2 exception_table_length = 0;
+  typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array());
+  u2 checked_exceptions_length = 0;
+  u2* checked_exceptions_start = NULL;
+  int compressed_linenumber_table_size = 0;
+  u_char* compressed_linenumber_table = NULL;
+  int total_lvt_length = 0;
+  u2 lvt_cnt = 0;
+  u2 lvtt_cnt = 0;
+  bool lvt_allocated = false;
+  u2 max_lvt_cnt = INITIAL_MAX_LVT_NUMBER;
+  u2 max_lvtt_cnt = INITIAL_MAX_LVT_NUMBER;
+  u2* localvariable_table_length;
+  u2** localvariable_table_start;
+  u2* localvariable_type_table_length;
+  u2** localvariable_type_table_start;
+  bool parsed_code_attribute = false;
+  bool parsed_checked_exceptions_attribute = false;
+  bool parsed_stackmap_attribute = false;
+  // stackmap attribute - JDK1.5
+  typeArrayHandle stackmap_data;
+  u2 generic_signature_index = 0;
+  u1* runtime_visible_annotations = NULL;
+  int runtime_visible_annotations_length = 0;
+  u1* runtime_invisible_annotations = NULL;
+  int runtime_invisible_annotations_length = 0;
+  u1* runtime_visible_parameter_annotations = NULL;
+  int runtime_visible_parameter_annotations_length = 0;
+  u1* runtime_invisible_parameter_annotations = NULL;
+  int runtime_invisible_parameter_annotations_length = 0;
+  u1* annotation_default = NULL;
+  int annotation_default_length = 0;
+
+  // Parse code and exceptions attribute
+  u2 method_attributes_count = cfs->get_u2_fast();
+  while (method_attributes_count--) {   
+    cfs->guarantee_more(6, CHECK_(nullHandle));  // method_attribute_name_index, method_attribute_length
+    u2 method_attribute_name_index = cfs->get_u2_fast();
+    u4 method_attribute_length = cfs->get_u4_fast();
+    check_property(
+      valid_cp_range(method_attribute_name_index, cp_size) &&
+        cp->tag_at(method_attribute_name_index).is_utf8(), 
+      "Invalid method attribute name index %u in class file %s", 
+      method_attribute_name_index, CHECK_(nullHandle));
+
+    symbolOop method_attribute_name = cp->symbol_at(method_attribute_name_index);
+    if (method_attribute_name == vmSymbols::tag_code()) {
+      // Parse Code attribute
+      if (_need_verify) {
+        guarantee_property(!access_flags.is_native() && !access_flags.is_abstract(), 
+                        "Code attribute in native or abstract methods in class file %s", 
+                         CHECK_(nullHandle));
+      }
+      if (parsed_code_attribute) {
+        classfile_parse_error("Multiple Code attributes in class file %s", CHECK_(nullHandle));
+      }
+      parsed_code_attribute = true;
+
+      // Stack size, locals size, and code size
+      if (_major_version == 45 && _minor_version <= 2) {
+        cfs->guarantee_more(4, CHECK_(nullHandle));
+        max_stack = cfs->get_u1_fast();
+        max_locals = cfs->get_u1_fast();
+        code_length = cfs->get_u2_fast();
+      } else {
+        cfs->guarantee_more(8, CHECK_(nullHandle));
+        max_stack = cfs->get_u2_fast();
+        max_locals = cfs->get_u2_fast();
+        code_length = cfs->get_u4_fast();
+      }
+      if (_need_verify) {
+        guarantee_property(args_size <= max_locals, 
+                           "Arguments can't fit into locals in class file %s", CHECK_(nullHandle));
+        guarantee_property(code_length > 0 && code_length <= MAX_CODE_SIZE, 
+                           "Invalid method Code length %u in class file %s", 
+                           code_length, CHECK_(nullHandle));
+      }
+      // Code pointer
+      code_start = cfs->get_u1_buffer();
+      assert(code_start != NULL, "null code start");
+      cfs->guarantee_more(code_length, CHECK_(nullHandle));
+      cfs->skip_u1_fast(code_length);
+
+      // Exception handler table
+      cfs->guarantee_more(2, CHECK_(nullHandle));  // exception_table_length
+      exception_table_length = cfs->get_u2_fast();
+      if (exception_table_length > 0) {
+        exception_handlers = 
+              parse_exception_table(code_length, exception_table_length, cp, CHECK_(nullHandle));
+      }
+
+      // Parse additional attributes in code attribute
+      cfs->guarantee_more(2, CHECK_(nullHandle));  // code_attributes_count
+      u2 code_attributes_count = cfs->get_u2_fast();
+      unsigned int calculated_attribute_length = sizeof(max_stack) + 
+                                                 sizeof(max_locals) + 
+                                                 sizeof(code_length) +
+                                                 code_length + 
+                                                 sizeof(exception_table_length) +
+                                                 sizeof(code_attributes_count) +
+                                                 exception_table_length*(sizeof(u2) /* start_pc */+
+                                                                         sizeof(u2) /* end_pc */  +
+                                                                         sizeof(u2) /* handler_pc */ +
+                                                                         sizeof(u2) /* catch_type_index */);
+
+      while (code_attributes_count--) {
+        cfs->guarantee_more(6, CHECK_(nullHandle));  // code_attribute_name_index, code_attribute_length
+        u2 code_attribute_name_index = cfs->get_u2_fast();
+        u4 code_attribute_length = cfs->get_u4_fast();
+        calculated_attribute_length += code_attribute_length + 
+                                       sizeof(code_attribute_name_index) +
+                                       sizeof(code_attribute_length);
+        check_property(valid_cp_range(code_attribute_name_index, cp_size) &&
+                       cp->tag_at(code_attribute_name_index).is_utf8(), 
+                       "Invalid code attribute name index %u in class file %s", 
+                       code_attribute_name_index,
+                       CHECK_(nullHandle));
+        if (LoadLineNumberTables && 
+            cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
+          // Parse and compress line number table
+          compressed_linenumber_table = parse_linenumber_table(code_attribute_length, 
+                                                               code_length,
+                                                               &compressed_linenumber_table_size, 
+                                                               CHECK_(nullHandle));
+                                         
+        } else if (LoadLocalVariableTables && 
+                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
+          // Parse local variable table
+          if (!lvt_allocated) {
+            localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
+            localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+            localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
+            localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+            lvt_allocated = true;
+          }
+          if (lvt_cnt == max_lvt_cnt) {
+            max_lvt_cnt <<= 1;
+            REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt);
+            REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
+          }
+          localvariable_table_start[lvt_cnt] =
+            parse_localvariable_table(code_length,
+                                      max_locals,
+                                      code_attribute_length,
+                                      cp,
+                                      &localvariable_table_length[lvt_cnt],
+                                      false,	// is not LVTT
+                                      CHECK_(nullHandle));
+          total_lvt_length += localvariable_table_length[lvt_cnt];
+          lvt_cnt++;
+        } else if (LoadLocalVariableTypeTables && 
+                   _major_version >= JAVA_1_5_VERSION &&
+                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
+          if (!lvt_allocated) {
+            localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
+            localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+            localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
+            localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+            lvt_allocated = true;
+          }
+          // Parse local variable type table
+          if (lvtt_cnt == max_lvtt_cnt) {
+            max_lvtt_cnt <<= 1;
+            REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt);
+            REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
+          }
+          localvariable_type_table_start[lvtt_cnt] =
+            parse_localvariable_table(code_length,
+                                      max_locals,
+                                      code_attribute_length,
+                                      cp,
+                                      &localvariable_type_table_length[lvtt_cnt],
+                                      true,	// is LVTT
+                                      CHECK_(nullHandle));
+          lvtt_cnt++;
+        } else if (UseSplitVerifier &&
+                   _major_version >= Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION &&
+                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
+          // Stack map is only needed by the new verifier in JDK1.5.
+          if (parsed_stackmap_attribute) {
+            classfile_parse_error("Multiple StackMapTable attributes in class file %s", CHECK_(nullHandle));
+          }
+          typeArrayOop sm = 
+            parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+          stackmap_data = typeArrayHandle(THREAD, sm);
+          parsed_stackmap_attribute = true;
+        } else {
+          // Skip unknown attributes
+          cfs->skip_u1(code_attribute_length, CHECK_(nullHandle));
+        }
+      }
+      // check method attribute length
+      if (_need_verify) {
+        guarantee_property(method_attribute_length == calculated_attribute_length,
+                           "Code segment has wrong length in class file %s", CHECK_(nullHandle));
+      }
+    } else if (method_attribute_name == vmSymbols::tag_exceptions()) {
+      // Parse Exceptions attribute
+      if (parsed_checked_exceptions_attribute) {
+        classfile_parse_error("Multiple Exceptions attributes in class file %s", CHECK_(nullHandle));
+      }
+      parsed_checked_exceptions_attribute = true;
+      checked_exceptions_start =
+            parse_checked_exceptions(&checked_exceptions_length, 
+                                     method_attribute_length, 
+                                     cp, CHECK_(nullHandle));
+    } else if (method_attribute_name == vmSymbols::tag_synthetic()) {
+      if (method_attribute_length != 0) {
+        classfile_parse_error(
+          "Invalid Synthetic method attribute length %u in class file %s", 
+          method_attribute_length, CHECK_(nullHandle));
+      }
+      // Should we check that there hasn't already been a synthetic attribute?
+      access_flags.set_is_synthetic();
+    } else if (method_attribute_name == vmSymbols::tag_deprecated()) { // 4276120
+      if (method_attribute_length != 0) {
+        classfile_parse_error(
+          "Invalid Deprecated method attribute length %u in class file %s", 
+          method_attribute_length, CHECK_(nullHandle));
+      }
+    } else if (_major_version >= JAVA_1_5_VERSION) {
+      if (method_attribute_name == vmSymbols::tag_signature()) {
+        if (method_attribute_length != 2) {
+          classfile_parse_error(
+            "Invalid Signature attribute length %u in class file %s", 
+            method_attribute_length, CHECK_(nullHandle));
+        }
+        cfs->guarantee_more(2, CHECK_(nullHandle));  // generic_signature_index
+        generic_signature_index = cfs->get_u2_fast();
+      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
+        runtime_visible_annotations_length = method_attribute_length;
+        runtime_visible_annotations = cfs->get_u1_buffer();
+        assert(runtime_visible_annotations != NULL, "null visible annotations");
+        cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
+      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
+        runtime_invisible_annotations_length = method_attribute_length;
+        runtime_invisible_annotations = cfs->get_u1_buffer();
+        assert(runtime_invisible_annotations != NULL, "null invisible annotations");
+        cfs->skip_u1(runtime_invisible_annotations_length, CHECK_(nullHandle));
+      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_parameter_annotations()) {
+        runtime_visible_parameter_annotations_length = method_attribute_length;
+        runtime_visible_parameter_annotations = cfs->get_u1_buffer();
+        assert(runtime_visible_parameter_annotations != NULL, "null visible parameter annotations");
+        cfs->skip_u1(runtime_visible_parameter_annotations_length, CHECK_(nullHandle));
+      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_parameter_annotations()) {
+        runtime_invisible_parameter_annotations_length = method_attribute_length;
+        runtime_invisible_parameter_annotations = cfs->get_u1_buffer();
+        assert(runtime_invisible_parameter_annotations != NULL, "null invisible parameter annotations");
+        cfs->skip_u1(runtime_invisible_parameter_annotations_length, CHECK_(nullHandle));
+      } else if (method_attribute_name == vmSymbols::tag_annotation_default()) {
+        annotation_default_length = method_attribute_length;
+        annotation_default = cfs->get_u1_buffer();
+        assert(annotation_default != NULL, "null annotation default");
+        cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
+      } else {
+        // Skip unknown attributes
+        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+      }
+    } else {
+      // Skip unknown attributes
+      cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+    }      
+  }
+  // Make sure there's at least one Code attribute in non-native/non-abstract method
+  if (_need_verify) {
+    guarantee_property(access_flags.is_native() || access_flags.is_abstract() || parsed_code_attribute,
+                      "Absent Code attribute in method that is not native or abstract in class file %s", CHECK_(nullHandle));
+  }
+
+  // All sizing information for a methodOop is finally available, now create it
+  methodOop m_oop  = oopFactory::new_method(code_length, access_flags,
+                               compressed_linenumber_table_size, 
+                               total_lvt_length, 
+                               checked_exceptions_length, 
+                               CHECK_(nullHandle));
+  methodHandle m (THREAD, m_oop);
+
+  ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
+
+  // Fill in information from fixed part (access_flags already set)
+  m->set_constants(cp());
+  m->set_name_index(name_index);
+  m->set_signature_index(signature_index);
+  m->set_generic_signature_index(generic_signature_index);
+#ifdef CC_INTERP
+  // hmm is there a gc issue here??
+  ResultTypeFinder rtf(cp->symbol_at(signature_index));
+  m->set_result_index(rtf.type());
+#endif
+
+  if (args_size >= 0) {
+    m->set_size_of_parameters(args_size);
+  } else { 
+    m->compute_size_of_parameters(THREAD);
+  }
+#ifdef ASSERT
+  if (args_size >= 0) {
+    m->compute_size_of_parameters(THREAD);
+    assert(args_size == m->size_of_parameters(), "");
+  }
+#endif
+
+  // Fill in code attribute information
+  m->set_max_stack(max_stack);
+  m->set_max_locals(max_locals);
+  m->constMethod()->set_stackmap_data(stackmap_data());
+
+  /**
+   * The exception_table field is the flag used to indicate
+   * that the methodOop and it's associated constMethodOop are partially 
+   * initialized and thus are exempt from pre/post GC verification.  Once 
+   * the field is set, the oops are considered fully initialized so make 
+   * sure that the oops can pass verification when this field is set. 
+   */
+  m->set_exception_table(exception_handlers());
+
+  // Copy byte codes
+  if (code_length > 0) {
+    memcpy(m->code_base(), code_start, code_length);
+  }
+  // Copy line number table
+  if (compressed_linenumber_table_size > 0) {
+    memcpy(m->compressed_linenumber_table(), compressed_linenumber_table, compressed_linenumber_table_size);
+  }
+  // Copy checked exceptions
+  if (checked_exceptions_length > 0) {
+    int size = checked_exceptions_length * sizeof(CheckedExceptionElement) / sizeof(u2);
+    copy_u2_with_conversion((u2*) m->checked_exceptions_start(), checked_exceptions_start, size);
+  }
+
+  /* Copy class file LVT's/LVTT's into the HotSpot internal LVT.
+   *
+   * Rules for LVT's and LVTT's are:
+   *   - There can be any number of LVT's and LVTT's.
+   *   - If there are n LVT's, it is the same as if there was just
+   *     one LVT containing all the entries from the n LVT's.
+   *   - There may be no more than one LVT entry per local variable.
+   *     Two LVT entries are 'equal' if these fields are the same:
+   *        start_pc, length, name, slot
+   *   - There may be no more than one LVTT entry per each LVT entry.
+   *     Each LVTT entry has to match some LVT entry.
+   *   - HotSpot internal LVT keeps natural ordering of class file LVT entries.
+   */
+  if (total_lvt_length > 0) {  
+    int tbl_no, idx;
+
+    promoted_flags->set_has_localvariable_table();
+
+    LVT_Hash** lvt_Hash = NEW_RESOURCE_ARRAY(LVT_Hash*, HASH_ROW_SIZE);
+    initialize_hashtable(lvt_Hash);
+
+    // To fill LocalVariableTable in
+    Classfile_LVT_Element*  cf_lvt;
+    LocalVariableTableElement* lvt = m->localvariable_table_start();
+
+    for (tbl_no = 0; tbl_no < lvt_cnt; tbl_no++) {
+      cf_lvt = (Classfile_LVT_Element *) localvariable_table_start[tbl_no];
+      for (idx = 0; idx < localvariable_table_length[tbl_no]; idx++, lvt++) {
+        copy_lvt_element(&cf_lvt[idx], lvt);
+        // If no duplicates, add LVT elem in hashtable lvt_Hash.
+        if (LVT_put_after_lookup(lvt, lvt_Hash) == false 
+          && _need_verify 
+          && _major_version >= JAVA_1_5_VERSION ) {
+          clear_hashtable(lvt_Hash);
+          classfile_parse_error("Duplicated LocalVariableTable attribute "
+                                "entry for '%s' in class file %s",
+                                 cp->symbol_at(lvt->name_cp_index)->as_utf8(),
+                                 CHECK_(nullHandle));
+        }
+      }
+    }
+
+    // To merge LocalVariableTable and LocalVariableTypeTable
+    Classfile_LVT_Element* cf_lvtt;
+    LocalVariableTableElement lvtt_elem;
+
+    for (tbl_no = 0; tbl_no < lvtt_cnt; tbl_no++) {
+      cf_lvtt = (Classfile_LVT_Element *) localvariable_type_table_start[tbl_no];
+      for (idx = 0; idx < localvariable_type_table_length[tbl_no]; idx++) {
+        copy_lvt_element(&cf_lvtt[idx], &lvtt_elem);
+        int index = hash(&lvtt_elem);
+        LVT_Hash* entry = LVT_lookup(&lvtt_elem, index, lvt_Hash);
+        if (entry == NULL) {
+          if (_need_verify) {
+            clear_hashtable(lvt_Hash);
+            classfile_parse_error("LVTT entry for '%s' in class file %s "
+                                  "does not match any LVT entry",
+                                   cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
+                                   CHECK_(nullHandle));
+          }
+        } else if (entry->_elem->signature_cp_index != 0 && _need_verify) {
+          clear_hashtable(lvt_Hash);
+          classfile_parse_error("Duplicated LocalVariableTypeTable attribute "
+                                "entry for '%s' in class file %s",
+                                 cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
+                                 CHECK_(nullHandle));
+        } else {
+          // to add generic signatures into LocalVariableTable
+          entry->_elem->signature_cp_index = lvtt_elem.descriptor_cp_index;
+        }
+      }
+    }
+    clear_hashtable(lvt_Hash);
+  }
+
+  *method_annotations = assemble_annotations(runtime_visible_annotations,
+                                             runtime_visible_annotations_length,
+                                             runtime_invisible_annotations,
+                                             runtime_invisible_annotations_length,
+                                             CHECK_(nullHandle));
+  *method_parameter_annotations = assemble_annotations(runtime_visible_parameter_annotations,
+                                                       runtime_visible_parameter_annotations_length,
+                                                       runtime_invisible_parameter_annotations,
+                                                       runtime_invisible_parameter_annotations_length,
+                                                       CHECK_(nullHandle));
+  *method_default_annotations = assemble_annotations(annotation_default,
+                                                     annotation_default_length,
+                                                     NULL,
+                                                     0,
+                                                     CHECK_(nullHandle));
+
+  if (name() == vmSymbols::finalize_method_name() &&
+      signature() == vmSymbols::void_method_signature()) {
+    if (m->is_empty_method()) {
+      _has_empty_finalizer = true;
+    } else {
+      _has_finalizer = true;
+    }
+  }
+  if (name() == vmSymbols::object_initializer_name() &&
+      signature() == vmSymbols::void_method_signature() &&
+      m->is_vanilla_constructor()) {
+    _has_vanilla_constructor = true;
+  }
+
+  return m;
+}
+
+  
+// The promoted_flags parameter is used to pass relevant access_flags
+// from the methods back up to the containing klass. These flag values
+// are added to klass's access_flags.
+
+// Parses the class file "methods" array: reads the u2 method count, then
+// parses each method_info via parse_method().  Per-method annotation arrays
+// (visible / parameter / default) are allocated lazily, only once the first
+// method carrying that annotation kind is seen, and handed back to the
+// caller through the *_oop out-parameters.  Returns the methods array, or
+// the shared empty system array when the count is zero.
+objArrayHandle ClassFileParser::parse_methods(constantPoolHandle cp, bool is_interface, 
+                                              AccessFlags* promoted_flags,
+                                              bool* has_final_method,
+                                              objArrayOop* methods_annotations_oop,
+                                              objArrayOop* methods_parameter_annotations_oop,
+                                              objArrayOop* methods_default_annotations_oop,
+                                              TRAPS) {
+  ClassFileStream* cfs = stream();
+  objArrayHandle nullHandle;
+  typeArrayHandle method_annotations;
+  typeArrayHandle method_parameter_annotations;
+  typeArrayHandle method_default_annotations;
+  cfs->guarantee_more(2, CHECK_(nullHandle));  // length
+  u2 length = cfs->get_u2_fast();
+  if (length == 0) {
+    // No methods: share the canonical empty array instead of allocating.
+    return objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
+  } else {
+    objArrayOop m = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+    objArrayHandle methods(THREAD, m);
+    HandleMark hm(THREAD);
+    // Lazily created: stay null unless some method actually has annotations.
+    objArrayHandle methods_annotations;
+    objArrayHandle methods_parameter_annotations;
+    objArrayHandle methods_default_annotations;
+    for (int index = 0; index < length; index++) {
+      methodHandle method = parse_method(cp, is_interface, 
+                                         promoted_flags,
+                                         &method_annotations,
+                                         &method_parameter_annotations,
+                                         &method_default_annotations,
+                                         CHECK_(nullHandle));
+      if (method->is_final()) {
+        *has_final_method = true;
+      }
+      methods->obj_at_put(index, method());  
+      if (method_annotations.not_null()) {
+        if (methods_annotations.is_null()) {
+          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+          methods_annotations = objArrayHandle(THREAD, md);
+        }
+        methods_annotations->obj_at_put(index, method_annotations());
+      }
+      if (method_parameter_annotations.not_null()) {
+        if (methods_parameter_annotations.is_null()) {
+          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+          methods_parameter_annotations = objArrayHandle(THREAD, md);
+        }
+        methods_parameter_annotations->obj_at_put(index, method_parameter_annotations());
+      }
+      if (method_default_annotations.not_null()) {
+        if (methods_default_annotations.is_null()) {
+          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
+          methods_default_annotations = objArrayHandle(THREAD, md);
+        }
+        methods_default_annotations->obj_at_put(index, method_default_annotations());
+      }
+    }
+    if (_need_verify && length > 1) {
+      // Check duplicated methods: no two methods may share name+signature.
+      ResourceMark rm(THREAD);
+      NameSigHash** names_and_sigs = NEW_RESOURCE_ARRAY_IN_THREAD(
+        THREAD, NameSigHash*, HASH_ROW_SIZE);
+      initialize_hashtable(names_and_sigs);
+      bool dup = false;
+      {
+        // Raw methodOops are read inside this scope, so assert (in debug
+        // builds) that no safepoint can move them while we hash.
+        debug_only(No_Safepoint_Verifier nsv;)
+        for (int i = 0; i < length; i++) {
+          methodOop m = (methodOop)methods->obj_at(i);
+          // If no duplicates, add name/signature in hashtable names_and_sigs.
+          if (!put_after_lookup(m->name(), m->signature(), names_and_sigs)) {
+            dup = true;
+            break;
+          }
+        }
+      }
+      if (dup) {
+        classfile_parse_error("Duplicate method name&signature in class file %s",
+                              CHECK_(nullHandle));
+      }
+    }
+
+    // Hand the (possibly null) annotation arrays back to the caller as raw oops.
+    *methods_annotations_oop = methods_annotations();
+    *methods_parameter_annotations_oop = methods_parameter_annotations();
+    *methods_default_annotations_oop = methods_default_annotations();
+
+    return methods;
+  }
+}
+
+
+// Sorts the methods array (and its parallel annotation arrays) in place by
+// ascending method name for faster lookups and vtable construction.  When
+// JVMTI must be able to report the original class file ordering, returns an
+// int array mapping each sorted position back to its class file index;
+// otherwise returns the shared empty int array.
+typeArrayHandle ClassFileParser::sort_methods(objArrayHandle methods,
+                                              objArrayHandle methods_annotations,
+                                              objArrayHandle methods_parameter_annotations,
+                                              objArrayHandle methods_default_annotations,
+                                              TRAPS) {
+  typeArrayHandle nullHandle;
+  int length = methods()->length();
+  // If JVMTI original method ordering is enabled we have to 
+  // remember the original class file ordering.
+  // We temporarily use the vtable_index field in the methodOop to store the
+  // class file index, so we can read in after calling qsort.
+  if (JvmtiExport::can_maintain_original_method_order()) {
+    for (int index = 0; index < length; index++) {
+      methodOop m = methodOop(methods->obj_at(index));
+      assert(!m->valid_vtable_index(), "vtable index should not be set");
+      m->set_vtable_index(index);
+    }
+  }
+  // Sort method array by ascending method name (for faster lookups & vtable construction)
+  // Note that the ordering is not alphabetical, see symbolOopDesc::fast_compare
+  methodOopDesc::sort_methods(methods(),
+                              methods_annotations(),
+                              methods_parameter_annotations(),
+                              methods_default_annotations());
+
+  // If JVMTI original method ordering is enabled construct int array remembering the original ordering
+  if (JvmtiExport::can_maintain_original_method_order()) {
+    typeArrayOop new_ordering = oopFactory::new_permanent_intArray(length, CHECK_(nullHandle));
+    typeArrayHandle method_ordering(THREAD, new_ordering);
+    for (int index = 0; index < length; index++) {
+      methodOop m = methodOop(methods->obj_at(index));
+      // Read back the class file index stashed in vtable_index above,
+      // then restore the field to its "invalid" sentinel.
+      int old_index = m->vtable_index();
+      assert(old_index >= 0 && old_index < length, "invalid method index");
+      method_ordering->int_at_put(index, old_index);
+      m->set_vtable_index(methodOopDesc::invalid_vtable_index);
+    }
+    return method_ordering;
+  } else {
+    return typeArrayHandle(THREAD, Universe::the_empty_int_array());
+  }
+}
+
+
+// Parses a SourceFile attribute body: one u2 constant pool index that must
+// reference a CONSTANT_Utf8 entry; the symbol is stored on the klass as its
+// source file name.
+void ClassFileParser::parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+  ClassFileStream* cfs = stream();
+  cfs->guarantee_more(2, CHECK);  // sourcefile_index
+  u2 sourcefile_index = cfs->get_u2_fast();
+  check_property(
+    valid_cp_range(sourcefile_index, cp->length()) &&
+      cp->tag_at(sourcefile_index).is_utf8(), 
+    "Invalid SourceFile attribute at constant pool index %u in class file %s", 
+    sourcefile_index, CHECK);  
+  k->set_source_file_name(cp->symbol_at(sourcefile_index));
+}
+
+
+
+// Parses a SourceDebugExtension attribute of 'length' bytes.  The bytes are
+// turned into a symbol and stored on the klass only when JVMTI can actually
+// retrieve the extension; in either case the stream is advanced past the
+// attribute so parsing stays in sync.
+void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, 
+                                                                       instanceKlassHandle k, 
+                                                                       int length, TRAPS) {
+  ClassFileStream* cfs = stream();
+  u1* sde_buffer = cfs->get_u1_buffer();
+  assert(sde_buffer != NULL, "null sde buffer");
+
+  // Don't bother storing it if there is no way to retrieve it
+  if (JvmtiExport::can_get_source_debug_extension()) {
+    // Optimistically assume that only 1 byte UTF format is used
+    // (common case)
+    symbolOop sde_symbol = oopFactory::new_symbol((char*)sde_buffer, 
+                                                  length, CHECK);
+    k->set_source_debug_extension(sde_symbol);
+  }
+  // Got utf8 string, set stream position forward
+  cfs->skip_u1(length, CHECK);
+}
+
+
+// Inner classes can be static, private or protected (classic VM does this)
+#define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
+
+// Return number of classes in the inner classes attribute table
+// Parses an InnerClasses attribute: 'length' 4-tuples of u2
+// [inner_class_info_index, outer_class_info_index, inner_name_index,
+// inner_class_access_flags].  Each index is validated against the constant
+// pool (0 is allowed for the first three, meaning "absent"), the tuples are
+// copied into a permanent short array stored on the klass, and the tuple
+// count is returned so the caller can cross-check the attribute length.
+u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {  
+  ClassFileStream* cfs = stream();
+  cfs->guarantee_more(2, CHECK_0);  // length
+  u2 length = cfs->get_u2_fast();
+
+  // 4-tuples of shorts [inner_class_info_index, outer_class_info_index, inner_name_index, inner_class_access_flags]
+  typeArrayOop ic = oopFactory::new_permanent_shortArray(length*4, CHECK_0);  
+  typeArrayHandle inner_classes(THREAD, ic);
+  int index = 0;
+  int cp_size = cp->length();
+  cfs->guarantee_more(8 * length, CHECK_0);  // 4-tuples of u2
+  for (int n = 0; n < length; n++) {
+    // Inner class index
+    u2 inner_class_info_index = cfs->get_u2_fast();
+    check_property(
+      inner_class_info_index == 0 || 
+        (valid_cp_range(inner_class_info_index, cp_size) && 
+        cp->tag_at(inner_class_info_index).is_klass_reference()), 
+      "inner_class_info_index %u has bad constant type in class file %s", 
+      inner_class_info_index, CHECK_0);
+    // Outer class index
+    u2 outer_class_info_index = cfs->get_u2_fast();
+    check_property(
+      outer_class_info_index == 0 || 
+        (valid_cp_range(outer_class_info_index, cp_size) &&
+        cp->tag_at(outer_class_info_index).is_klass_reference()), 
+      "outer_class_info_index %u has bad constant type in class file %s", 
+      outer_class_info_index, CHECK_0);
+    // Inner class name
+    u2 inner_name_index = cfs->get_u2_fast();
+    check_property(
+      inner_name_index == 0 || (valid_cp_range(inner_name_index, cp_size) &&
+        cp->tag_at(inner_name_index).is_utf8()), 
+      "inner_name_index %u has bad constant type in class file %s", 
+      inner_name_index, CHECK_0);    
+    if (_need_verify) {
+      guarantee_property(inner_class_info_index != outer_class_info_index, 
+                         "Class is both outer and inner class in class file %s", CHECK_0);
+    }
+    // Access flags
+    AccessFlags inner_access_flags;
+    jint flags = cfs->get_u2_fast() & RECOGNIZED_INNER_CLASS_MODIFIERS;
+    if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
+      // Set abstract bit for old class files for backward compatibility
+      flags |= JVM_ACC_ABSTRACT;
+    }
+    verify_legal_class_modifiers(flags, CHECK_0);
+    inner_access_flags.set_flags(flags);
+
+    inner_classes->short_at_put(index++, inner_class_info_index);
+    inner_classes->short_at_put(index++, outer_class_info_index);
+    inner_classes->short_at_put(index++, inner_name_index);	
+    inner_classes->short_at_put(index++, inner_access_flags.as_short());
+  }
+
+  // 4347400: make sure there's no duplicate entry in the classes array
+  // (pairwise O(n^2) scan over the 4-tuples; n is the attribute's own count).
+  if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
+    for(int i = 0; i < inner_classes->length(); i += 4) {
+      for(int j = i + 4; j < inner_classes->length(); j += 4) {
+        guarantee_property((inner_classes->ushort_at(i)   != inner_classes->ushort_at(j) ||
+                            inner_classes->ushort_at(i+1) != inner_classes->ushort_at(j+1) ||
+                            inner_classes->ushort_at(i+2) != inner_classes->ushort_at(j+2) ||
+                            inner_classes->ushort_at(i+3) != inner_classes->ushort_at(j+3)),
+                            "Duplicate entry in InnerClasses in class file %s",
+                            CHECK_0);
+      }
+    }  
+  }  
+
+  // Update instanceKlass with inner class info.  
+  k->set_inner_classes(inner_classes());
+  return length;  
+}
+
+// Parses a Synthetic class attribute (the caller has already verified the
+// zero length): simply marks the klass as synthetic.
+void ClassFileParser::parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+  k->set_is_synthetic();
+}
+
+// Parses a Signature class attribute: one u2 index that must reference a
+// CONSTANT_Utf8 entry; the symbol is stored on the klass as its generic
+// signature.
+void ClassFileParser::parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+  ClassFileStream* cfs = stream();
+  u2 signature_index = cfs->get_u2(CHECK);
+  check_property(
+    valid_cp_range(signature_index, cp->length()) &&
+      cp->tag_at(signature_index).is_utf8(), 
+    "Invalid constant pool index %u in Signature attribute in class file %s", 
+    signature_index, CHECK);    
+  k->set_generic_signature(cp->symbol_at(signature_index));
+}
+
+// Parses the trailing class-level attributes table.  Recognizes SourceFile,
+// SourceDebugExtension, InnerClasses, Synthetic and Deprecated; for 1.5+
+// class files also Signature, RuntimeVisibleAnnotations,
+// RuntimeInvisibleAnnotations (only if PreserveAllAnnotations) and
+// EnclosingMethod.  Unknown attributes are skipped by their declared length.
+// Collected annotation bytes are assembled into one array on the klass.
+void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+  ClassFileStream* cfs = stream();
+  // Set inner classes attribute to default sentinel
+  k->set_inner_classes(Universe::the_empty_short_array());
+  cfs->guarantee_more(2, CHECK);  // attributes_count
+  u2 attributes_count = cfs->get_u2_fast();
+  bool parsed_sourcefile_attribute = false;
+  bool parsed_innerclasses_attribute = false;
+  bool parsed_enclosingmethod_attribute = false;
+  u1* runtime_visible_annotations = NULL;
+  int runtime_visible_annotations_length = 0;
+  u1* runtime_invisible_annotations = NULL;
+  int runtime_invisible_annotations_length = 0;
+  // Iterate over attributes
+  while (attributes_count--) {    
+    cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
+    u2 attribute_name_index = cfs->get_u2_fast();
+    u4 attribute_length = cfs->get_u4_fast();
+    check_property(
+      valid_cp_range(attribute_name_index, cp->length()) &&
+        cp->tag_at(attribute_name_index).is_utf8(), 
+      "Attribute name has bad constant pool index %u in class file %s", 
+      attribute_name_index, CHECK);
+    symbolOop tag = cp->symbol_at(attribute_name_index);
+    if (tag == vmSymbols::tag_source_file()) {
+      // Check for SourceFile tag
+      if (_need_verify) {
+        guarantee_property(attribute_length == 2, "Wrong SourceFile attribute length in class file %s", CHECK);
+      }
+      if (parsed_sourcefile_attribute) {
+        classfile_parse_error("Multiple SourceFile attributes in class file %s", CHECK);
+      } else {
+        parsed_sourcefile_attribute = true;
+      }
+      parse_classfile_sourcefile_attribute(cp, k, CHECK);
+    } else if (tag == vmSymbols::tag_source_debug_extension()) {
+      // Check for SourceDebugExtension tag
+      parse_classfile_source_debug_extension_attribute(cp, k, (int)attribute_length, CHECK);
+    } else if (tag == vmSymbols::tag_inner_classes()) {
+      // Check for InnerClasses tag
+      if (parsed_innerclasses_attribute) {
+        classfile_parse_error("Multiple InnerClasses attributes in class file %s", CHECK);
+      } else {
+        parsed_innerclasses_attribute = true;
+      }
+      u2 num_of_classes = parse_classfile_inner_classes_attribute(cp, k, CHECK);
+      if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
+        // Declared length must equal the u2 count plus 4 u2's per tuple.
+        guarantee_property(attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
+                          "Wrong InnerClasses attribute length in class file %s", CHECK);
+      }
+    } else if (tag == vmSymbols::tag_synthetic()) {
+      // Check for Synthetic tag
+      // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec
+      if (attribute_length != 0) {
+        classfile_parse_error(
+          "Invalid Synthetic classfile attribute length %u in class file %s", 
+          attribute_length, CHECK);
+      }
+      parse_classfile_synthetic_attribute(cp, k, CHECK);
+    } else if (tag == vmSymbols::tag_deprecated()) {
+      // Check for Deprecated tag - 4276120
+      // (length is validated but the attribute is otherwise ignored)
+      if (attribute_length != 0) {
+        classfile_parse_error(
+          "Invalid Deprecated classfile attribute length %u in class file %s", 
+          attribute_length, CHECK);
+      }
+    } else if (_major_version >= JAVA_1_5_VERSION) {
+      if (tag == vmSymbols::tag_signature()) {
+        if (attribute_length != 2) {
+          classfile_parse_error(
+            "Wrong Signature attribute length %u in class file %s", 
+            attribute_length, CHECK);
+        }
+        parse_classfile_signature_attribute(cp, k, CHECK);
+      } else if (tag == vmSymbols::tag_runtime_visible_annotations()) {
+        // Remember the raw bytes; they are assembled after the loop.
+        runtime_visible_annotations_length = attribute_length;
+        runtime_visible_annotations = cfs->get_u1_buffer();
+        assert(runtime_visible_annotations != NULL, "null visible annotations");
+        cfs->skip_u1(runtime_visible_annotations_length, CHECK);
+      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_annotations()) {
+        runtime_invisible_annotations_length = attribute_length;
+        runtime_invisible_annotations = cfs->get_u1_buffer();
+        assert(runtime_invisible_annotations != NULL, "null invisible annotations");
+        cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
+      } else if (tag == vmSymbols::tag_enclosing_method()) {
+        if (parsed_enclosingmethod_attribute) {
+          classfile_parse_error("Multiple EnclosingMethod attributes in class file %s", CHECK);
+        }   else {
+          parsed_enclosingmethod_attribute = true;
+        }
+        cfs->guarantee_more(4, CHECK);  // class_index, method_index
+        u2 class_index  = cfs->get_u2_fast();
+        u2 method_index = cfs->get_u2_fast();
+        if (class_index == 0) {
+          classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK);
+        }
+        // Validate the constant pool indices and types
+        if (!cp->is_within_bounds(class_index) ||
+            !cp->tag_at(class_index).is_klass_reference()) {
+          classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
+        }
+        // method_index of 0 means "not enclosed by a method" and is legal.
+        if (method_index != 0 &&
+            (!cp->is_within_bounds(method_index) ||
+             !cp->tag_at(method_index).is_name_and_type())) {
+          classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
+        }           
+        k->set_enclosing_method_indices(class_index, method_index);
+      } else {
+        // Unknown attribute
+        cfs->skip_u1(attribute_length, CHECK);
+      }
+    } else {
+      // Unknown attribute
+      cfs->skip_u1(attribute_length, CHECK);
+    }
+  }
+  typeArrayHandle annotations = assemble_annotations(runtime_visible_annotations,
+                                                     runtime_visible_annotations_length,
+                                                     runtime_invisible_annotations,
+                                                     runtime_invisible_annotations_length,
+                                                     CHECK);
+  k->set_class_annotations(annotations());
+}
+
+
+// Concatenates the raw visible and invisible annotation byte streams into a
+// single permanent byte array (visible bytes first, invisible appended).
+// Returns a null handle when neither stream is present, so callers can store
+// NULL for "no annotations".
+typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annotations,
+                                                      int runtime_visible_annotations_length,
+                                                      u1* runtime_invisible_annotations,
+                                                      int runtime_invisible_annotations_length, TRAPS) {
+  typeArrayHandle annotations;
+  if (runtime_visible_annotations != NULL ||
+      runtime_invisible_annotations != NULL) {
+    typeArrayOop anno = oopFactory::new_permanent_byteArray(runtime_visible_annotations_length +
+                                                            runtime_invisible_annotations_length, CHECK_(annotations));
+    annotations = typeArrayHandle(THREAD, anno);
+    if (runtime_visible_annotations != NULL) {
+      memcpy(annotations->byte_at_addr(0), runtime_visible_annotations, runtime_visible_annotations_length);
+    }
+    if (runtime_invisible_annotations != NULL) {
+      memcpy(annotations->byte_at_addr(runtime_visible_annotations_length), runtime_invisible_annotations, runtime_invisible_annotations_length);
+    }
+  }
+  return annotations;
+}
+
+
+// Applies a static field's ConstantValue initializer (if any) by writing it
+// into the holder klass at fd->offset().  Only primitive types and String
+// (the lone T_OBJECT case, enforced by the debug assert on the signature)
+// can carry a ConstantValue; any other type is a ClassFormatError.
+static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
+  KlassHandle h_k (THREAD, fd->field_holder());
+  assert(h_k.not_null() && fd->is_static(), "just checking");
+  if (fd->has_initial_value()) {
+    BasicType t = fd->field_type();
+    switch (t) {
+      case T_BYTE:
+        h_k()->byte_field_put(fd->offset(), fd->int_initial_value());
+	      break;
+      case T_BOOLEAN:
+        h_k()->bool_field_put(fd->offset(), fd->int_initial_value());
+	      break;
+      case T_CHAR:
+        h_k()->char_field_put(fd->offset(), fd->int_initial_value());
+	      break;
+      case T_SHORT:
+        h_k()->short_field_put(fd->offset(), fd->int_initial_value());
+	      break;
+      case T_INT:
+        h_k()->int_field_put(fd->offset(), fd->int_initial_value());
+        break;
+      case T_FLOAT:
+        h_k()->float_field_put(fd->offset(), fd->float_initial_value());
+        break;
+      case T_DOUBLE:
+        h_k()->double_field_put(fd->offset(), fd->double_initial_value());
+        break;
+      case T_LONG:
+        h_k()->long_field_put(fd->offset(), fd->long_initial_value());
+        break;
+      case T_OBJECT:
+        {
+          #ifdef ASSERT      
+          symbolOop sym = oopFactory::new_symbol("Ljava/lang/String;", CHECK);
+          assert(fd->signature() == sym, "just checking");      
+          #endif
+          // string_initial_value may resolve/intern, hence the CHECK.
+          oop string = fd->string_initial_value(CHECK);
+          h_k()->obj_field_put(fd->offset(), string);
+        }
+        break;
+      default:
+        THROW_MSG(vmSymbols::java_lang_ClassFormatError(), 
+                  "Illegal ConstantValue attribute in class file");
+    }
+  }
+}
+
+
+// Compatibility shim for earlier JDKs whose java.lang.ref.Reference does not
+// declare the "discovered" field.  If the field is absent from the class
+// file, artificially append a private fake field for it so that instances
+// still contain a properly initialized slot.  For 1.5 the check for the
+// "discovered" field should issue a warning if the field is not found; for
+// 1.6 this code should issue a fatal error if the field is not found.
+void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
+  constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS) {
+  // Increment fac.nonstatic_oop_count so that the start of the
+  // next type of non-static oops leaves room for the fake oop.
+  // Do not increment next_nonstatic_oop_offset so that the
+  // fake oop is placed after the java.lang.ref.Reference oop
+  // fields.
+  //
+  // Check the fields in java.lang.ref.Reference for the "discovered"
+  // field.  If it is not present, artificially create a field for it.
+  // This allows this VM to run on early JDKs where the field is not
+  // present.
+  int reference_sig_index = 0;
+  int reference_name_index = 0;
+  int reference_index = 0;
+  int extra = java_lang_ref_Reference::number_of_fake_oop_fields;
+  const int n = (*fields_ptr)()->length();
+  // Scan the parsed field records (one record per instanceKlass::next_offset
+  // shorts) looking for the "discovered" field.
+  for (int i = 0; i < n; i += instanceKlass::next_offset ) {
+    int name_index = 
+    (*fields_ptr)()->ushort_at(i + instanceKlass::name_index_offset);
+    int sig_index  = 
+      (*fields_ptr)()->ushort_at(i + instanceKlass::signature_index_offset);
+    symbolOop f_name = cp->symbol_at(name_index);
+    symbolOop f_sig  = cp->symbol_at(sig_index);
+    if (f_sig == vmSymbols::reference_signature() && reference_index == 0) {
+      // Save the index of a Reference-typed field's signature for later use.
+      // The fake "discovered" field has no entries in the constant pool, so
+      // the index for its signature cannot be extracted from the constant
+      // pool; it will be needed later, however.  Its signature is
+      // vmSymbols::reference_signature(), so save an index for that
+      // signature here.
+      reference_sig_index = sig_index;
+      reference_name_index = name_index;
+      reference_index = i;
+    }
+    if (f_name == vmSymbols::reference_discovered_name() &&
+      f_sig == vmSymbols::reference_signature()) {
+      // The class file already declares "discovered"; no fake field needed.
+      extra = 0;
+      break;
+    }
+  }
+  if (extra != 0) { 
+    // The count below is fake but will force extra non-static oop fields
+    // and a corresponding non-static oop map block to be allocated.
+    fac_ptr->nonstatic_oop_count += extra;
+    // Add the additional entry to "fields" so that the klass
+    // contains the "discovered" field and the field will be initialized
+    // in instances of the object.
+    int fields_with_fix_length = (*fields_ptr)()->length() + 
+      instanceKlass::next_offset;
+    typeArrayOop ff = oopFactory::new_permanent_shortArray(
+                                                fields_with_fix_length, CHECK);
+    typeArrayHandle fields_with_fix(THREAD, ff);
+
+    // Take everything from the original but the length.
+    for (int idx = 0; idx < (*fields_ptr)->length(); idx++) {
+      fields_with_fix->ushort_at_put(idx, (*fields_ptr)->ushort_at(idx));
+    }
+
+    // Add the fake field at the end.
+    int i = (*fields_ptr)->length();
+    // There is no name index for the fake "discovered" field nor a
+    // signature, but a signature is needed so that the field will be
+    // properly initialized.  Use one found for one of the other reference
+    // fields.  Be sure the index for the name is 0.  In
+    // fieldDescriptor::initialize() the index of the name is checked; that
+    // check is bypassed for the last nonstatic oop field in a
+    // java.lang.ref.Reference, which is assumed to be this artificial
+    // "discovered" field.  An assertion checks that the name index is 0.
+    assert(reference_index != 0, "Missing signature for reference");
+
+    int j;
+    for (j = 0; j < instanceKlass::next_offset; j++) {
+      fields_with_fix->ushort_at_put(i + j, 
+	(*fields_ptr)->ushort_at(reference_index +j));
+    }
+    // Clear the public access flag and set the private access flag.
+    short flags;
+    flags = 
+      fields_with_fix->ushort_at(i + instanceKlass::access_flags_offset);
+    // NOTE(review): this asserts that NO recognized field modifiers are set
+    // on the copied record's flags — confirm that is the intended invariant.
+    assert(!(flags & JVM_RECOGNIZED_FIELD_MODIFIERS), "Unexpected access flags set");
+    flags = flags & (~JVM_ACC_PUBLIC);
+    flags = flags | JVM_ACC_PRIVATE;
+    AccessFlags access_flags;
+    access_flags.set_flags(flags);
+    assert(!access_flags.is_public(), "Failed to clear public flag");
+    assert(access_flags.is_private(), "Failed to set private flag");
+    fields_with_fix->ushort_at_put(i + instanceKlass::access_flags_offset, 
+      flags);
+
+    assert(fields_with_fix->ushort_at(i + instanceKlass::name_index_offset) 
+      == reference_name_index, "The fake reference name is incorrect");
+    assert(fields_with_fix->ushort_at(i + instanceKlass::signature_index_offset)
+      == reference_sig_index, "The fake reference signature is incorrect");
+    // The type of the field is stored in the low_offset entry during
+    // parsing.
+    assert(fields_with_fix->ushort_at(i + instanceKlass::low_offset) ==
+      NONSTATIC_OOP, "The fake reference type is incorrect");
+
+    // "fields" is allocated in the permanent generation.  Discard
+    // it and let it be collected.
+    (*fields_ptr) = fields_with_fix;
+  }
+  return;
+}
+
+
+// Reserve room for the fake (hidden) oop fields wedged into every
+// java.lang.Class instance by bumping the non-static oop count.  The
+// corresponding offset adjustment happens later in java_lang_Class_fix_post().
+// NOTE(review): methods_ptr is not used by this implementation.
+void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr, 
+  FieldAllocationCount *fac_ptr, TRAPS) {
+  // Add fake fields for java.lang.Class instances
+  //
+  // This is not particularly nice. We should consider adding a
+  // private transient object field at the Java level to
+  // java.lang.Class. Alternatively we could add a subclass of
+  // instanceKlass which provides an accessor and size computer for
+  // this field, but that appears to be more code than this hack.
+  //
+  // NOTE that we wedge these in at the beginning rather than the
+  // end of the object because the Class layout changed between JDK
+  // 1.3 and JDK 1.4 with the new reflection implementation; some
+  // nonstatic oop fields were added at the Java level. The offsets
+  // of these fake fields can't change between these two JDK
+  // versions because when the offsets are computed at bootstrap
+  // time we don't know yet which version of the JDK we're running in.
+
+  // The values below are fake but will force two non-static oop fields and 
+  // a corresponding non-static oop map block to be allocated.
+  const int extra = java_lang_Class::number_of_fake_oop_fields;
+  fac_ptr->nonstatic_oop_count += extra;
+}
+
+
+// Second half of the java.lang.Class fake-field hack (see
+// java_lang_Class_fix_pre): advance the next non-static oop offset past the
+// fake fields so the real Java fields are laid out after them.
+void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_ptr) {
+  // Cause the extra fake fields in java.lang.Class to show up before
+  // the Java fields for layout compatibility between 1.3 and 1.4
+  // Incrementing next_nonstatic_oop_offset here advances the 
+  // location where the real java fields are placed.
+  const int extra = java_lang_Class::number_of_fake_oop_fields;
+  (*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
+}
+
+
+instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name, 
+                                                    Handle class_loader, 
+                                                    Handle protection_domain, 
+                                                    symbolHandle& parsed_name,
+                                                    TRAPS) {
+  // So that JVMTI can cache class file in the state before retransformable agents
+  // have modified it
+  unsigned char *cached_class_file_bytes = NULL;
+  jint cached_class_file_length;
+
+  ClassFileStream* cfs = stream();
+  // Timing
+  PerfTraceTime vmtimer(ClassLoader::perf_accumulated_time());
+
+  _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
+
+  if (JvmtiExport::should_post_class_file_load_hook()) {
+    unsigned char* ptr = cfs->buffer();
+    unsigned char* end_ptr = cfs->buffer() + cfs->length();
+
+    JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, 
+                                           &ptr, &end_ptr,
+                                           &cached_class_file_bytes, 
+                                           &cached_class_file_length);
+
+    if (ptr != cfs->buffer()) {
+      // JVMTI agent has modified class file data.
+      // Set new class file stream using JVMTI agent modified
+      // class file data.       
+      cfs = new ClassFileStream(ptr, end_ptr - ptr, cfs->source());
+      set_stream(cfs);
+    }
+  }
+
+
+  instanceKlassHandle nullHandle;
+
+  // Figure out whether we can skip format checking (matching classic VM behavior)
+  _need_verify = Verifier::should_verify_for(class_loader());
+  
+  // Set the verify flag in stream
+  cfs->set_verify(_need_verify);
+
+  // Save the class file name for easier error message printing.
+  _class_name = name.not_null()? name : vmSymbolHandles::unknown_class_name();
+
+  cfs->guarantee_more(8, CHECK_(nullHandle));  // magic, major, minor
+  // Magic value
+  u4 magic = cfs->get_u4_fast();
+  guarantee_property(magic == JAVA_CLASSFILE_MAGIC, 
+                     "Incompatible magic value %u in class file %s", 
+                     magic, CHECK_(nullHandle));
+
+  // Version numbers  
+  u2 minor_version = cfs->get_u2_fast();
+  u2 major_version = cfs->get_u2_fast();
+
+  // Check version numbers - we check this even with verifier off
+  if (!is_supported_version(major_version, minor_version)) {
+    if (name.is_null()) {
+      Exceptions::fthrow( 
+        THREAD_AND_LOCATION,
+        vmSymbolHandles::java_lang_UnsupportedClassVersionError(), 
+        "Unsupported major.minor version %u.%u",
+        major_version, 
+        minor_version);
+    } else {
+      ResourceMark rm(THREAD);
+      Exceptions::fthrow( 
+        THREAD_AND_LOCATION,
+        vmSymbolHandles::java_lang_UnsupportedClassVersionError(), 
+        "%s : Unsupported major.minor version %u.%u",
+        name->as_C_string(),
+        major_version, 
+        minor_version);
+    }
+    return nullHandle;
+  }
+
+  _major_version = major_version;
+  _minor_version = minor_version;
+
+
+  // Check if verification needs to be relaxed for this class file
+  // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
+  _relax_verify = Verifier::relax_verify_for(class_loader());
+
+  // Constant pool
+  constantPoolHandle cp = parse_constant_pool(CHECK_(nullHandle));
+  int cp_size = cp->length();
+
+  cfs->guarantee_more(8, CHECK_(nullHandle));  // flags, this_class, super_class, infs_len
+
+  // Access flags
+  AccessFlags access_flags;
+  jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS;
+
+  if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
+    // Set abstract bit for old class files for backward compatibility
+    flags |= JVM_ACC_ABSTRACT;
+  }
+  verify_legal_class_modifiers(flags, CHECK_(nullHandle));
+  access_flags.set_flags(flags);
+
+  // This class and superclass
+  instanceKlassHandle super_klass;
+  u2 this_class_index = cfs->get_u2_fast();
+  check_property(
+    valid_cp_range(this_class_index, cp_size) &&
+      cp->tag_at(this_class_index).is_unresolved_klass(), 
+    "Invalid this class index %u in constant pool in class file %s", 
+    this_class_index, CHECK_(nullHandle));
+
+  symbolHandle class_name (THREAD, cp->unresolved_klass_at(this_class_index));
+  assert(class_name.not_null(), "class_name can't be null");
+
+  // It's important to set parsed_name *before* resolving the super class.
+  // (it's used for cleanup by the caller if parsing fails)
+  parsed_name = class_name;
+
+  // Update _class_name which could be null previously to be class_name
+  _class_name = class_name;
+
+  // Don't need to check whether this class name is legal or not.
+  // It has been checked when constant pool is parsed.
+  // However, make sure it is not an array type.
+  if (_need_verify) {
+    guarantee_property(class_name->byte_at(0) != JVM_SIGNATURE_ARRAY, 
+                       "Bad class name in class file %s", 
+                       CHECK_(nullHandle));
+  }
+  
+  klassOop preserve_this_klass;   // for storing result across HandleMark
+
+  // release all handles when parsing is done
+  { HandleMark hm(THREAD);
+
+    // Checks if name in class file matches requested name
+    if (name.not_null() && class_name() != name()) {
+      ResourceMark rm(THREAD);
+      Exceptions::fthrow(
+        THREAD_AND_LOCATION,
+        vmSymbolHandles::java_lang_NoClassDefFoundError(), 
+        "%s (wrong name: %s)", 
+        name->as_C_string(), 
+        class_name->as_C_string()
+      );
+      return nullHandle;
+    }
+
+    if (TraceClassLoadingPreorder) {
+      tty->print("[Loading %s", name()->as_klass_external_name());
+      if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
+      tty->print_cr("]");
+    }
+
+    u2 super_class_index = cfs->get_u2_fast();
+    if (super_class_index == 0) {
+      check_property(class_name() == vmSymbols::java_lang_Object(),
+                     "Invalid superclass index %u in class file %s", 
+                     super_class_index,
+                     CHECK_(nullHandle));
+    } else {
+      check_property(valid_cp_range(super_class_index, cp_size) &&
+                     cp->tag_at(super_class_index).is_unresolved_klass(), 
+                     "Invalid superclass index %u in class file %s", 
+                     super_class_index,
+                     CHECK_(nullHandle));
+      // The class name should be legal because it is checked when parsing constant pool.
+      // However, make sure it is not an array type.
+      if (_need_verify) {
+        guarantee_property(cp->unresolved_klass_at(super_class_index)->byte_at(0) != JVM_SIGNATURE_ARRAY, 
+                          "Bad superclass name in class file %s", CHECK_(nullHandle));
+      }
+    }
+
+    // Interfaces
+    u2 itfs_len = cfs->get_u2_fast();
+    objArrayHandle local_interfaces;
+    if (itfs_len == 0) {
+      local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
+    } else {
+      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, &vmtimer, _class_name, CHECK_(nullHandle));
+    }
+
+    // Fields (offsets are filled in later)
+    struct FieldAllocationCount fac = {0,0,0,0,0,0,0,0,0,0};
+    objArrayHandle fields_annotations;
+    typeArrayHandle fields = parse_fields(cp, access_flags.is_interface(), &fac, &fields_annotations, CHECK_(nullHandle));
+    // Methods
+    bool has_final_method = false;
+    AccessFlags promoted_flags;
+    promoted_flags.set_flags(0);
+    // These need to be oop pointers because they are allocated lazily
+    // inside parse_methods inside a nested HandleMark
+    objArrayOop methods_annotations_oop = NULL;
+    objArrayOop methods_parameter_annotations_oop = NULL;
+    objArrayOop methods_default_annotations_oop = NULL;
+    objArrayHandle methods = parse_methods(cp, access_flags.is_interface(), 
+                                           &promoted_flags,
+                                           &has_final_method,
+                                           &methods_annotations_oop,
+                                           &methods_parameter_annotations_oop,
+                                           &methods_default_annotations_oop,
+                                           CHECK_(nullHandle));
+
+    objArrayHandle methods_annotations(THREAD, methods_annotations_oop);
+    objArrayHandle methods_parameter_annotations(THREAD, methods_parameter_annotations_oop);
+    objArrayHandle methods_default_annotations(THREAD, methods_default_annotations_oop);
+
+    // We check super class after class file is parsed and format is checked
+    if (super_class_index > 0) {
+      symbolHandle sk (THREAD, cp->klass_name_at(super_class_index));
+      if (access_flags.is_interface()) {
+        // Before attempting to resolve the superclass, check for class format
+        // errors not checked yet.
+        guarantee_property(sk() == vmSymbols::java_lang_Object(),
+                           "Interfaces must have java.lang.Object as superclass in class file %s",
+                           CHECK_(nullHandle));
+      }
+      klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+                                                           sk, 
+                                                           class_loader, 
+                                                           protection_domain, 
+                                                           true,
+                                                           CHECK_(nullHandle));
+      KlassHandle kh (THREAD, k);
+      super_klass = instanceKlassHandle(THREAD, kh());
+      if (super_klass->is_interface()) {
+        ResourceMark rm(THREAD);
+        Exceptions::fthrow(
+          THREAD_AND_LOCATION,
+          vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
+          "class %s has interface %s as super class",
+          class_name->as_klass_external_name(),
+          super_klass->external_name()
+        );
+        return nullHandle;
+      }
+      // Make sure super class is not final
+      if (super_klass->is_final()) {
+        THROW_MSG_(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class", nullHandle);
+      }
+    }
+
+    // Compute the transitive list of all unique interfaces implemented by this class
+    objArrayHandle transitive_interfaces = compute_transitive_interfaces(super_klass, local_interfaces, CHECK_(nullHandle));
+
+    // sort methods
+    typeArrayHandle method_ordering = sort_methods(methods,
+                                                   methods_annotations,
+                                                   methods_parameter_annotations,
+                                                   methods_default_annotations,
+                                                   CHECK_(nullHandle));
+
+    // promote flags from parse_methods() to the klass' flags
+    access_flags.add_promoted_flags(promoted_flags.as_int());
+
+    // Size of Java vtable (in words)
+    int vtable_size = 0;    
+    int itable_size = 0;
+    int num_miranda_methods = 0;
+
+    klassVtable::compute_vtable_size_and_num_mirandas(vtable_size, 
+                                                      num_miranda_methods, 
+                                                      super_klass(),
+                                                      methods(),
+                                                      access_flags,
+                                                      class_loader(),
+                                                      class_name(), 
+                                                      local_interfaces());  
+       
+    // Size of Java itable (in words)
+    itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);  
+    
+    // Field size and offset computation
+    int nonstatic_field_size = super_klass() == NULL ? 0 : super_klass->nonstatic_field_size();
+#ifndef PRODUCT
+    int orig_nonstatic_field_size = 0;
+#endif
+    int static_field_size = 0;
+    int next_static_oop_offset;
+    int next_static_double_offset;
+    int next_static_word_offset;
+    int next_static_short_offset;
+    int next_static_byte_offset;
+    int next_static_type_offset;
+    int next_nonstatic_oop_offset;
+    int next_nonstatic_double_offset;
+    int next_nonstatic_word_offset;
+    int next_nonstatic_short_offset;
+    int next_nonstatic_byte_offset;
+    int next_nonstatic_type_offset;
+    int first_nonstatic_oop_offset;
+    int first_nonstatic_field_offset;
+    int next_nonstatic_field_offset;
+
+    // Calculate the starting byte offsets
+    next_static_oop_offset      = (instanceKlass::header_size() + 
+		 		  align_object_offset(vtable_size) + 
+				  align_object_offset(itable_size)) * wordSize;
+    next_static_double_offset   = next_static_oop_offset + 
+			 	  (fac.static_oop_count * oopSize);
+    if ( fac.static_double_count && 
+	 (Universe::field_type_should_be_aligned(T_DOUBLE) || 
+ 	  Universe::field_type_should_be_aligned(T_LONG)) ) {
+      next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
+    }
+
+    next_static_word_offset     = next_static_double_offset + 
+				  (fac.static_double_count * BytesPerLong);
+    next_static_short_offset    = next_static_word_offset + 
+				  (fac.static_word_count * BytesPerInt);
+    next_static_byte_offset     = next_static_short_offset + 
+				  (fac.static_short_count * BytesPerShort);
+    next_static_type_offset     = align_size_up((next_static_byte_offset +
+			          fac.static_byte_count ), wordSize );
+    static_field_size 	        = (next_static_type_offset - 
+			          next_static_oop_offset) / wordSize;
+    first_nonstatic_field_offset = (instanceOopDesc::header_size() + 
+				    nonstatic_field_size) * wordSize;
+    next_nonstatic_field_offset = first_nonstatic_field_offset;
+
+    // Add fake fields for java.lang.Class instances (also see below)
+    if (class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
+      java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle));
+    }
+
+    // Add a fake "discovered" field if it is not present 
+    // for compatibility with earlier jdk's.
+    if (class_name() == vmSymbols::java_lang_ref_Reference() 
+      && class_loader.is_null()) {
+      java_lang_ref_Reference_fix_pre(&fields, cp, &fac, CHECK_(nullHandle));
+    }
+    // end of "discovered" field compactibility fix
+
+    int nonstatic_double_count = fac.nonstatic_double_count;
+    int nonstatic_word_count   = fac.nonstatic_word_count;
+    int nonstatic_short_count  = fac.nonstatic_short_count;
+    int nonstatic_byte_count   = fac.nonstatic_byte_count;
+    int nonstatic_oop_count    = fac.nonstatic_oop_count;
+
+    // Prepare list of oops for oop maps generation.
+    u2* nonstatic_oop_offsets;
+    u2* nonstatic_oop_length;
+    int nonstatic_oop_map_count = 0;
+
+    nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2,  nonstatic_oop_count+1);
+    nonstatic_oop_length  = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, u2,  nonstatic_oop_count+1);
+
+    // Add fake fields for java.lang.Class instances (also see above).
+    // FieldsAllocationStyle and CompactFields values will be reset to default.
+    if(class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
+      java_lang_Class_fix_post(&next_nonstatic_field_offset);
+      nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
+      int fake_oop_count       = (( next_nonstatic_field_offset -
+                                    first_nonstatic_field_offset ) / oopSize);
+      nonstatic_oop_length [0] = (u2)fake_oop_count;
+      nonstatic_oop_map_count  = 1;
+      nonstatic_oop_count     -= fake_oop_count;
+      first_nonstatic_oop_offset = first_nonstatic_field_offset;
+    } else {
+      first_nonstatic_oop_offset = 0; // will be set for first oop field
+    }
+
+#ifndef PRODUCT
+    if( PrintCompactFieldsSavings ) {
+      next_nonstatic_double_offset = next_nonstatic_field_offset + 
+                                     (nonstatic_oop_count * oopSize);
+      if ( nonstatic_double_count > 0 ) {
+        next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong); 
+      }
+      next_nonstatic_word_offset  = next_nonstatic_double_offset + 
+                                    (nonstatic_double_count * BytesPerLong);
+      next_nonstatic_short_offset = next_nonstatic_word_offset + 
+                                    (nonstatic_word_count * BytesPerInt);
+      next_nonstatic_byte_offset  = next_nonstatic_short_offset + 
+                                    (nonstatic_short_count * BytesPerShort);
+      next_nonstatic_type_offset  = align_size_up((next_nonstatic_byte_offset +
+                                    nonstatic_byte_count ), wordSize );
+      orig_nonstatic_field_size   = nonstatic_field_size + 
+        ((next_nonstatic_type_offset - first_nonstatic_field_offset)/wordSize);
+    }
+#endif
+    bool compact_fields   = CompactFields;
+    int  allocation_style = FieldsAllocationStyle;
+    if( allocation_style < 0 || allocation_style > 1 ) { // Out of range?
+      assert(false, "0 <= FieldsAllocationStyle <= 1");
+      allocation_style = 1; // Optimistic
+    }
+
+    // The next classes have predefined hard-coded fields offsets
+    // (see in JavaClasses::compute_hard_coded_offsets()).
+    // Use default fields allocation order for them.
+    if( (allocation_style != 0 || compact_fields ) && class_loader.is_null() &&
+        (class_name() == vmSymbols::java_lang_AssertionStatusDirectives() ||
+         class_name() == vmSymbols::java_lang_Class() ||
+         class_name() == vmSymbols::java_lang_ClassLoader() ||
+         class_name() == vmSymbols::java_lang_ref_Reference() ||
+         class_name() == vmSymbols::java_lang_ref_SoftReference() ||
+         class_name() == vmSymbols::java_lang_StackTraceElement() ||
+         class_name() == vmSymbols::java_lang_String() ||
+         class_name() == vmSymbols::java_lang_Throwable()) ) {
+      allocation_style = 0;     // Allocate oops first
+      compact_fields   = false; // Don't compact fields
+    }
+
+    if( allocation_style == 0 ) {
+      // Fields order: oops, longs/doubles, ints, shorts/chars, bytes
+      next_nonstatic_oop_offset    = next_nonstatic_field_offset;
+      next_nonstatic_double_offset = next_nonstatic_oop_offset + 
+			 	     (nonstatic_oop_count * oopSize);
+    } else if( allocation_style == 1 ) {
+      // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
+      next_nonstatic_double_offset = next_nonstatic_field_offset;
+    } else {
+      ShouldNotReachHere();
+    }
+
+    int nonstatic_oop_space_count   = 0;
+    int nonstatic_word_space_count  = 0;
+    int nonstatic_short_space_count = 0;
+    int nonstatic_byte_space_count  = 0;
+    int nonstatic_oop_space_offset;
+    int nonstatic_word_space_offset;
+    int nonstatic_short_space_offset;
+    int nonstatic_byte_space_offset;
+
+    if( nonstatic_double_count > 0 ) {
+      int offset = next_nonstatic_double_offset;
+      next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
+      if( compact_fields && offset != next_nonstatic_double_offset ) {
+        // Allocate available fields into the gap before double field.
+        int length = next_nonstatic_double_offset - offset;
+        assert(length == BytesPerInt, "");
+        nonstatic_word_space_offset = offset;
+        if( nonstatic_word_count > 0 ) {
+          nonstatic_word_count      -= 1;
+          nonstatic_word_space_count = 1; // Only one will fit
+          length -= BytesPerInt;
+          offset += BytesPerInt;
+        }
+        nonstatic_short_space_offset = offset;
+        while( length >= BytesPerShort && nonstatic_short_count > 0 ) {
+          nonstatic_short_count       -= 1;
+          nonstatic_short_space_count += 1;
+          length -= BytesPerShort;
+          offset += BytesPerShort;
+        }
+        nonstatic_byte_space_offset = offset;
+        while( length > 0 && nonstatic_byte_count > 0 ) {
+          nonstatic_byte_count       -= 1;
+          nonstatic_byte_space_count += 1;
+          length -= 1;
+        }
+        // Allocate oop field in the gap if there are no other fields for that.
+        nonstatic_oop_space_offset = offset;
+        if( length >= oopSize && nonstatic_oop_count > 0 &&  
+            allocation_style != 0 ) { // when oop fields not first
+          nonstatic_oop_count      -= 1;
+          nonstatic_oop_space_count = 1; // Only one will fit
+          length -= oopSize;
+          offset += oopSize;
+        }
+      }
+    }
+
+    next_nonstatic_word_offset  = next_nonstatic_double_offset + 
+                                  (nonstatic_double_count * BytesPerLong);
+    next_nonstatic_short_offset = next_nonstatic_word_offset + 
+                                  (nonstatic_word_count * BytesPerInt);
+    next_nonstatic_byte_offset  = next_nonstatic_short_offset + 
+                                  (nonstatic_short_count * BytesPerShort);
+
+    int notaligned_offset;
+    if( allocation_style == 0 ) {
+      notaligned_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
+    } else { // allocation_style == 1 
+      next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
+      if( nonstatic_oop_count > 0 ) {
+        notaligned_offset = next_nonstatic_oop_offset;
+        next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize);
+      }
+      notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize);
+    }
+    next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
+    nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
+                                      - first_nonstatic_field_offset)/wordSize);
+
+    // Iterate over fields again and compute correct offsets.
+    // The field allocation type was temporarily stored in the offset slot.
+    // oop fields are located before non-oop fields (static and non-static).
+    int len = fields->length();
+    for (int i = 0; i < len; i += instanceKlass::next_offset) {
+      int real_offset;
+      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i+4);
+      switch (atype) {
+        case STATIC_OOP:
+          real_offset = next_static_oop_offset;
+          next_static_oop_offset += oopSize;
+          break;
+        case STATIC_BYTE:
+          real_offset = next_static_byte_offset;
+          next_static_byte_offset += 1;
+          break;
+        case STATIC_SHORT:
+          real_offset = next_static_short_offset;
+          next_static_short_offset += BytesPerShort;
+          break;
+        case STATIC_WORD:
+          real_offset = next_static_word_offset;
+          next_static_word_offset += BytesPerInt;
+          break;
+        case STATIC_ALIGNED_DOUBLE:
+        case STATIC_DOUBLE:
+          real_offset = next_static_double_offset;
+          next_static_double_offset += BytesPerLong;
+          break;
+        case NONSTATIC_OOP:
+          if( nonstatic_oop_space_count > 0 ) {
+            real_offset = nonstatic_oop_space_offset;
+            nonstatic_oop_space_offset += oopSize;
+            nonstatic_oop_space_count  -= 1;
+          } else {
+            real_offset = next_nonstatic_oop_offset;
+            next_nonstatic_oop_offset += oopSize;
+          }
+          // Update oop maps
+          if( nonstatic_oop_map_count > 0 &&
+              nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == 
+              (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
+            // Extend current oop map
+            nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
+          } else {
+            // Create new oop map
+            nonstatic_oop_offsets[nonstatic_oop_map_count] = (u2)real_offset;
+            nonstatic_oop_length [nonstatic_oop_map_count] = 1;
+            nonstatic_oop_map_count += 1;
+            if( first_nonstatic_oop_offset == 0 ) { // Undefined
+              first_nonstatic_oop_offset = real_offset;
+            }
+          }
+          break;
+        case NONSTATIC_BYTE:
+          if( nonstatic_byte_space_count > 0 ) {
+            real_offset = nonstatic_byte_space_offset;
+            nonstatic_byte_space_offset += 1;
+            nonstatic_byte_space_count  -= 1;
+          } else {
+            real_offset = next_nonstatic_byte_offset;
+            next_nonstatic_byte_offset += 1;
+          }
+          break;
+        case NONSTATIC_SHORT:
+          if( nonstatic_short_space_count > 0 ) {
+            real_offset = nonstatic_short_space_offset;
+            nonstatic_short_space_offset += BytesPerShort;
+            nonstatic_short_space_count  -= 1;
+          } else {
+            real_offset = next_nonstatic_short_offset;
+            next_nonstatic_short_offset += BytesPerShort;
+          }
+          break;
+        case NONSTATIC_WORD:
+          if( nonstatic_word_space_count > 0 ) {
+            real_offset = nonstatic_word_space_offset;
+            nonstatic_word_space_offset += BytesPerInt;
+            nonstatic_word_space_count  -= 1;
+          } else {
+            real_offset = next_nonstatic_word_offset;
+            next_nonstatic_word_offset += BytesPerInt;
+          }
+          break;
+        case NONSTATIC_ALIGNED_DOUBLE:
+        case NONSTATIC_DOUBLE:
+          real_offset = next_nonstatic_double_offset;
+          next_nonstatic_double_offset += BytesPerLong;
+          break;
+        default:
+          ShouldNotReachHere();
+      }
+      fields->short_at_put(i+4, extract_low_short_from_int(real_offset) );
+      fields->short_at_put(i+5, extract_high_short_from_int(real_offset) ); 
+    }
+
+    // Size of instances
+    int instance_size;
+
+    instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
+
+    assert(instance_size == align_object_size(instanceOopDesc::header_size() + nonstatic_field_size), "consistent layout helper value");
+
+    // Size of non-static oop map blocks (in words) allocated at end of klass
+    int nonstatic_oop_map_size = compute_oop_map_size(super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset);
+
+    // Compute reference type
+    ReferenceType rt;
+    if (super_klass() == NULL) {
+      rt = REF_NONE;
+    } else {
+      rt = super_klass->reference_type();
+    }
+
+    // We can now create the basic klassOop for this klass    
+    klassOop ik = oopFactory::new_instanceKlass(
+                                    vtable_size, itable_size, 
+                                    static_field_size, nonstatic_oop_map_size, 
+                                    rt, CHECK_(nullHandle));
+    instanceKlassHandle this_klass (THREAD, ik); 
+
+    assert(this_klass->static_field_size() == static_field_size && 
+           this_klass->nonstatic_oop_map_size() == nonstatic_oop_map_size, "sanity check");
+    
+    // Fill in information already parsed
+    this_klass->set_access_flags(access_flags);
+    jint lh = Klass::instance_layout_helper(instance_size, false);
+    this_klass->set_layout_helper(lh);
+    assert(this_klass->oop_is_instance(), "layout is correct");
+    assert(this_klass->size_helper() == instance_size, "correct size_helper");
+    // Not yet: supers are done below to support the new subtype-checking fields
+    //this_klass->set_super(super_klass());  
+    this_klass->set_class_loader(class_loader());    
+    this_klass->set_nonstatic_field_size(nonstatic_field_size);
+    this_klass->set_static_oop_field_size(fac.static_oop_count);       
+    cp->set_pool_holder(this_klass());
+    this_klass->set_constants(cp());
+    this_klass->set_local_interfaces(local_interfaces());
+    this_klass->set_fields(fields());
+    this_klass->set_methods(methods());
+    if (has_final_method) {
+      this_klass->set_has_final_method();
+    }
+    this_klass->set_method_ordering(method_ordering());
+    this_klass->set_initial_method_idnum(methods->length());
+    this_klass->set_name(cp->klass_name_at(this_class_index));
+    this_klass->set_protection_domain(protection_domain());
+    this_klass->set_fields_annotations(fields_annotations());
+    this_klass->set_methods_annotations(methods_annotations());
+    this_klass->set_methods_parameter_annotations(methods_parameter_annotations());
+    this_klass->set_methods_default_annotations(methods_default_annotations());
+
+    this_klass->set_minor_version(minor_version);
+    this_klass->set_major_version(major_version);
+
+    if (cached_class_file_bytes != NULL) {
+      // JVMTI: we have an instanceKlass now, tell it about the cached bytes
+      this_klass->set_cached_class_file(cached_class_file_bytes, 
+                                        cached_class_file_length);
+    }
+      
+    // Miranda methods
+    if ((num_miranda_methods > 0) || 
+	// if this class introduced new miranda methods or
+	(super_klass.not_null() && (super_klass->has_miranda_methods()))
+	// super class exists and this class inherited miranda methods
+	) {
+      this_klass->set_has_miranda_methods(); // then set a flag
+    }
+
+    // Additional attributes
+    parse_classfile_attributes(cp, this_klass, CHECK_(nullHandle));
+
+    // Make sure this is the end of class file stream
+    guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
+
+    // Initialize static fields
+    this_klass->do_local_static_fields(&initialize_static_field, CHECK_(nullHandle));
+
+    // VerifyOops believes that once this has been set, the object is completely loaded.
+    // Compute transitive closure of interfaces this class implements
+    this_klass->set_transitive_interfaces(transitive_interfaces());    
+
+    // Fill in information needed to compute superclasses.
+    this_klass->initialize_supers(super_klass(), CHECK_(nullHandle));
+
+    // Initialize itable offset tables
+    klassItable::setup_itable_offset_table(this_klass);
+
+    // Do final class setup
+    fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_length);
+
+    set_precomputed_flags(this_klass);
+
+    // reinitialize modifiers, using the InnerClasses attribute
+    int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle));
+    this_klass->set_modifier_flags(computed_modifiers);
+
+    // check if this class can access its super class
+    check_super_class_access(this_klass, CHECK_(nullHandle));
+
+    // check if this class can access its superinterfaces
+    check_super_interface_access(this_klass, CHECK_(nullHandle));
+
+    // check if this class overrides any final method
+    check_final_method_override(this_klass, CHECK_(nullHandle));
+
+    // check that if this class is an interface then it doesn't have static methods
+    if (this_klass->is_interface()) {
+      check_illegal_static_method(this_klass, CHECK_(nullHandle));
+    }
+
+    ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), 
+                                             false /* not shared class */);
+	  
+    if (TraceClassLoading) {
+      // print in a single call to reduce interleaving of output
+      if (cfs->source() != NULL) {
+        tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
+                   cfs->source());
+      } else if (class_loader.is_null()) {
+        if (THREAD->is_Java_thread()) {
+          klassOop caller = ((JavaThread*)THREAD)->security_get_caller_class(1);
+          tty->print("[Loaded %s by instance of %s]\n",
+                     this_klass->external_name(),
+                     instanceKlass::cast(caller)->external_name());
+        } else {
+          tty->print("[Loaded %s]\n", this_klass->external_name());
+        }
+      } else {
+        ResourceMark rm;
+        tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
+                   instanceKlass::cast(class_loader->klass())->external_name());
+      }
+    }
+
+    if (TraceClassResolution) {
+      // print out the superclass.
+      const char * from = Klass::cast(this_klass())->external_name();
+      if (this_klass->java_super() != NULL) {
+        tty->print("RESOLVE %s %s\n", from, instanceKlass::cast(this_klass->java_super())->external_name());
+      }
+      // print out each of the interface classes referred to by this class.
+      objArrayHandle local_interfaces(THREAD, this_klass->local_interfaces());
+      if (!local_interfaces.is_null()) {
+        int length = local_interfaces->length();
+        for (int i = 0; i < length; i++) {
+          klassOop k = klassOop(local_interfaces->obj_at(i)); 
+          instanceKlass* to_class = instanceKlass::cast(k);
+          const char * to = to_class->external_name();
+          tty->print("RESOLVE %s %s\n", from, to);
+        }
+      }
+    }
+
+#ifndef PRODUCT
+    if( PrintCompactFieldsSavings ) {
+      if( nonstatic_field_size < orig_nonstatic_field_size ) {
+        tty->print("[Saved %d of %3d words in %s]\n", 
+                 orig_nonstatic_field_size - nonstatic_field_size,
+                 orig_nonstatic_field_size, this_klass->external_name());
+      } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
+        tty->print("[Wasted %d over %3d words in %s]\n", 
+                 nonstatic_field_size - orig_nonstatic_field_size,
+                 orig_nonstatic_field_size, this_klass->external_name());
+      }
+    }
+#endif
+
+    // preserve result across HandleMark  
+    preserve_this_klass = this_klass();    
+  }
+
+  // Create new handle outside HandleMark
+  instanceKlassHandle this_klass (THREAD, preserve_this_klass);
+  debug_only(this_klass->as_klassOop()->verify();)
+
+  return this_klass;
+}
+
+
+// Compute the number of OopMapBlock entries needed for the klass being
+// parsed: the superclass's maps are inherited, and locally declared oop
+// fields either extend the superclass's last map (when there is no gap
+// between the super's last oop field and our first one) or add new blocks.
+int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_map_count, int first_nonstatic_oop_offset) {
+  int map_size = super.is_null() ? 0 : super->nonstatic_oop_map_size();
+  if (nonstatic_oop_map_count > 0) {
+    // We have oops to add to map
+    if (map_size == 0) {
+      map_size = nonstatic_oop_map_count;
+    } else {
+      // Check whether we should add a new map block or whether the last one can be extended
+      OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
+      OopMapBlock* last_map = first_map + map_size - 1;
+
+      // Offset of the first byte just past the super's last oop field.
+      int next_offset = last_map->offset() + (last_map->length() * oopSize);
+      if (next_offset == first_nonstatic_oop_offset) {
+        // There is no gap between superklass's last oop field and first 
+        // local oop field, merge maps.
+        nonstatic_oop_map_count -= 1;
+      } else {
+        // Superklass didn't end with a oop field, add extra maps
+        assert(next_offset<first_nonstatic_oop_offset, "just checking");
+      }
+      map_size += nonstatic_oop_map_count;
+    }
+  }
+  return map_size;
+}
+
+
+// Populate klass k's OopMapBlock array: first copy the superclass's maps,
+// then append (or merge into the last inherited map) the blocks collected
+// for this class's own oop fields during field layout. The total written
+// must match the size reserved by compute_oop_map_size().
+void ClassFileParser::fill_oop_maps(instanceKlassHandle k, 
+                        int nonstatic_oop_map_count, 
+                        u2* nonstatic_oop_offsets, u2* nonstatic_oop_length) {
+  OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
+  OopMapBlock* last_oop_map = this_oop_map + k->nonstatic_oop_map_size();
+  instanceKlass* super = k->superklass();
+  if (super != NULL) {
+    int super_oop_map_size     = super->nonstatic_oop_map_size();
+    OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
+    // Copy maps from superklass
+    while (super_oop_map_size-- > 0) {
+      *this_oop_map++ = *super_oop_map++;
+    }
+  }
+  if (nonstatic_oop_map_count > 0) {
+    if (this_oop_map + nonstatic_oop_map_count > last_oop_map) {
+      // Calculated in compute_oop_map_size() number of oop maps is less than 
+      // collected oop maps since there is no gap between superklass's last oop 
+      // field and first local oop field. Extend the last oop map copied 
+      // from the superklass instead of creating new one.
+      nonstatic_oop_map_count--;
+      nonstatic_oop_offsets++;
+      this_oop_map--;
+      this_oop_map->set_length(this_oop_map->length() + *nonstatic_oop_length++);
+      this_oop_map++;
+    }
+    assert((this_oop_map + nonstatic_oop_map_count) == last_oop_map, "just checking");
+    // Add new map blocks, fill them
+    while (nonstatic_oop_map_count-- > 0) {
+      this_oop_map->set_offset(*nonstatic_oop_offsets++);
+      this_oop_map->set_length(*nonstatic_oop_length++);
+      this_oop_map++;
+    }
+  }
+}
+
+
+// Precompute and cache per-klass flags that depend on the fully parsed
+// class and its superclass: has_finalizer, is_cloneable,
+// has_vanilla_constructor, and the slow-path-allocation bit in the
+// layout helper.
+void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) {
+  klassOop super = k->super();
+
+  // Check if this klass has an empty finalize method (i.e. one with return bytecode only),
+  // in which case we don't have to register objects as finalizable
+  if (!_has_empty_finalizer) {
+    if (_has_finalizer ||
+        (super != NULL && super->klass_part()->has_finalizer())) {
+      k->set_has_finalizer();
+    }
+  }
+
+#ifdef ASSERT
+  // Debug-only cross-check: the cached flag must agree with an actual
+  // lookup of a non-empty finalize() method.
+  bool f = false;
+  methodOop m = k->lookup_method(vmSymbols::finalize_method_name(),
+                                 vmSymbols::void_method_signature());
+  if (m != NULL && !m->is_empty_method()) {
+    f = true;
+  }
+  assert(f == k->has_finalizer(), "inconsistent has_finalizer");
+#endif
+
+  // Check if this klass supports the java.lang.Cloneable interface
+  if (SystemDictionary::cloneable_klass_loaded()) {
+    if (k->is_subtype_of(SystemDictionary::cloneable_klass())) {
+      k->set_is_cloneable();
+    }
+  }
+
+  // Check if this klass has a vanilla default constructor
+  if (super == NULL) {
+    // java.lang.Object has empty default constructor
+    k->set_has_vanilla_constructor();
+  } else {
+    // A vanilla constructor requires the whole super chain to be vanilla.
+    if (Klass::cast(super)->has_vanilla_constructor() &&
+        _has_vanilla_constructor) {
+      k->set_has_vanilla_constructor();
+    }
+#ifdef ASSERT
+    // Debug-only cross-check against a real lookup of <init>()V.
+    bool v = false;
+    if (Klass::cast(super)->has_vanilla_constructor()) {
+      methodOop constructor = k->find_method(vmSymbols::object_initializer_name(
+), vmSymbols::void_method_signature());
+      if (constructor != NULL && constructor->is_vanilla_constructor()) {
+        v = true;
+      }
+    }
+    assert(v == k->has_vanilla_constructor(), "inconsistent has_vanilla_constructor");
+#endif
+  }
+
+  // If it cannot be fast-path allocated, set a bit in the layout helper.
+  // See documentation of instanceKlass::can_be_fastpath_allocated().
+  assert(k->size_helper() > 0, "layout_helper is initialized");
+  if ((!RegisterFinalizersAtInit && k->has_finalizer())
+      || k->is_abstract() || k->is_interface()
+      || (k->name() == vmSymbols::java_lang_Class()
+          && k->class_loader() == NULL)
+      || k->size_helper() >= FastAllocateSizeLimit) {
+    // Forbid fast-path allocation.
+    jint lh = Klass::instance_layout_helper(k->size_helper(), true);
+    k->set_layout_helper(lh);
+  }
+}
+
+
+// Utility: append the interfaces from 'ifs' into 'result' starting at
+// 'index', skipping any entry already present in result[0..index).
+// 'index' is advanced past the last element written.
+
+void append_interfaces(objArrayHandle result, int& index, objArrayOop ifs) {
+  int count = ifs->length();
+  for (int i = 0; i < count; i++) {
+    oop e = ifs->obj_at(i);
+    assert(e->is_klass() && instanceKlass::cast(klassOop(e))->is_interface(), "just checking");
+    // Linear scan of the already-appended prefix for a duplicate.
+    bool seen = false;
+    for (int j = 0; j < index; j++) {
+      if (result->obj_at(j) == e) {
+        seen = true;
+        break;
+      }
+    }
+    if (!seen) {
+      result->obj_at_put(index++, e);
+    }
+  }
+}
+
+// Compute the transitive closure of interfaces this class implements:
+// the superclass's transitive set, each local interface's transitive set,
+// and the local interfaces themselves, with duplicates removed. Shares an
+// existing array when possible to avoid allocation.
+objArrayHandle ClassFileParser::compute_transitive_interfaces(instanceKlassHandle super, objArrayHandle local_ifs, TRAPS) {
+  // Compute maximum size for transitive interfaces
+  int max_transitive_size = 0;
+  int super_size = 0;
+  // Add superclass transitive interfaces size
+  if (super.not_null()) {
+    super_size = super->transitive_interfaces()->length();
+    max_transitive_size += super_size;
+  }
+  // Add local interfaces' super interfaces  
+  int local_size = local_ifs->length();
+  for (int i = 0; i < local_size; i++) {
+    klassOop l = klassOop(local_ifs->obj_at(i));
+    max_transitive_size += instanceKlass::cast(l)->transitive_interfaces()->length();
+  }
+  // Finally add local interfaces
+  max_transitive_size += local_size;
+  // Construct array
+  objArrayHandle result;
+  if (max_transitive_size == 0) {
+    // no interfaces, use canonicalized array
+    result = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
+  } else if (max_transitive_size == super_size) {
+    // no new local interfaces added, share superklass' transitive interface array
+    result = objArrayHandle(THREAD, super->transitive_interfaces());
+  } else if (max_transitive_size == local_size) {
+    // only local interfaces added, share local interface array
+    result = local_ifs;
+  } else {
+    objArrayHandle nullHandle;
+    objArrayOop new_objarray = oopFactory::new_system_objArray(max_transitive_size, CHECK_(nullHandle));
+    result = objArrayHandle(THREAD, new_objarray);
+    int index = 0;
+    // Copy down from superclass
+    if (super.not_null()) {
+      append_interfaces(result, index, super->transitive_interfaces());
+    }    
+    // Copy down from local interfaces' superinterfaces
+    for (int i = 0; i < local_ifs->length(); i++) {
+      klassOop l = klassOop(local_ifs->obj_at(i));
+      append_interfaces(result, index, instanceKlass::cast(l)->transitive_interfaces());
+    }
+    // Finally add local interfaces
+    append_interfaces(result, index, local_ifs());
+
+    // Check if duplicates were removed
+    if (index != max_transitive_size) {
+      assert(index < max_transitive_size, "just checking");
+      // Shrink to the exact size so the stored array has no trailing nulls.
+      objArrayOop new_result = oopFactory::new_system_objArray(index, CHECK_(nullHandle));
+      for (int i = 0; i < index; i++) {
+        oop e = result->obj_at(i);
+        assert(e != NULL, "just checking");
+        new_result->obj_at_put(i, e);
+      }
+      result = objArrayHandle(THREAD, new_result);
+    }
+  }
+  return result;  
+}
+
+
+// Verify that this_klass may access its superclass; throws
+// IllegalAccessError (via fthrow) when access is denied.
+void ClassFileParser::check_super_class_access(instanceKlassHandle this_klass, TRAPS) {
+  klassOop super = this_klass->super();
+  if (super == NULL) {
+    return;  // no superclass to check (java.lang.Object)
+  }
+  if (Reflection::verify_class_access(this_klass->as_klassOop(), super, false)) {
+    return;  // access permitted
+  }
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(  
+    THREAD_AND_LOCATION,
+    vmSymbolHandles::java_lang_IllegalAccessError(),
+    "class %s cannot access its superclass %s",
+    this_klass->external_name(),
+    instanceKlass::cast(super)->external_name()
+  );
+}
+
+
+// Verify that this_klass may access each of its declared superinterfaces;
+// throws IllegalAccessError for the first inaccessible one found.
+void ClassFileParser::check_super_interface_access(instanceKlassHandle this_klass, TRAPS) {
+  objArrayHandle local_interfaces (THREAD, this_klass->local_interfaces());
+  // Scan from the last declared interface toward the first, preserving the
+  // original checking order (the first failure found is the one reported).
+  for (int i = local_interfaces->length() - 1; i >= 0; i--) {
+    klassOop k = klassOop(local_interfaces->obj_at(i));
+    assert (k != NULL && Klass::cast(k)->is_interface(), "invalid interface");
+    if (Reflection::verify_class_access(this_klass->as_klassOop(), k, false)) {
+      continue;  // this superinterface is accessible
+    }
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(  
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_IllegalAccessError(),
+      "class %s cannot access its superinterface %s",
+      this_klass->external_name(),
+      instanceKlass::cast(k)->external_name()
+    );
+    return;
+  }
+}
+
+
+// Check that no method of this_klass overrides a final method declared in
+// any superclass; throws VerifyError (via fthrow) on the first violation.
+void ClassFileParser::check_final_method_override(instanceKlassHandle this_klass, TRAPS) {
+  objArrayHandle methods (THREAD, this_klass->methods());
+  int num_methods = methods->length();
+  
+  // go thru each method and check if it overrides a final method
+  for (int index = 0; index < num_methods; index++) {
+    methodOop m = (methodOop)methods->obj_at(index);
+
+    // skip private, static and <init> methods
+    if ((!m->is_private()) &&
+        (!m->is_static()) &&
+        (m->name() != vmSymbols::object_initializer_name())) {
+	
+      symbolOop name = m->name();
+      symbolOop signature = m->signature();
+      klassOop k = this_klass->super();
+      methodOop super_m = NULL;
+      // Walk up the superclass chain looking for a final, accessible method
+      // with the same name and signature that m would override.
+      while (k != NULL) {
+        // skip supers that don't have final methods.
+        if (k->klass_part()->has_final_method()) {
+          // lookup a matching method in the super class hierarchy
+          super_m = instanceKlass::cast(k)->lookup_method(name, signature); 
+          if (super_m == NULL) {
+            break; // didn't find any match; get out
+          }
+  
+          if (super_m->is_final() &&
+              // matching method in super is final
+              (Reflection::verify_field_access(this_klass->as_klassOop(), 
+                                               super_m->method_holder(),
+                                               super_m->method_holder(),
+                                               super_m->access_flags(), false))
+            // this class can access super final method and therefore override
+            ) {
+            ResourceMark rm(THREAD);
+            Exceptions::fthrow(  
+              THREAD_AND_LOCATION,
+              vmSymbolHandles::java_lang_VerifyError(),
+              "class %s overrides final method %s.%s",
+              this_klass->external_name(),
+              name->as_C_string(),
+              signature->as_C_string()
+            );
+            return;
+          }
+
+          // continue to look from super_m's holder's super.
+          k = instanceKlass::cast(super_m->method_holder())->super();
+          continue;
+        }
+
+        k = k->klass_part()->super();
+      }
+    }
+  }
+}
+
+
+// assumes that this_klass is an interface
+void ClassFileParser::check_illegal_static_method(instanceKlassHandle this_klass, TRAPS) {
+  assert(this_klass->is_interface(), "not an interface");
+  objArrayHandle methods (THREAD, this_klass->methods());
+  int num_methods = methods->length();
+
+  for (int index = 0; index < num_methods; index++) {
+    methodOop m = (methodOop)methods->obj_at(index);
+    // if m is static and not the init method, throw a verify error
+    if ((m->is_static()) && (m->name() != vmSymbols::class_initializer_name())) {
+      ResourceMark rm(THREAD);
+      Exceptions::fthrow(  
+        THREAD_AND_LOCATION,
+        vmSymbolHandles::java_lang_VerifyError(),
+        "Illegal static method %s in interface %s",
+        m->name()->as_C_string(),
+        this_klass->external_name()
+      );
+      return;
+    }
+  }
+}
+
+// utility methods for format checking 
+
+// Checks the class-level access_flags for combinations forbidden by the
+// JVM spec; throws ClassFormatError when an illegal combination is found.
+void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) {
+  if (!_need_verify) { return; }
+
+  const bool is_interface  = (flags & JVM_ACC_INTERFACE)  != 0;
+  const bool is_abstract   = (flags & JVM_ACC_ABSTRACT)   != 0;
+  const bool is_final      = (flags & JVM_ACC_FINAL)      != 0;
+  const bool is_super      = (flags & JVM_ACC_SUPER)      != 0;
+  const bool is_enum       = (flags & JVM_ACC_ENUM)       != 0;
+  const bool is_annotation = (flags & JVM_ACC_ANNOTATION) != 0;
+  const bool major_gte_15  = _major_version >= JAVA_1_5_VERSION;
+
+  // A class may not be both abstract and final; interfaces must be
+  // abstract; 1.5+ forbids SUPER/ENUM on interfaces and ANNOTATION on
+  // non-interfaces.
+  const bool is_illegal =
+      (is_abstract && is_final) ||
+      (is_interface && !is_abstract) ||
+      (is_interface && major_gte_15 && (is_super || is_enum)) ||
+      (!is_interface && major_gte_15 && is_annotation);
+
+  if (is_illegal) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Illegal class modifiers in class %s: 0x%X",
+      _class_name->as_C_string(), flags
+    );
+    return;
+  }
+}
+
+// A member may carry at most one of the visibility flags
+// (public / protected / private); any pairing of two or more is illegal.
+bool ClassFileParser::has_illegal_visibility(jint flags) {
+  int visibility_count = 0;
+  if ((flags & JVM_ACC_PUBLIC)    != 0) visibility_count++;
+  if ((flags & JVM_ACC_PROTECTED) != 0) visibility_count++;
+  if ((flags & JVM_ACC_PRIVATE)   != 0) visibility_count++;
+  return visibility_count > 1;
+}
+
+// Returns true if (major, minor) is a class file version this VM accepts.
+bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
+  if (major < JAVA_MIN_SUPPORTED_VERSION) return false;
+  if (major > JAVA_MAX_SUPPORTED_VERSION) return false;
+  // At the maximum supported major version the minor version is bounded too.
+  if (major == JAVA_MAX_SUPPORTED_VERSION &&
+      minor > JAVA_MAX_SUPPORTED_MINOR_VERSION) return false;
+  return true;
+}
+
+// Checks the field access_flags for combinations forbidden by the JVM
+// spec; throws ClassFormatError when an illegal combination is found.
+void ClassFileParser::verify_legal_field_modifiers(
+    jint flags, bool is_interface, TRAPS) {
+  if (!_need_verify) { return; }
+
+  const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
+  const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
+  const bool is_private   = (flags & JVM_ACC_PRIVATE)   != 0;
+  const bool is_static    = (flags & JVM_ACC_STATIC)    != 0;
+  const bool is_final     = (flags & JVM_ACC_FINAL)     != 0;
+  const bool is_volatile  = (flags & JVM_ACC_VOLATILE)  != 0;
+  const bool is_transient = (flags & JVM_ACC_TRANSIENT) != 0;
+  const bool is_enum      = (flags & JVM_ACC_ENUM)      != 0;
+  const bool major_gte_15 = _major_version >= JAVA_1_5_VERSION;
+
+  bool is_illegal = false;
+
+  if (is_interface) {
+    // Interface fields must be exactly public static final; ACC_ENUM is
+    // additionally rejected for 1.5+ class files.
+    if (!is_public || !is_static || !is_final || is_private || 
+        is_protected || is_volatile || is_transient || 
+        (major_gte_15 && is_enum)) {
+      is_illegal = true;
+    }
+  } else { // not interface
+    // At most one visibility flag; final and volatile are mutually exclusive.
+    if (has_illegal_visibility(flags) || (is_final && is_volatile)) {
+      is_illegal = true;
+    }
+  }
+
+  if (is_illegal) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Illegal field modifiers in class %s: 0x%X",
+      _class_name->as_C_string(), flags);
+    return;
+  }
+}
+
+// Checks the method access_flags for combinations forbidden by the JVM
+// spec. Different rules apply to interface methods, instance initializers
+// (<init>) and abstract methods. Throws ClassFormatError on violation.
+void ClassFileParser::verify_legal_method_modifiers(
+    jint flags, bool is_interface, symbolHandle name, TRAPS) {
+  if (!_need_verify) { return; }
+
+  const bool is_public       = (flags & JVM_ACC_PUBLIC)       != 0;
+  const bool is_private      = (flags & JVM_ACC_PRIVATE)      != 0;
+  const bool is_static       = (flags & JVM_ACC_STATIC)       != 0;
+  const bool is_final        = (flags & JVM_ACC_FINAL)        != 0;
+  const bool is_native       = (flags & JVM_ACC_NATIVE)       != 0;
+  const bool is_abstract     = (flags & JVM_ACC_ABSTRACT)     != 0;
+  const bool is_bridge       = (flags & JVM_ACC_BRIDGE)       != 0;
+  const bool is_strict       = (flags & JVM_ACC_STRICT)       != 0;
+  const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
+  const bool major_gte_15    = _major_version >= JAVA_1_5_VERSION;
+  const bool is_initializer  = (name == vmSymbols::object_initializer_name());
+
+  bool is_illegal = false;
+
+  if (is_interface) {
+    // Interface methods must be public abstract (pre-Java-8 rules); 1.5+
+    // additionally rejects synchronized and strictfp.
+    if (!is_abstract || !is_public || is_static || is_final || 
+        is_native || (major_gte_15 && (is_synchronized || is_strict))) {
+      is_illegal = true;
+    }
+  } else { // not interface
+    if (is_initializer) {
+      // <init> may not be static, final, synchronized, native, abstract,
+      // nor (for 1.5+) a bridge method.
+      if (is_static || is_final || is_synchronized || is_native || 
+          is_abstract || (major_gte_15 && is_bridge)) {
+        is_illegal = true;
+      }
+    } else { // not initializer
+      // Abstract methods may not carry flags implying a body or
+      // non-overridability.
+      if (is_abstract) {
+        if ((is_final || is_native || is_private || is_static || 
+            (major_gte_15 && (is_synchronized || is_strict)))) {
+          is_illegal = true;
+        }
+      }
+      if (has_illegal_visibility(flags)) {
+        is_illegal = true;
+      }
+    }
+  }
+
+  if (is_illegal) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Method %s in class %s has illegal modifiers: 0x%X", 
+      name->as_C_string(), _class_name->as_C_string(), flags);
+    return;
+  }
+}
+
+// Verify that 'buffer' (length bytes) is a well-formed JVM modified-UTF-8
+// string as required for constant pool entries; throws ClassFormatError
+// on the first malformed sequence.
+void ClassFileParser::verify_legal_utf8(const unsigned char* buffer, int length, TRAPS) {
+  assert(_need_verify, "only called when _need_verify is true");
+  int i = 0;
+  // Fast path: scan four bytes at a time while all of them are plain ASCII
+  // (1..127); fall through to the byte-wise loop at the first other byte.
+  int count = length >> 2;
+  for (int k=0; k<count; k++) {
+    unsigned char b0 = buffer[i];
+    unsigned char b1 = buffer[i+1];
+    unsigned char b2 = buffer[i+2];
+    unsigned char b3 = buffer[i+3];
+    // For an unsigned char v,
+    // (v | v - 1) is < 128 (highest bit 0) for 0 < v < 128;
+    // (v | v - 1) is >= 128 (highest bit 1) for v == 0 or v >= 128.
+    // Note: '-' binds tighter than '|', so "b0 | b0 - 1" is b0 | (b0 - 1).
+    unsigned char res = b0 | b0 - 1 |
+                        b1 | b1 - 1 |
+                        b2 | b2 - 1 |
+                        b3 | b3 - 1;
+    if (res >= 128) break;
+    i += 4;
+  }
+  for(; i < length; i++) {
+    unsigned short c;
+    // no embedded zeros
+    guarantee_property((buffer[i] != 0), "Illegal UTF8 string in constant pool in class file %s", CHECK);
+    if(buffer[i] < 128) {
+      continue;
+    }
+    if ((i + 5) < length) { // see if it's legal supplementary character
+      if (UTF8::is_supplementary_character(&buffer[i])) {
+        c = UTF8::get_supplementary_character(&buffer[i]);
+        i += 5;
+        continue;
+      } 
+    }
+    // Dispatch on the high nibble of the lead byte. The error paths do not
+    // fall through: classfile_parse_error raises an exception and the CHECK
+    // macro returns immediately.
+    switch (buffer[i] >> 4) {
+      default: break;
+      case 0x8: case 0x9: case 0xA: case 0xB: case 0xF:
+        classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
+      case 0xC: case 0xD:  // 110xxxxx  10xxxxxx
+        c = (buffer[i] & 0x1F) << 6;
+        i++;
+        if ((i < length) && ((buffer[i] & 0xC0) == 0x80)) {
+          c += buffer[i] & 0x3F;
+          if (_major_version <= 47 || c == 0 || c >= 0x80) {
+            // for classes with major > 47, c must a null or a character in its shortest form
+            break;
+          }
+        } 
+        classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
+      case 0xE:  // 1110xxxx 10xxxxxx 10xxxxxx
+        c = (buffer[i] & 0xF) << 12;
+        i += 2;
+        if ((i < length) && ((buffer[i-1] & 0xC0) == 0x80) && ((buffer[i] & 0xC0) == 0x80)) {
+          c += ((buffer[i-1] & 0x3F) << 6) + (buffer[i] & 0x3F);
+          if (_major_version <= 47 || c >= 0x800) {
+            // for classes with major > 47, c must be in its shortest form
+            break;
+          }
+        }
+        classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
+    }  // end of switch
+  } // end of for
+}
+
+// Checks if name is a legal class name.
+void ClassFileParser::verify_legal_class_name(symbolHandle name, TRAPS) {
+  if (!_need_verify || _relax_verify) { return; }
+
+  char buf[fixed_buffer_size];
+  char* bytes = name->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  unsigned int length = name->utf8_length();
+  bool legal = false;
+
+  if (length > 0) {
+    char* p;
+    if (bytes[0] == JVM_SIGNATURE_ARRAY) {
+      p = skip_over_field_signature(bytes, false, length, CHECK);
+      legal = (p != NULL) && ((p - bytes) == (int)length);
+    } else if (_major_version < JAVA_1_5_VERSION) {
+      if (bytes[0] != '<') {
+        p = skip_over_field_name(bytes, true, length);
+        legal = (p != NULL) && ((p - bytes) == (int)length);
+      }
+    } else {
+      // 4900761: relax the constraints based on JSR202 spec
+      // Class names may be drawn from the entire Unicode character set.
+      // Identifiers between '/' must be unqualified names.
+      // The utf8 string has been verified when parsing cpool entries.
+      legal = verify_unqualified_name(bytes, length, LegalClass);  
+    }
+  } 
+  if (!legal) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Illegal class name \"%s\" in class file %s", bytes,
+      _class_name->as_C_string()
+    );
+    return;
+  }
+}
+
+// Checks if name is a legal field name.
+void ClassFileParser::verify_legal_field_name(symbolHandle name, TRAPS) {
+  if (!_need_verify || _relax_verify) { return; }
+
+  char buf[fixed_buffer_size];
+  char* bytes = name->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  unsigned int length = name->utf8_length();
+  bool legal = false;
+
+  if (length > 0) {
+    if (_major_version < JAVA_1_5_VERSION) {
+      if (bytes[0] != '<') { 
+        char* p = skip_over_field_name(bytes, false, length);
+        legal = (p != NULL) && ((p - bytes) == (int)length);
+      }
+    } else {
+      // 4881221: relax the constraints based on JSR202 spec
+      legal = verify_unqualified_name(bytes, length, LegalField);
+    }
+  }
+
+  if (!legal) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Illegal field name \"%s\" in class %s", bytes,
+      _class_name->as_C_string()
+    );
+    return;
+  }
+}
+
+// Checks if name is a legal method name; throws ClassFormatError if not.
+// The only names allowed to start with '<' are the special internal
+// methods <init> and <clinit>; all other names follow the same
+// pre-1.5 strict / 5.0+ relaxed (JSR 202) rules as field names.
+void ClassFileParser::verify_legal_method_name(symbolHandle name, TRAPS) {
+  if (!_need_verify || _relax_verify) { return; }
+
+  assert(!name.is_null(), "method name is null");
+  char buf[fixed_buffer_size];
+  char* bytes = name->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  unsigned int length = name->utf8_length();
+  bool legal = false;
+
+  if (length > 0) {
+    if (bytes[0] == '<') {
+      // '<'-prefixed names are legal only when they are exactly the
+      // interned <init> or <clinit> symbols (identity compare).
+      if (name == vmSymbols::object_initializer_name() || name == vmSymbols::class_initializer_name()) {
+        legal = true;
+      }
+    } else if (_major_version < JAVA_1_5_VERSION) {
+      char* p;
+      p = skip_over_field_name(bytes, false, length);
+      // Legal only if the entire name was consumed as an identifier.
+      legal = (p != NULL) && ((p - bytes) == (int)length);
+    } else {
+      // 4881221: relax the constraints based on JSR202 spec
+      legal = verify_unqualified_name(bytes, length, LegalMethod);
+    }
+  }
+
+  if (!legal) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Illegal method name \"%s\" in class %s", bytes,
+      _class_name->as_C_string()
+    );
+    return;
+  }
+}
+
+
+// Checks if signature is a legal field signature (a single field
+// descriptor, no void); throws ClassFormatError if not. Note this check
+// is not skipped by _relax_verify, only by _need_verify being off.
+void ClassFileParser::verify_legal_field_signature(symbolHandle name, symbolHandle signature, TRAPS) {
+  if (!_need_verify) { return; }
+
+  char buf[fixed_buffer_size];
+  char* bytes = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  unsigned int length = signature->utf8_length();
+  // CHECK propagates any exception raised while skipping (e.g. a
+  // malformed class-name component in the descriptor).
+  char* p = skip_over_field_signature(bytes, false, length, CHECK);
+
+  // The descriptor is legal only if it was recognized AND consumed whole.
+  if (p == NULL || (p - bytes) != (int)length) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbolHandles::java_lang_ClassFormatError(),
+      "Field \"%s\" in class %s has illegal signature \"%s\"", 
+      name->as_C_string(), _class_name->as_C_string(), bytes
+    );
+    return;
+  }
+}
+
+// Checks if signature is a legal method signature, i.e.
+// '(' {field-descriptor} ')' return-descriptor.
+// Returns the number of argument slots (longs and doubles count as two);
+// throws ClassFormatError and returns 0 on a malformed descriptor.
+int ClassFileParser::verify_legal_method_signature(symbolHandle name, symbolHandle signature, TRAPS) {
+  if (!_need_verify) {
+    // make sure caller's args_size will be less than 0 even for non-static
+    // method so it will be recomputed in compute_size_of_parameters().
+    return -2;
+  }
+
+  unsigned int args_size = 0;
+  char buf[fixed_buffer_size];
+  char* p = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  unsigned int length = signature->utf8_length();
+  char* nextp;
+
+  // The first character must be a '('
+  if ((length > 0) && (*p++ == JVM_SIGNATURE_FUNC)) {
+    length--;
+    // Skip over legal field signatures
+    nextp = skip_over_field_signature(p, false, length, CHECK_0);
+    while ((length > 0) && (nextp != NULL)) {
+      args_size++;
+      // long and double each occupy two argument slots
+      if (p[0] == 'J' || p[0] == 'D') {
+        args_size++;
+      }
+      length -= nextp - p;
+      p = nextp;
+      nextp = skip_over_field_signature(p, false, length, CHECK_0);
+    }
+    // The first non-signature thing better be a ')'
+    if ((length > 0) && (*p++ == JVM_SIGNATURE_ENDFUNC)) {
+      length--;
+      if (name->utf8_length() > 0 && name->byte_at(0) == '<') {
+        // All internal methods must return void
+        if ((length == 1) && (p[0] == JVM_SIGNATURE_VOID)) {
+          return args_size;
+        }
+      } else {
+        // Now we better just have a return value
+        // (void_ok == true allows 'V' here)
+        nextp = skip_over_field_signature(p, true, length, CHECK_0);
+        if (nextp && ((int)length == (nextp - p))) {
+          return args_size;
+        }
+      }
+    }
+  }
+  // Report error. Note p has been advanced past the consumed prefix, so
+  // the message shows the remainder of the descriptor at the failure point.
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(
+    THREAD_AND_LOCATION,
+    vmSymbolHandles::java_lang_ClassFormatError(),
+    "Method \"%s\" in class %s has illegal signature \"%s\"", 
+    name->as_C_string(),  _class_name->as_C_string(), p
+  );
+  return 0;
+}
+
+
+// Unqualified names may not contain the characters '.', ';', or '/'.
+// Method names additionally may not contain '<' or '>' (the special names
+// <init> and <clinit> never reach this method: they are handled as special
+// cases in verify_legal_method_name before this is called).
+// Returns true if the name is legal for the given type
+// (LegalClass / LegalField / LegalMethod).
+bool ClassFileParser::verify_unqualified_name(char* name, unsigned int length, int type) {
+  jchar ch;
+
+  for (char* p = name; p != name + length; ) {
+    ch = *p;
+    if (ch < 128) {
+      p++;
+      if (ch == '.' || ch == ';') {
+        return false;   // do not permit '.' or ';'
+      }
+      if (type != LegalClass && ch == '/') {
+        return false;   // do not permit '/' unless it's class name
+      }
+      if (type == LegalMethod && (ch == '<' || ch == '>')) {
+        return false;   // do not permit '<' or '>' in method names
+      }
+    } else {
+      // Multi-byte UTF-8 sequence: any non-ASCII character is permitted;
+      // just advance past it (the utf8 bytes were validated earlier).
+      char* tmp_p = UTF8::next(p, &ch);
+      p = tmp_p;
+    }
+  }
+  return true;
+}
+
+
+// Take pointer to a string. Skip over the longest part of the string that could 
+// be taken as a fieldname. Allow '/' if slash_ok is true.
+// Return a pointer to just past the fieldname. 
+// Return NULL if no fieldname at all was found, or in the case of slash_ok 
+// being true, we saw consecutive slashes (meaning we were looking for a 
+// qualified path but found something that was badly-formed).
+char* ClassFileParser::skip_over_field_name(char* name, bool slash_ok, unsigned int length) {
+  char* p;
+  jchar ch;                     
+  jboolean last_is_slash = false;            
+  jboolean not_first_ch = false; 
+
+  for (p = name; p != name + length; not_first_ch = true) {
+    char* old_p = p;
+    ch = *p;
+    if (ch < 128) {
+      p++;
+      // quick check for ascii
+      if ((ch >= 'a' && ch <= 'z') ||
+          (ch >= 'A' && ch <= 'Z') ||
+          (ch == '_' || ch == '$') ||
+          (not_first_ch && ch >= '0' && ch <= '9')) {
+        last_is_slash = false;
+        continue;
+      }
+      if (slash_ok && ch == '/') {
+        if (last_is_slash) {
+          return NULL;  // Don't permit consecutive slashes
+        }
+        last_is_slash = true;
+        continue;
+      }
+    } else {
+      jint unicode_ch;
+      char* tmp_p = UTF8::next_character(p, &unicode_ch);
+      p = tmp_p;
+      last_is_slash = false;
+      // Check if ch is Java identifier start or is Java identifier part
+      // 4672820: call java.lang.Character methods directly without generating separate tables.
+      EXCEPTION_MARK;
+      instanceKlassHandle klass (THREAD, SystemDictionary::char_klass());
+
+      // return value
+      JavaValue result(T_BOOLEAN);
+      // Set up the arguments to isJavaIdentifierStart and isJavaIdentifierPart
+      JavaCallArguments args;
+      args.push_int(unicode_ch);
+
+      // public static boolean isJavaIdentifierStart(char ch);
+      JavaCalls::call_static(&result,
+                             klass,
+                             vmSymbolHandles::isJavaIdentifierStart_name(), 
+                             vmSymbolHandles::int_bool_signature(),
+                             &args,
+                             THREAD);
+         
+      // Any exception from the upcall is swallowed and treated as
+      // "no fieldname found" (0 == NULL).
+      if (HAS_PENDING_EXCEPTION) {      
+        CLEAR_PENDING_EXCEPTION;
+        return 0;
+      }
+      if (result.get_jboolean()) {
+        continue;
+      }
+        
+      if (not_first_ch) {
+        // public static boolean isJavaIdentifierPart(char ch);
+        JavaCalls::call_static(&result,
+                               klass,
+                               vmSymbolHandles::isJavaIdentifierPart_name(), 
+                               vmSymbolHandles::int_bool_signature(),
+                               &args,
+                               THREAD);
+     
+        if (HAS_PENDING_EXCEPTION) {    
+          CLEAR_PENDING_EXCEPTION;
+          return 0;
+        }
+
+        if (result.get_jboolean()) {
+          continue;
+        }
+      }
+    }
+    // Current character is not a legal continuation: the fieldname (if any)
+    // ends just before it.
+    return (not_first_ch) ? old_p : NULL;
+  }
+  return (not_first_ch) ? p : NULL;
+}
+
+
+// Take pointer to a string. Skip over the longest part of the string that could
+// be taken as a field signature. Allow "void" if void_ok.
+// Return a pointer to just past the signature. 
+// Return NULL if no legal signature is found.
+char* ClassFileParser::skip_over_field_signature(char* signature, 
+                                                 bool void_ok, 
+                                                 unsigned int length,
+                                                 TRAPS) {
+  unsigned int array_dim = 0;
+  while (length > 0) {
+    switch (signature[0]) {
+      // 'V' falls through to the primitive cases when void is allowed.
+      case JVM_SIGNATURE_VOID: if (!void_ok) { return NULL; }
+      case JVM_SIGNATURE_BOOLEAN:
+      case JVM_SIGNATURE_BYTE:
+      case JVM_SIGNATURE_CHAR:
+      case JVM_SIGNATURE_SHORT:
+      case JVM_SIGNATURE_INT:
+      case JVM_SIGNATURE_FLOAT:
+      case JVM_SIGNATURE_LONG:
+      case JVM_SIGNATURE_DOUBLE:
+        return signature + 1;
+      case JVM_SIGNATURE_CLASS: {
+        if (_major_version < JAVA_1_5_VERSION) {
+          // Skip over the class name if one is there
+          char* p = skip_over_field_name(signature + 1, true, --length);
+        
+          // The next character better be a semicolon
+          if (p && (p - signature) > 1 && p[0] == ';') {
+            return p + 1;
+          }
+        } else {
+          // 4900761: For class version > 48, any unicode is allowed in class name.
+          length--; 
+          signature++; 
+          while (length > 0 && signature[0] != ';') {
+            if (signature[0] == '.') {
+              classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
+            }
+            length--; 
+            signature++; 
+          }            
+          // NOTE(review): if the loop exited with length == 0, this reads
+          // one byte past the descriptor; presumably safe only because the
+          // utf8 buffer is NUL-terminated -- confirm.
+          if (signature[0] == ';') { return signature + 1; }
+        }
+            
+        return NULL;
+      }
+      case JVM_SIGNATURE_ARRAY:
+        array_dim++;
+        if (array_dim > 255) {
+          // 4277370: array descriptor is valid only if it represents 255 or fewer dimensions.
+          classfile_parse_error("Array type descriptor has more than 255 dimensions in class file %s", CHECK_0);
+        }
+        // The rest of what's there better be a legal signature
+        signature++;
+        length--;
+        void_ok = false;  // "[V" is never a legal descriptor
+        break;
+
+      default:
+        return NULL;
+    }
+  }
+  return NULL;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,221 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)classFileParser.hpp	1.84 07/05/05 17:06:45 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// Parser for .class files
+//
+// The bytes describing the class file structure are read from a Stream object
+
+class ClassFileParser VALUE_OBJ_CLASS_SPEC {
+ private:
+  bool _need_verify;   // format checks beyond asserts are required
+  bool _relax_verify;  // relaxes the legal-name checks when set
+  u2   _major_version; // class file version, gates pre/post-1.5 checking
+  u2   _minor_version;
+  symbolHandle _class_name;
+
+  // Precomputed flags filled in by set_precomputed_flags()
+  bool _has_finalizer;
+  bool _has_empty_finalizer;
+  bool _has_vanilla_constructor;
+
+  // Scratch buffer used when flattening symbols to utf8 for checking
+  enum { fixed_buffer_size = 128 };
+  u_char _fixed_buffer[fixed_buffer_size];
+
+  ClassFileStream* _stream;              // Actual input stream
+
+  enum { LegalClass, LegalField, LegalMethod }; // used to verify unqualified names
+
+  // Accessors
+  ClassFileStream* stream()                        { return _stream; }
+  void set_stream(ClassFileStream* st)             { _stream = st; }
+
+  // Constant pool parsing
+  void parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS);
+
+  constantPoolHandle parse_constant_pool(TRAPS);
+
+  // Interface parsing
+  objArrayHandle parse_interfaces(constantPoolHandle cp,
+                                  int length,
+                                  Handle class_loader, 
+                                  Handle protection_domain,
+                                  PerfTraceTime* vmtimer,
+                                  symbolHandle class_name,
+                                  TRAPS);
+
+  // Field parsing
+  void parse_field_attributes(constantPoolHandle cp, u2 attributes_count,
+                              bool is_static, u2 signature_index, 
+                              u2* constantvalue_index_addr,
+                              bool* is_synthetic_addr, 
+                              u2* generic_signature_index_addr,
+                              typeArrayHandle* field_annotations, TRAPS);
+  typeArrayHandle parse_fields(constantPoolHandle cp, bool is_interface, 
+                               struct FieldAllocationCount *fac,
+                               objArrayHandle* fields_annotations, TRAPS);
+
+  // Method parsing
+  methodHandle parse_method(constantPoolHandle cp, bool is_interface, 
+                            AccessFlags* promoted_flags,
+                            typeArrayHandle* method_annotations,
+                            typeArrayHandle* method_parameter_annotations,
+                            typeArrayHandle* method_default_annotations,
+                            TRAPS);
+  objArrayHandle parse_methods (constantPoolHandle cp, bool is_interface, 
+                                AccessFlags* promoted_flags,
+                                bool* has_final_method,
+                                objArrayOop* methods_annotations_oop,
+                                objArrayOop* methods_parameter_annotations_oop,
+                                objArrayOop* methods_default_annotations_oop,
+                                TRAPS);
+  typeArrayHandle sort_methods (objArrayHandle methods,
+                                objArrayHandle methods_annotations,
+                                objArrayHandle methods_parameter_annotations,
+                                objArrayHandle methods_default_annotations,
+                                TRAPS);
+  typeArrayHandle parse_exception_table(u4 code_length, u4 exception_table_length, 
+                                        constantPoolHandle cp, TRAPS);
+  u_char* parse_linenumber_table(u4 code_attribute_length, u4 code_length,
+                                 int* compressed_linenumber_table_size, TRAPS);
+  u2* parse_localvariable_table(u4 code_length, u2 max_locals, u4 code_attribute_length,
+                                constantPoolHandle cp, u2* localvariable_table_length,
+                                bool isLVTT, TRAPS);
+  u2* parse_checked_exceptions(u2* checked_exceptions_length, u4 method_attribute_length,
+                               constantPoolHandle cp, TRAPS);
+  void parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
+                        u1* u1_array, u2* u2_array, constantPoolHandle cp, TRAPS);
+  typeArrayOop parse_stackmap_table(u4 code_attribute_length, TRAPS);
+
+  // Classfile attribute parsing
+  void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+  void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, 
+                                                instanceKlassHandle k, int length, TRAPS);
+  u2   parse_classfile_inner_classes_attribute(constantPoolHandle cp, 
+                                               instanceKlassHandle k, TRAPS);
+  void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+  void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+  void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
+  
+  // Annotations handling
+  typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
+                                       int runtime_visible_annotations_length,
+                                       u1* runtime_invisible_annotations,
+                                       int runtime_invisible_annotations_length, TRAPS);
+
+  // Final setup
+  int  compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count, 
+                            int first_nonstatic_oop_offset);
+  void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count, 
+                     u2* nonstatic_oop_offsets, u2* nonstatic_oop_length);
+  void set_precomputed_flags(instanceKlassHandle k);
+  objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, 
+                                               objArrayHandle local_ifs, TRAPS);
+
+  // Special handling for certain classes.
+  // Add the "discovered" field to java.lang.ref.Reference if
+  // it does not exist.
+  void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr, 
+    constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS);
+  // Adjust the field allocation counts for java.lang.Class to add
+  // fake fields.
+  void java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
+    FieldAllocationCount *fac_ptr, TRAPS);
+  // Adjust the next_nonstatic_oop_offset to place the fake fields
+  // before any Java fields.
+  void java_lang_Class_fix_post(int* next_nonstatic_oop_offset);
+
+  // Format checker methods: each throws ClassFormatError via TRAPS
+  void classfile_parse_error(const char* msg, TRAPS);
+  void classfile_parse_error(const char* msg, int index, TRAPS);
+  void classfile_parse_error(const char* msg, const char *name, TRAPS);
+  void classfile_parse_error(const char* msg, int index, const char *name, TRAPS);
+  inline void guarantee_property(bool b, const char* msg, TRAPS) {
+    if (!b) { classfile_parse_error(msg, CHECK); }
+  }
+
+  // Debug-build-only check; compiles to nothing unless ASSERT is defined.
+  inline void assert_property(bool b, const char* msg, TRAPS) {
+#ifdef ASSERT
+    if (!b) { fatal(msg); }
+#endif
+  }
+
+  // Throws when verifying, asserts (debug only) when not.
+  inline void check_property(bool property, const char* msg, int index, TRAPS) {
+    if (_need_verify) {
+      guarantee_property(property, msg, index, CHECK);
+    } else {
+      assert_property(property, msg, CHECK);
+    }
+  }
+  inline void guarantee_property(bool b, const char* msg, int index, TRAPS) {
+    if (!b) { classfile_parse_error(msg, index, CHECK); }
+  }
+  inline void guarantee_property(bool b, const char* msg, const char *name, TRAPS) {
+    if (!b) { classfile_parse_error(msg, name, CHECK); }
+  }
+  inline void guarantee_property(bool b, const char* msg, int index, const char *name, TRAPS) {
+    if (!b) { classfile_parse_error(msg, index, name, CHECK); }
+  }
+
+  bool is_supported_version(u2 major, u2 minor);
+  bool has_illegal_visibility(jint flags);
+
+  void verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS);
+  void verify_legal_utf8(const unsigned char* buffer, int length, TRAPS);
+  void verify_legal_class_name(symbolHandle name, TRAPS);
+  void verify_legal_field_name(symbolHandle name, TRAPS);
+  void verify_legal_method_name(symbolHandle name, TRAPS);
+  void verify_legal_field_signature(symbolHandle fieldname, symbolHandle signature, TRAPS);
+  int  verify_legal_method_signature(symbolHandle methodname, symbolHandle signature, TRAPS);
+  void verify_legal_class_modifiers(jint flags, TRAPS);
+  void verify_legal_field_modifiers(jint flags, bool is_interface, TRAPS);
+  void verify_legal_method_modifiers(jint flags, bool is_interface, symbolHandle name, TRAPS);
+  bool verify_unqualified_name(char* name, unsigned int length, int type);
+  char* skip_over_field_name(char* name, bool slash_ok, unsigned int length);
+  char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS);
+
+ public:
+  // Constructor
+  // NOTE(review): only _stream is initialized here; _need_verify,
+  // _relax_verify and the version fields are presumably set during
+  // parseClassFile before any checker consults them -- confirm.
+  ClassFileParser(ClassFileStream* st) { set_stream(st); }
+
+  // Parse .class file and return new klassOop. The klassOop is not hooked up
+  // to the system dictionary or any other structures, so a .class file can 
+  // be loaded several times if desired. 
+  // The system dictionary hookup is done by the caller.
+  //
+  // "parsed_name" is updated by this method, and is the name found
+  // while parsing the stream.
+  instanceKlassHandle parseClassFile(symbolHandle name, 
+                                     Handle class_loader, 
+                                     Handle protection_domain, 
+                                     symbolHandle& parsed_name,
+                                     TRAPS);
+
+  // Verifier checks
+  static void check_super_class_access(instanceKlassHandle this_klass, TRAPS);
+  static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS);
+  static void check_final_method_override(instanceKlassHandle this_klass, TRAPS);
+  static void check_illegal_static_method(instanceKlassHandle this_klass, TRAPS);
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classFileStream.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,97 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)classFileStream.cpp	1.40 07/05/05 17:06:44 JVM"
+#endif
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_classFileStream.cpp.incl"
+
+// Throws ClassFormatError for a class file that ends before the data
+// the parser tried to read.
+void ClassFileStream::truncated_file_error(TRAPS) {
+  THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
+}
+
+// Wraps a caller-owned [buffer, buffer+length) byte range; the stream does
+// not copy or free it. Verification is off until set_verify(true) is called.
+ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) {
+  _buffer_start = buffer;
+  _buffer_end   = buffer + length;
+  _current      = buffer;
+  _source       = source;
+  _need_verify  = false;
+}
+
+// Reads one byte, advancing the stream. With verification on, a read past
+// the end throws ClassFormatError (CHECK_0 returns 0); otherwise it is
+// only an assert in debug builds.
+u1 ClassFileStream::get_u1(TRAPS) {
+  if (_need_verify) {
+    check_truncated_file(_current + 1 > _buffer_end, CHECK_0);
+  } else {
+    assert(_current + 1 <= _buffer_end, "buffer overflow");
+  }
+  return *_current++;
+}
+
+// Reads a big-endian (Java order) u2, advancing the stream; bounds-checked
+// like get_u1.
+u2 ClassFileStream::get_u2(TRAPS) {
+  if (_need_verify) {
+    check_truncated_file(_current + 2 > _buffer_end, CHECK_0);
+  } else {
+    assert(_current + 2 <= _buffer_end, "buffer overflow");
+  }
+  u1* tmp = _current;
+  _current += 2;
+  return Bytes::get_Java_u2(tmp);
+}
+
+// Reads a big-endian (Java order) u4, advancing the stream; bounds-checked
+// like get_u1.
+u4 ClassFileStream::get_u4(TRAPS) {
+  if (_need_verify) {
+    check_truncated_file(_current + 4 > _buffer_end, CHECK_0);
+  } else {
+    assert(_current + 4 <= _buffer_end, "buffer overflow");
+  }
+  u1* tmp = _current;
+  _current += 4;
+  return Bytes::get_Java_u4(tmp);
+}
+
+// Reads a big-endian (Java order) u8, advancing the stream; bounds-checked
+// like get_u1.
+u8 ClassFileStream::get_u8(TRAPS) {
+  if (_need_verify) {
+    check_truncated_file(_current + 8 > _buffer_end, CHECK_0);
+  } else {
+    assert(_current + 8 <= _buffer_end, "buffer overflow");
+  }
+  u1* tmp = _current;
+  _current += 8;
+  return Bytes::get_Java_u8(tmp);
+}
+
+// Skips length bytes. With verification on, skipping past the end throws
+// (CHECK returns before _current is advanced); without verification there
+// is no check at all -- unlike the get_* readers, not even an assert.
+void ClassFileStream::skip_u1(int length, TRAPS) {
+  if (_need_verify) {
+    check_truncated_file(_current + length > _buffer_end, CHECK);
+  } 
+  _current += length;
+}
+
+// Skips length u2 elements (2*length bytes); bounds behavior as skip_u1.
+void ClassFileStream::skip_u2(int length, TRAPS) {
+  if (_need_verify) {
+    check_truncated_file(_current + length * 2 > _buffer_end, CHECK);
+  } 
+  _current += length * 2;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classFileStream.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,119 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)classFileStream.hpp	1.32 07/05/05 17:06:44 JVM"
+#endif
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// Input stream for reading .class file
+//
+// The entire input stream is present in a buffer allocated by the caller.
+// The caller is responsible for deallocating the buffer and for using
+// ResourceMarks appropriately when constructing streams.
+
+class ClassFileStream: public ResourceObj {
+ private:
+  u1*   _buffer_start; // Buffer bottom
+  u1*   _buffer_end;   // Buffer top (one past last element)
+  u1*   _current;      // Current buffer position
+  char* _source;       // Source of stream (directory name, ZIP/JAR archive name)
+  bool  _need_verify;  // True if verification is on for the class file
+
+  void truncated_file_error(TRAPS);
+ public:
+  // Constructor
+  ClassFileStream(u1* buffer, int length, char* source);
+
+  // Buffer access
+  u1* buffer() const           { return _buffer_start; }
+  int length() const           { return _buffer_end - _buffer_start; }
+  u1* current() const          { return _current; }
+  void set_current(u1* pos)    { _current = pos; }
+  char* source() const         { return _source; }
+  void set_verify(bool flag)   { _need_verify = flag; }
+
+  // Throws ClassFormatError when b is true.
+  void check_truncated_file(bool b, TRAPS) {
+    if (b) {
+      truncated_file_error(THREAD);
+    }
+  }
+
+  // Throws unless at least size more bytes remain in the buffer.
+  void guarantee_more(int size, TRAPS) {
+    check_truncated_file(_current + size > _buffer_end, CHECK);
+  }
+
+  // Read u1 from stream
+  // The *_fast variants below perform no bounds checking at all;
+  // callers are expected to have established room via guarantee_more.
+  u1 get_u1(TRAPS);
+  u1 get_u1_fast() {
+    return *_current++;
+  }
+
+  // Read u2 from stream
+  u2 get_u2(TRAPS);
+  u2 get_u2_fast() {
+    u2 res = Bytes::get_Java_u2(_current);
+    _current += 2;
+    return res;
+  }
+
+  // Read u4 from stream
+  u4 get_u4(TRAPS);
+  u4 get_u4_fast() {
+    u4 res = Bytes::get_Java_u4(_current);
+    _current += 4;
+    return res;
+  }
+
+  // Read u8 from stream
+  u8 get_u8(TRAPS);
+  u8 get_u8_fast() {
+    u8 res = Bytes::get_Java_u8(_current);
+    _current += 8;
+    return res;
+  }
+
+  // Get direct pointer into stream at current position. 
+  // Returns NULL if length elements are not remaining. The caller is 
+  // responsible for calling skip below if buffer contents is used.
+  u1* get_u1_buffer() {
+    return _current;
+  }
+
+  u2* get_u2_buffer() {
+    return (u2*) _current;
+  }
+
+  // Skip length u1 or u2 elements from stream
+  void skip_u1(int length, TRAPS);
+  void skip_u1_fast(int length) {
+    _current += length;
+  }
+
+  void skip_u2(int length, TRAPS);
+  void skip_u2_fast(int length) {
+    _current += 2 * length;
+  }
+
+  // Tells whether eos is reached
+  bool at_eos() const          { return _current == _buffer_end; }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,1260 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)classLoader.cpp	1.186 07/05/05 17:06:44 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_classLoader.cpp.incl"
+
+
+// Entry points in zip.dll for loading zip/jar file entries
+
+// Function-pointer types for the zip-library entry points that are
+// resolved dynamically in ClassLoader::load_zip_library() below.
+typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
+typedef void (JNICALL *ZipClose_t)(jzfile *zip);
+typedef jzentry* (JNICALL *FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
+typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
+typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
+typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
+
+// Resolved lazily in load_zip_library().  ZipClose, ReadMappedEntry and
+// CanonicalizeEntry may legitimately remain NULL on some platforms/JDKs;
+// users of these pointers check for NULL before calling.
+static ZipOpen_t         ZipOpen            = NULL;
+static ZipClose_t        ZipClose           = NULL;
+static FindEntry_t       FindEntry          = NULL;
+static ReadEntry_t       ReadEntry          = NULL;
+static ReadMappedEntry_t ReadMappedEntry    = NULL;
+static GetNextEntry_t    GetNextEntry       = NULL;
+static canonicalize_fn_t CanonicalizeEntry  = NULL;
+
+// Globals
+
+// jvmstat performance counters; created in ClassLoader::initialize()
+// only when UsePerfData is set, NULL otherwise.
+PerfCounter*    ClassLoader::_perf_accumulated_time = NULL;
+PerfCounter*    ClassLoader::_perf_classes_inited = NULL;
+PerfCounter*    ClassLoader::_perf_class_init_time = NULL;
+PerfCounter*    ClassLoader::_perf_class_verify_time = NULL;
+PerfCounter*    ClassLoader::_perf_classes_linked = NULL;
+PerfCounter*    ClassLoader::_perf_class_link_time = NULL;
+PerfCounter*    ClassLoader::_sync_systemLoaderLockContentionRate = NULL;
+PerfCounter*    ClassLoader::_sync_nonSystemLoaderLockContentionRate = NULL;
+PerfCounter*    ClassLoader::_sync_JVMFindLoadedClassLockFreeCounter = NULL;
+PerfCounter*    ClassLoader::_sync_JVMDefineClassLockFreeCounter = NULL;
+PerfCounter*    ClassLoader::_sync_JNIDefineClassLockFreeCounter = NULL;
+PerfCounter*    ClassLoader::_unsafe_defineClassCallCounter = NULL;
+PerfCounter*    ClassLoader::_isUnsyncloadClass = NULL;
+PerfCounter*    ClassLoader::_load_instance_class_failCounter = NULL;
+
+// Singly-linked list of boot class path entries, kept in search order.
+ClassPathEntry* ClassLoader::_first_entry         = NULL;
+ClassPathEntry* ClassLoader::_last_entry          = NULL;
+PackageHashtable* ClassLoader::_package_hash_table = NULL;
+
+// helper routines
+// Returns true iff <str_to_find> is a prefix of <str>.
+bool string_starts_with(const char* str, const char* str_to_find) {
+  const size_t prefix_len = strlen(str_to_find);
+  if (strlen(str) < prefix_len) {
+    return false;
+  }
+  return strncmp(str, str_to_find, prefix_len) == 0;
+}
+
+// Returns true iff <str_to_find> is a suffix of <str>.
+bool string_ends_with(const char* str, const char* str_to_find) {
+  const size_t str_len    = strlen(str);
+  const size_t suffix_len = strlen(str_to_find);
+  return suffix_len <= str_len &&
+         strcmp(str + (str_len - suffix_len), str_to_find) == 0;
+}
+
+
+// Snapshot the caller's package-name pointer array into C-heap storage
+// owned by this MetaIndex (the strings themselves are not copied).
+MetaIndex::MetaIndex(char** meta_package_names, int num_meta_package_names) {
+  _num_meta_package_names = num_meta_package_names;
+  if (num_meta_package_names == 0) {
+    _meta_package_names = NULL;
+  } else {
+    _meta_package_names = NEW_C_HEAP_ARRAY(char*, num_meta_package_names);
+    memcpy(_meta_package_names, meta_package_names,
+           num_meta_package_names * sizeof(char*));
+  }
+}
+
+
+MetaIndex::~MetaIndex() {
+  // _meta_package_names may be NULL (the zero-package case in the ctor).
+  // NOTE(review): assumes FREE_C_HEAP_ARRAY tolerates NULL - confirm.
+  FREE_C_HEAP_ARRAY(char*, _meta_package_names);
+}
+
+
+// Conservative membership test: returns true if a class file named
+// <class_name> *might* be found in the jar this index describes.
+// False positives are acceptable (the jar is then opened and searched);
+// false negatives would hide classes and must not occur.
+bool MetaIndex::may_contain(const char* class_name) {
+  if ( _num_meta_package_names == 0) {
+    return false;
+  }
+  size_t class_name_len = strlen(class_name);
+  for (int i = 0; i < _num_meta_package_names; i++) {
+    char* pkg = _meta_package_names[i];
+    size_t pkg_len = strlen(pkg);
+    size_t min_len = MIN2(class_name_len, pkg_len);
+    // Compare only up to the shorter length: a class name shorter than
+    // the package prefix still counts as a (conservative) match.
+    if (!strncmp(class_name, pkg, min_len)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+ClassPathEntry::ClassPathEntry() {
+  // Entries start out unlinked; ClassLoader::add_to_list() chains them.
+  set_next(NULL);
+}
+
+
+bool ClassPathEntry::is_lazy() {
+  // Base entries are always fully resolved; only LazyClassPathEntry
+  // overrides this to return true.
+  return false;
+}
+
+ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
+  // Take a private C-heap copy of the directory name; this entry owns it.
+  size_t len = strlen(dir);
+  _dir = NEW_C_HEAP_ARRAY(char, len + 1);
+  memcpy(_dir, dir, len + 1);   // includes the terminating NUL
+}
+
+
+// Look up <name> under this directory and, if present, read the whole
+// class file into a resource-allocated buffer wrapped in a
+// ClassFileStream.  Returns NULL if the path would overflow, the file is
+// missing, cannot be opened, or a short read occurs.
+ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
+  // construct full path name
+  char path[JVM_MAXPATHLEN];
+  if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
+    // Combined path does not fit - treat as "not found".
+    return NULL;
+  }
+  // check if file exists
+  struct stat st;
+  if (os::stat(path, &st) == 0) {
+    // found file, open it
+    int file_handle = hpi::open(path, 0, 0);
+    if (file_handle != -1) {
+      // read contents into resource array
+      u1* buffer = NEW_RESOURCE_ARRAY(u1, st.st_size);
+      size_t num_read = os::read(file_handle, (char*) buffer, st.st_size);
+      // close file
+      hpi::close(file_handle);
+      // construct ClassFileStream
+      // Only hand out the stream if the whole file was read; a short
+      // read (truncation, concurrent modification) falls through to NULL.
+      if (num_read == (size_t)st.st_size) {
+        return new ClassFileStream(buffer, st.st_size, _dir);    // Resource allocated
+      }
+    }
+  }
+  return NULL;
+}
+
+
+ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
+  _zip = zip;
+  // Keep our own C-heap copy of the archive name; freed in the destructor.
+  size_t name_len = strlen(zip_name);
+  _zip_name = NEW_C_HEAP_ARRAY(char, name_len + 1);
+  memcpy(_zip_name, zip_name, name_len + 1);
+}
+
+ClassPathZipEntry::~ClassPathZipEntry() {
+  // ZipClose may be NULL: ZIP_Close is not exported on Windows in
+  // JDK 5.0 (see load_zip_library), in which case the zip handle is
+  // simply not closed here.
+  if (ZipClose != NULL) {
+    (*ZipClose)(_zip);
+  }
+  FREE_C_HEAP_ARRAY(char, _zip_name);
+}
+
+// Look up <name> in this zip archive and return its contents as a
+// resource-allocated ClassFileStream, or NULL if the entry is absent or
+// cannot be read.
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
+  // enable call to C land
+  JavaThread* thread = JavaThread::current();
+  ThreadToNativeFromVM ttn(thread);
+  // check whether zip archive contains name
+  jint filesize, name_len;
+  jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
+  if (entry == NULL) return NULL;
+  u1* buffer;
+  char name_buf[128];
+  char* filename;
+  // Short names use the on-stack buffer; longer ones go to resource memory.
+  if (name_len < 128) {
+    filename = name_buf;
+  } else {
+    filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
+  }
+
+  // file found, get pointer to class in mmaped jar file.
+  // Fast path: ReadMappedEntry hands back a pointer into the mapped
+  // archive; no copy is made.  It may be unavailable (NULL) or fail.
+  if (ReadMappedEntry == NULL ||
+      !(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
+      // mmaped access not available, perhaps due to compression,
+      // read contents into resource array
+      buffer     = NEW_RESOURCE_ARRAY(u1, filesize);
+      if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
+  }
+  // return result
+  return new ClassFileStream(buffer, filesize, _zip_name);    // Resource allocated
+}
+
+// Apply <f> to the name of every entry in this zip archive, passing
+// <context> through unchanged.  Enumeration stops at the first NULL
+// returned by GetNextEntry.
+void ClassPathZipEntry::contents_do(void f(const char* name, void* context), void* context) {
+  JavaThread* thread = JavaThread::current();
+  HandleMark  handle_mark(thread);
+  ThreadToNativeFromVM ttn(thread);
+  jzentry* entry;
+  for (int index = 0; (entry = (*GetNextEntry)(_zip, index)) != NULL; index++) {
+    (*f)(entry->name, context);
+  }
+}
+
+// Placeholder for a boot classpath element whose jar/dir object is built
+// on first use (see resolve_entry).  Keeps a copy of the path and the
+// caller's stat snapshot so resolution needs no further file-system work.
+LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
+  // NOTE(review): raw strdup here while sibling entries use
+  // NEW_C_HEAP_ARRAY, and no destructor frees _path - confirm intended.
+  _path = strdup(path);
+  _st = st;
+  _meta_index = NULL;        // attached later by setup_meta_index(), if any
+  _resolved_entry = NULL;    // filled in lazily by resolve_entry()
+}
+
+bool LazyClassPathEntry::is_jar_file() {
+  // A regular file on the boot class path is taken to be a jar/zip;
+  // anything else (a directory) is not.
+  return (_st.st_mode & S_IFREG) == S_IFREG;
+}
+
+// Resolve this placeholder to a real ClassPathDirEntry/ClassPathZipEntry.
+// Safe against concurrent callers: the first thread to publish under
+// ThreadCritical wins; a losing thread deletes its duplicate entry.
+ClassPathEntry* LazyClassPathEntry::resolve_entry() {
+  if (_resolved_entry != NULL) {
+    // Fast path: already resolved.  NOTE(review): unlocked read; relies
+    // on pointer loads/stores being atomic on all supported platforms -
+    // confirm.
+    return (ClassPathEntry*) _resolved_entry;
+  }
+  ClassPathEntry* new_entry = NULL;
+  ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
+  assert(new_entry != NULL, "earlier code should have caught this");
+  {
+    ThreadCritical tc;
+    if (_resolved_entry == NULL) {
+      _resolved_entry = new_entry;
+      return new_entry;
+    }
+  }
+  // Another thread won the race; discard our freshly built entry.
+  assert(_resolved_entry != NULL, "bug in MT-safe resolution logic");
+  delete new_entry;
+  return (ClassPathEntry*) _resolved_entry;
+}
+
+// Open <name> from the underlying entry, consulting the meta index first:
+// if the index proves the class cannot live here, the underlying jar is
+// never resolved (and thus never opened).
+ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
+  MetaIndex* index = _meta_index;
+  if (index != NULL && !index->may_contain(name)) {
+    return NULL;
+  }
+  return resolve_entry()->open_stream(name);
+}
+
+bool LazyClassPathEntry::is_lazy() {
+  return true;
+}
+
+// Trace helper: print the package prefixes attached to a lazy entry's
+// meta index, e.g. "[Meta index for /jdk/lib/rt.jar=java/ javax/ ...]".
+static void print_meta_index(LazyClassPathEntry* entry, 
+                             GrowableArray<char*>& meta_packages) {
+  tty->print("[Meta index for %s=", entry->name());
+  for (int i = 0; i < meta_packages.length(); i++) {
+    if (i > 0) tty->print(" ");
+    // Print the package name as data, not as a format string: a '%' in
+    // a package name would otherwise be interpreted by tty->print.
+    tty->print("%s", meta_packages.at(i));
+  }
+  tty->print_cr("]");
+}
+
+
+// Parse the meta index file (written by the JDK build) and attach a
+// MetaIndex of package prefixes to each matching lazy boot classpath
+// entry, so that jars which cannot contain a requested class are never
+// opened.  Silently does nothing if the file is missing or has an
+// unsupported version.
+void ClassLoader::setup_meta_index() {
+  // Set up meta index which allows us to open boot jars lazily if
+  // class data sharing is enabled
+  const char* known_version = "% VERSION 2";
+  char* meta_index_path = Arguments::get_meta_index_path();
+  char* meta_index_dir  = Arguments::get_meta_index_dir();
+  FILE* file = fopen(meta_index_path, "r");
+  int line_no = 0;
+  if (file != NULL) {
+    ResourceMark rm;
+    LazyClassPathEntry* cur_entry = NULL;
+    GrowableArray<char*> boot_class_path_packages(10);
+    char package_name[256];
+    bool skipCurrentJar = false;
+    while (fgets(package_name, sizeof(package_name), file) != NULL) {
+      ++line_no;
+      // Remove trailing newline
+      // NOTE(review): assumes every line (including the last) ends in
+      // '\n'; a final line without one would lose its last character -
+      // confirm the index file is always newline-terminated.
+      package_name[strlen(package_name) - 1] = '\0';
+      switch(package_name[0]) {
+        case '%':
+        {
+          if ((line_no == 1) && (strcmp(package_name, known_version) != 0)) {
+            if (TraceClassLoading && Verbose) {  
+              tty->print("[Unsupported meta index version]");
+            }
+            fclose(file);
+            return;
+          }
+        }
+        // NOTE(review): there is no 'break' above - a '%' line falls
+        // through into the jar-file cases below.  Looks harmless since
+        // "% VERSION 2" will not match any jar name, but confirm the
+        // fall-through is intentional.
+
+        // These directives indicate jar files which contain only
+        // classes, only non-classfile resources, or a combination of
+        // the two. See src/share/classes/sun/misc/MetaIndex.java and
+        // make/tools/MetaIndex/BuildMetaIndex.java in the J2SE
+        // workspace.
+        case '#':
+        case '!':
+        case '@':
+        {
+          // Hand off current packages to current lazy entry (if any)
+          if ((cur_entry != NULL) &&
+              (boot_class_path_packages.length() > 0)) {
+            if (TraceClassLoading && Verbose) {  
+              print_meta_index(cur_entry, boot_class_path_packages);
+            }
+            MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
+                                             boot_class_path_packages.length());
+            cur_entry->set_meta_index(index);
+          }         
+          cur_entry = NULL;
+          boot_class_path_packages.clear();
+
+          // Find lazy entry corresponding to this jar file
+          for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
+            if (entry->is_lazy() &&
+                string_starts_with(entry->name(), meta_index_dir) &&
+                string_ends_with(entry->name(), &package_name[2])) {
+              cur_entry = (LazyClassPathEntry*) entry;
+              break;
+            }
+          }
+   
+          // If the first character is '@', it indicates the following jar
+          // file is a resource only jar file in which case, we should skip
+          // reading the subsequent entries since the resource loading is
+          // totally handled by J2SE side.
+          if (package_name[0] == '@') {
+            if (cur_entry != NULL) {
+              // Empty MetaIndex: may_contain() answers false for every
+              // class, so this jar is never opened for class loading.
+              cur_entry->set_meta_index(new MetaIndex(NULL, 0));
+            }
+            cur_entry = NULL;
+            skipCurrentJar = true;
+          } else {
+            skipCurrentJar = false;
+          }
+  
+          break;
+        }
+
+        default:
+        {
+          // Ordinary line: a package prefix belonging to the current jar.
+          if (!skipCurrentJar && cur_entry != NULL) {
+            char* new_name = strdup(package_name);
+            boot_class_path_packages.append(new_name);
+          }
+        }
+      }
+    }
+    // Hand off current packages to current lazy entry (if any)
+    if ((cur_entry != NULL) &&
+        (boot_class_path_packages.length() > 0)) {
+      if (TraceClassLoading && Verbose) {  
+        print_meta_index(cur_entry, boot_class_path_packages);
+      }
+      MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
+                                       boot_class_path_packages.length());
+      cur_entry->set_meta_index(index);
+    }          
+    fclose(file);
+  }
+}
+
+// Split Arguments::get_sysclasspath() on the platform path separator and
+// append a ClassPathEntry for each existing element, preserving order.
+void ClassLoader::setup_bootstrap_search_path() {
+  assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
+  // NOTE(review): this os::strdup'ed copy is never freed - a one-shot
+  // startup leak; confirm acceptable.
+  char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
+  if (TraceClassLoading && Verbose) {  
+    tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
+  }
+
+  int len = (int)strlen(sys_class_path);
+  int end = 0;
+
+  // Iterate over class path entries
+  for (int start = 0; start < len; start = end) {
+    // Advance 'end' to the end of the current element.
+    while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
+      end++;
+    }
+    // Copy the element into a NUL-terminated temporary for the lookup.
+    char* path = NEW_C_HEAP_ARRAY(char, end-start+1);
+    strncpy(path, &sys_class_path[start], end-start);
+    path[end-start] = '\0';
+    update_class_path_entry_list(path);
+    FREE_C_HEAP_ARRAY(char, path);
+    // Skip (runs of) separators; empty elements are thereby ignored.
+    while (sys_class_path[end] == os::path_separator()[0]) {
+      end++;
+    }
+  }
+}
+
+// Build the ClassPathEntry for <path>: a LazyClassPathEntry when <lazy>,
+// otherwise a ClassPathZipEntry (regular file) or ClassPathDirEntry
+// (directory).  <st> is the caller's stat result for <path>.
+void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
+  JavaThread* thread = JavaThread::current();
+  if (lazy) {
+    *new_entry = new LazyClassPathEntry(path, st);
+    return;
+  }
+  if ((st.st_mode & S_IFREG) == S_IFREG) {
+    // Regular file, should be a zip file
+    // Canonicalized filename
+    char canonical_path[JVM_MAXPATHLEN];
+    if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
+      // This matches the classic VM
+      // NOTE(review): THROW_MSG inside an EXCEPTION_MARK scope makes
+      // this (and the throw below) effectively fatal rather than a
+      // catchable exception - confirm that is the intent.
+      EXCEPTION_MARK;
+      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");          
+    }
+    char* error_msg = NULL;
+    jzfile* zip;
+    {
+      // enable call to C land
+      ThreadToNativeFromVM ttn(thread);
+      HandleMark hm(thread);
+      zip = (*ZipOpen)(canonical_path, &error_msg);
+    }
+    if (zip != NULL && error_msg == NULL) {
+      // Note: the entry records the original (non-canonical) path.
+      *new_entry = new ClassPathZipEntry(zip, path);
+      if (TraceClassLoading) {
+        tty->print_cr("[Opened %s]", path);
+      }
+    } else { 
+      // Zip open failed; build a descriptive message (resource memory)
+      // and throw ClassNotFoundException.
+      ResourceMark rm(thread);
+      char *msg;
+      if (error_msg == NULL) {
+        msg = NEW_RESOURCE_ARRAY(char, strlen(path) + 128); ;
+        jio_snprintf(msg, strlen(path) + 127, "error in opening JAR file %s", path);
+      } else {
+        int len = (int)(strlen(path) + strlen(error_msg) + 128);
+        msg = NEW_RESOURCE_ARRAY(char, len); ;
+        jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
+      }
+      EXCEPTION_MARK;
+      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);          
+    } 
+  } else {
+    // Directory
+    *new_entry = new ClassPathDirEntry(path);
+    if (TraceClassLoading) {
+      tty->print_cr("[Path %s]", path);
+    }
+  }      
+}
+
+
+// Create a class path zip entry for a given path (return NULL if not found
+// or zip/JAR file cannot be opened)
+// Create a class path zip entry for a given path (return NULL if not found
+// or zip/JAR file cannot be opened).  Unlike create_class_path_entry()
+// the returned entry records the canonicalized path.
+ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path) {
+  // check for a regular file
+  struct stat st;
+  if (os::stat(path, &st) == 0) {
+    if ((st.st_mode & S_IFREG) == S_IFREG) {
+      char orig_path[JVM_MAXPATHLEN];
+      char canonical_path[JVM_MAXPATHLEN];
+
+      // Guard the fixed-size copy: the original code strcpy'd an
+      // unchecked, externally supplied path into orig_path.  An
+      // over-long path cannot name an openable jar anyway.
+      if (strlen(path) >= sizeof(orig_path)) {
+        return NULL;
+      }
+      strcpy(orig_path, path);
+      if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) {
+        char* error_msg = NULL;
+        jzfile* zip;
+        {
+          // enable call to C land
+          JavaThread* thread = JavaThread::current();
+          ThreadToNativeFromVM ttn(thread);
+          HandleMark hm(thread);
+          zip = (*ZipOpen)(canonical_path, &error_msg);
+        }
+        if (zip != NULL && error_msg == NULL) {
+          // create using canonical path
+          return new ClassPathZipEntry(zip, canonical_path);
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+// returns true if entry already on class path
+bool ClassLoader::contains_entry(ClassPathEntry *entry) {
+  ClassPathEntry* e = _first_entry;
+  while (e != NULL) {
+    // assume zip entries have been canonicalized
+    if (strcmp(entry->name(), e->name()) == 0) {   
+      return true;
+    }
+    e = e->next();
+  }
+  return false;
+}
+
+// Append <new_entry> to the tail of the boot classpath list; a NULL
+// argument is ignored.
+void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
+  if (new_entry == NULL) {
+    return;
+  }
+  if (_last_entry == NULL) {
+    // Empty list: the new entry is both head and tail.
+    _first_entry = _last_entry = new_entry;
+  } else {
+    _last_entry->set_next(new_entry);
+    _last_entry = new_entry;
+  }
+}
+
+// Stat <path> and, if it exists, wrap it in a ClassPathEntry (lazily when
+// LazyBootClassLoader is set) appended to the boot classpath list.
+// Nonexistent paths are skipped silently.
+void ClassLoader::update_class_path_entry_list(const char *path) {
+  struct stat st;
+  if (os::stat((char *)path, &st) != 0) {
+    return;
+  }
+  // File or directory found
+  ClassPathEntry* new_entry = NULL;
+  create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
+  // Add new entry to linked list
+  add_to_list(new_entry);
+}
+
+// Load the JDK's zip library and resolve the entry points used above.
+// Aborts VM initialization if the library or a mandatory symbol is
+// missing; ZipClose/ReadMappedEntry/CanonicalizeEntry are optional.
+void ClassLoader::load_zip_library() {
+  assert(ZipOpen == NULL, "should not load zip library twice");
+  // First make sure native library is loaded
+  os::native_java_library();
+  // Load zip library
+  char path[JVM_MAXPATHLEN];
+  char ebuf[1024];
+  hpi::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "zip");
+  void* handle = hpi::dll_load(path, ebuf, sizeof ebuf);
+  if (handle == NULL) {
+    vm_exit_during_initialization("Unable to load ZIP library", path);
+  }
+  // Lookup zip entry points
+  ZipOpen      = CAST_TO_FN_PTR(ZipOpen_t, hpi::dll_lookup(handle, "ZIP_Open"));
+  ZipClose     = CAST_TO_FN_PTR(ZipClose_t, hpi::dll_lookup(handle, "ZIP_Close"));
+  FindEntry    = CAST_TO_FN_PTR(FindEntry_t, hpi::dll_lookup(handle, "ZIP_FindEntry"));
+  ReadEntry    = CAST_TO_FN_PTR(ReadEntry_t, hpi::dll_lookup(handle, "ZIP_ReadEntry"));
+  ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, hpi::dll_lookup(handle, "ZIP_ReadMappedEntry"));
+  GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, hpi::dll_lookup(handle, "ZIP_GetNextEntry"));
+
+  // ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL
+  if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL || GetNextEntry == NULL) {
+    vm_exit_during_initialization("Corrupted ZIP library", path);
+  }
+
+  // Lookup canonicalize entry in libjava.dll  
+  void *javalib_handle = os::native_java_library();
+  CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, hpi::dll_lookup(javalib_handle, "Canonicalize"));
+  // This lookup only works on 1.3. Do not check for non-null here
+}
+
+// PackageInfo data exists in order to support the java.lang.Package
+// class.  A Package object provides information about a java package
+// (version, vendor, etc.) which originates in the manifest of the jar
+// file supplying the package.  For application classes, the ClassLoader
+// object takes care of this.
+
+// For system (boot) classes, the Java code in the Package class needs
+// to be able to identify which source jar file contained the boot
+// class, so that it can extract the manifest from it.  This table
+// identifies java packages with jar files in the boot classpath.
+
+// Because the boot classpath cannot change, the classpath index is
+// sufficient to identify the source jar file or directory.  (Since
+// directories have no manifests, the directory name is not required,
+// but is available.)
+
+// When using sharing -- the pathnames of entries in the boot classpath
+// may not be the same at runtime as they were when the archive was
+// created (NFS, Samba, etc.).  The actual files and directories named
+// in the classpath must be the same files, in the same order, even
+// though the exact name is not the same.
+
+// One entry in the boot package table: maps a package-name prefix
+// (including the trailing '/') to the index of the boot classpath entry
+// it was loaded from.  See the block comment above for motivation.
+class PackageInfo: public BasicHashtableEntry {
+public:
+  const char* _pkgname;       // Package name
+  int _classpath_index;	      // Index of directory or JAR file loaded from
+
+  // Next entry in the same hash bucket.
+  PackageInfo* next() {
+    return (PackageInfo*)BasicHashtableEntry::next();
+  }
+
+  const char* pkgname()           { return _pkgname; }
+  void set_pkgname(char* pkgname) { _pkgname = pkgname; }
+
+  // Name of the classpath entry (dir or jar) this package came from.
+  const char* filename() {
+    return ClassLoader::classpath_entry(_classpath_index)->name();
+  }
+
+  void set_index(int index) {
+    _classpath_index = index;
+  }
+};
+
+
+// Hash table of PackageInfo entries keyed by package-name prefix.
+// Lookups take an explicit prefix length so callers can hash the package
+// part of a full class name without copying it out first.
+class PackageHashtable : public BasicHashtable {
+private:
+  // Java-style string hash (h = 31*h + c) over the first n characters.
+  inline unsigned int compute_hash(const char *s, int n) {
+    unsigned int val = 0;
+    while (--n >= 0) {
+      val = *s++ + 31 * val;
+    }
+    return val;
+  }
+
+  PackageInfo* bucket(int index) {
+    return (PackageInfo*)BasicHashtable::bucket(index);
+  }
+
+  // Find the entry whose name matches exactly the first n characters of
+  // <pkgname> (the stored name must also be exactly n characters long).
+  PackageInfo* get_entry(int index, unsigned int hash,
+                         const char* pkgname, size_t n) {
+    for (PackageInfo* pp = bucket(index); pp != NULL; pp = pp->next()) {
+      if (pp->hash() == hash &&
+          strncmp(pkgname, pp->pkgname(), n) == 0 &&
+          pp->pkgname()[n] == '\0') {
+        return pp;
+      }
+    }
+    return NULL;
+  }
+
+public:
+  // Fresh, empty table.
+  PackageHashtable(int table_size)
+    : BasicHashtable(table_size, sizeof(PackageInfo)) {}
+
+  // Table reconstituted from previously dumped buckets/entries.
+  PackageHashtable(int table_size, HashtableBucket* t, int number_of_entries)
+    : BasicHashtable(table_size, sizeof(PackageInfo), t, number_of_entries) {}
+
+  PackageInfo* get_entry(const char* pkgname, int n) {
+    unsigned int hash = compute_hash(pkgname, n);
+    return get_entry(hash_to_index(hash), hash, pkgname, n);
+  }
+
+  // Allocate an entry for <pkgname>; the caller retains ownership of the
+  // name storage.  Does not link the entry in - see add_entry().
+  PackageInfo* new_entry(char* pkgname, int n) {
+    unsigned int hash = compute_hash(pkgname, n);
+    PackageInfo* pp;
+    pp = (PackageInfo*)BasicHashtable::new_entry(hash);
+    pp->set_pkgname(pkgname);
+    return pp;
+  }
+
+  void add_entry(PackageInfo* pp) {
+    int index = hash_to_index(pp->hash());
+    BasicHashtable::add_entry(index, pp);
+  }
+
+  // Fill <packages> (sized number_of_entries() by the caller) with every
+  // stored package name, in bucket order.
+  void copy_pkgnames(const char** packages) {
+    int n = 0;
+    for (int i = 0; i < table_size(); ++i) {
+      for (PackageInfo* pp = bucket(i); pp != NULL; pp = pp->next()) {
+        packages[n++] = pp->pkgname();
+      }
+    }
+    assert(n == number_of_entries(), "just checking");
+  }
+
+  void copy_table(char** top, char* end, PackageHashtable* table);
+};
+
+
+// Relocate the table and its package-name strings into the shared
+// (class-data-sharing) region between *top and end, advancing *top.
+// Exits the VM with a sizing hint if the region is too small.
+void PackageHashtable::copy_table(char** top, char* end,
+                                  PackageHashtable* table) {
+  // Copy (relocate) the table to the shared space.
+  BasicHashtable::copy_table(top, end);
+
+  // Calculate the space needed for the package name strings.
+  int i;
+  int n = 0;
+  for (i = 0; i < table_size(); ++i) {
+    for (PackageInfo* pp = table->bucket(i);
+                      pp != NULL;
+                      pp = pp->next()) {
+      n += (int)(strlen(pp->pkgname()) + 1);
+    }
+  }
+  // Reserve room for the strings plus the length word written below.
+  if (*top + n + sizeof(intptr_t) >= end) {
+    warning("\nThe shared miscellaneous data space is not large "
+            "enough to \npreload requested classes.  Use "
+            "-XX:SharedMiscDataSize= to increase \nthe initial "
+            "size of the miscellaneous data space.\n");
+    exit(2);
+  }
+
+  // Copy the table data (the strings) to the shared space.
+  // The aligned total string size is recorded ahead of the strings so
+  // the region can be walked later.
+  n = align_size_up(n, sizeof(HeapWord));
+  *(intptr_t*)(*top) = n;
+  *top += sizeof(intptr_t);
+
+  for (i = 0; i < table_size(); ++i) {
+    for (PackageInfo* pp = table->bucket(i);
+                      pp != NULL;
+                      pp = pp->next()) {
+      int n1 = (int)(strlen(pp->pkgname()) + 1);
+      // Move each name into the shared region and repoint the entry.
+      pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
+      *top += n1;
+    }
+  }
+  *top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
+}
+
+
+// Shared-archive dump support: copy the package table's bucket array
+// into the shared region.
+void ClassLoader::copy_package_info_buckets(char** top, char* end) {
+  _package_hash_table->copy_buckets(top, end);
+}
+
+// Shared-archive dump support: copy the table entries and their package
+// name strings into the shared region.
+void ClassLoader::copy_package_info_table(char** top, char* end) {
+  _package_hash_table->copy_table(top, end, _package_hash_table);
+}
+
+
+// Look up the package of <pkgname> (everything up to and including the
+// last '/').  Returns NULL for names in the unnamed package.
+PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
+  const char* last_slash = strrchr(pkgname, '/');
+  if (last_slash == NULL) {
+    // No package prefix - nothing to look up.
+    return NULL;
+  }
+  int prefix_len = last_slash - pkgname + 1;
+  return _package_hash_table->get_entry(pkgname, prefix_len);
+}
+
+
+// Record that the package of <pkgname> (prefix up to the last '/') was
+// loaded from boot classpath entry <classpath_index>.  An existing entry
+// has its index updated.  Returns false only if the C-heap allocation
+// for the stored package name fails.
+bool ClassLoader::add_package(const char *pkgname, int classpath_index, TRAPS) {
+  assert(pkgname != NULL, "just checking");
+  // Bootstrap loader no longer holds system loader lock obj serializing
+  // load_instance_class and thereby add_package
+  {
+    MutexLocker ml(PackageTable_lock, THREAD);
+    // First check for previously loaded entry
+    PackageInfo* pp = lookup_package(pkgname);
+    if (pp != NULL) {
+      // Existing entry found, check source of package
+      pp->set_index(classpath_index);
+      return true;
+    }
+
+    const char *cp = strrchr(pkgname, '/');
+    if (cp != NULL) {
+      // Package prefix found
+      int n = cp - pkgname + 1;
+
+      // C-heap copy of the prefix; ownership passes to the table entry
+      // (package table entries live for the life of the VM).
+      char* new_pkgname = NEW_C_HEAP_ARRAY(char, n + 1);
+      if (new_pkgname == NULL) {
+        return false;
+      }
+  
+      memcpy(new_pkgname, pkgname, n);
+      new_pkgname[n] = '\0';
+      pp = _package_hash_table->new_entry(new_pkgname, n);
+      pp->set_index(classpath_index);
+      
+      // Insert into hash table
+      _package_hash_table->add_entry(pp);
+    }
+    // Classes in the unnamed package (no '/') are deliberately not
+    // recorded; that still counts as success.
+    return true;
+  }
+}
+
+
+// Return a java.lang.String naming the classpath entry the boot package
+// <name> was loaded from, or NULL if the package is unknown.
+oop ClassLoader::get_system_package(const char* name, TRAPS) {
+  PackageInfo* pp;
+  {
+    // The package table is shared state; hold the lock only for the
+    // lookup itself.
+    MutexLocker ml(PackageTable_lock, THREAD);
+    pp = lookup_package(name);
+  }
+  if (pp == NULL) {
+    return NULL;
+  }
+  Handle p = java_lang_String::create_from_str(pp->filename(), THREAD);
+  return p();
+}
+
+
+// Return an objArray of java.lang.String holding every boot package name
+// currently in the package table (snapshot taken under the table lock).
+objArrayOop ClassLoader::get_system_packages(TRAPS) {
+  ResourceMark rm(THREAD);
+  int nof_entries;
+  const char** packages;
+  {
+    MutexLocker ml(PackageTable_lock, THREAD);
+    // Allocate resource char* array containing package names
+    nof_entries = _package_hash_table->number_of_entries();
+    if ((packages = NEW_RESOURCE_ARRAY(const char*, nof_entries)) == NULL) {
+      return NULL;
+    }
+    _package_hash_table->copy_pkgnames(packages);
+  }
+  // Allocate objArray and fill with java.lang.String
+  // (done outside the lock: object allocation may GC/throw).
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                           nof_entries, CHECK_0);
+  objArrayHandle result(THREAD, r);
+  for (int i = 0; i < nof_entries; i++) {
+    Handle str = java_lang_String::create_from_str(packages[i], CHECK_0);
+    result->obj_at_put(i, str());
+  }
+
+  return result();
+}
+
+
+// Search the boot class path for "<h_name>.class", parse it, and record
+// the defining classpath entry in the package table.  Returns a NULL
+// handle if no entry supplies the class.
+instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) {
+  VTuneClassLoadMarker clm;
+  ResourceMark rm(THREAD);
+  EventMark m("loading class " INTPTR_FORMAT, (address)h_name());
+  ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
+
+  stringStream st;
+  // st.print() uses too much stack space while handling a StackOverflowError
+  // st.print("%s.class", h_name->as_utf8());
+  st.print_raw(h_name->as_utf8());
+  st.print_raw(".class");
+  char* name = st.as_string();
+
+  // Lookup stream for parsing .class file
+  ClassFileStream* stream = NULL;
+  int classpath_index = 0;
+  {
+    // Time only the search/open phase under the class-load perf counter.
+    PerfTraceTime vmtimer(perf_accumulated_time());
+    ClassPathEntry* e = _first_entry;
+    while (e != NULL) {
+      stream = e->open_stream(name);
+      if (stream != NULL) {
+        break;
+      }
+      e = e->next();
+      // classpath_index tracks which entry supplied the stream; it is
+      // later stored in the package table.
+      ++classpath_index;
+    }
+  }
+
+  instanceKlassHandle h(THREAD, klassOop(NULL));
+  if (stream != NULL) {
+
+    // class file found, parse it
+    ClassFileParser parser(stream);
+    Handle class_loader;       // null: bootstrap loader
+    Handle protection_domain;
+    symbolHandle parsed_name;
+    instanceKlassHandle result = parser.parseClassFile(h_name, 
+                                                       class_loader, 
+                                                       protection_domain, 
+                                                       parsed_name,
+                                                       CHECK_(h));
+
+    // add to package table
+    if (add_package(name, classpath_index, THREAD)) {
+      h = result;
+    }
+  }
+
+  return h;
+}
+
+
+// Reconstruct the boot package table from previously dumped (shared
+// archive) buckets and entries.
+void ClassLoader::create_package_info_table(HashtableBucket *t, int length,
+                                            int number_of_entries) {
+  assert(_package_hash_table == NULL, "One package info table allowed.");
+  assert(length == package_hash_table_size * sizeof(HashtableBucket),
+         "bad shared package info size.");
+  _package_hash_table =
+      new PackageHashtable(package_hash_table_size, t, number_of_entries);
+}
+
+
+// Create a fresh, empty boot package table (non-shared startup).
+void ClassLoader::create_package_info_table() {
+  assert(_package_hash_table == NULL, "shouldn't have one yet");
+  _package_hash_table = new PackageHashtable(package_hash_table_size);
+}
+
+
+// Initialize the class loader's access to methods in libzip.  Parse and
+// process the boot classpath into a list ClassPathEntry objects.  Once
+// this list has been created, it must not change (see class PackageInfo).
+
+void ClassLoader::initialize() {
+  assert(_package_hash_table == NULL, "should have been initialized by now.");
+  EXCEPTION_MARK;
+
+  if (UsePerfData) {
+    // jvmstat performance counters
+    NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time"); 
+    NEWPERFTICKCOUNTER(_perf_class_init_time, SUN_CLS, "classInitTime");
+    NEWPERFTICKCOUNTER(_perf_class_verify_time, SUN_CLS, "classVerifyTime");
+    NEWPERFTICKCOUNTER(_perf_class_link_time, SUN_CLS, "classLinkedTime");
+
+    NEWPERFEVENTCOUNTER(_perf_classes_inited, SUN_CLS, "initializedClasses");
+    NEWPERFEVENTCOUNTER(_perf_classes_linked, SUN_CLS, "linkedClasses");
+
+    // The following performance counters are added for measuring the impact
+    // of the bug fix of 6365597. They are mainly focused on finding out
+    // the behavior of system & user-defined classloader lock, whether 
+    // ClassLoader.loadClass/findClass is being called synchronized or not.
+    // Also two additional counters are created to see whether 'UnsyncloadClass'
+    // flag is being set or not and how many times load_instance_class call
+    // fails with linkageError etc.
+    NEWPERFEVENTCOUNTER(_sync_systemLoaderLockContentionRate, SUN_CLS, 
+			"systemLoaderLockContentionRate");    
+    NEWPERFEVENTCOUNTER(_sync_nonSystemLoaderLockContentionRate, SUN_CLS,
+			"nonSystemLoaderLockContentionRate");
+    NEWPERFEVENTCOUNTER(_sync_JVMFindLoadedClassLockFreeCounter, SUN_CLS,
+			"jvmFindLoadedClassNoLockCalls");
+    NEWPERFEVENTCOUNTER(_sync_JVMDefineClassLockFreeCounter, SUN_CLS,
+			"jvmDefineClassNoLockCalls");
+
+    NEWPERFEVENTCOUNTER(_sync_JNIDefineClassLockFreeCounter, SUN_CLS,
+			"jniDefineClassNoLockCalls");
+    
+    NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS,
+			"unsafeDefineClassCalls");
+    
+    NEWPERFEVENTCOUNTER(_isUnsyncloadClass, SUN_CLS, "isUnsyncloadClassSet");
+    NEWPERFEVENTCOUNTER(_load_instance_class_failCounter, SUN_CLS,
+			"loadInstanceClassFailRate");
+    
+    // increment the isUnsyncloadClass counter if UnsyncloadClass is set.
+    if (UnsyncloadClass) {
+      _isUnsyncloadClass->inc();
+    }
+  }
+
+  // lookup zip library entry points
+  load_zip_library();
+  // initialize search path
+  setup_bootstrap_search_path();
+  if (LazyBootClassLoader) {
+    // set up meta index which makes boot classpath initialization lazier
+    setup_meta_index();
+  }
+}
+
+
+// Accumulated class-loading time in milliseconds, or -1 when jvmstat
+// performance counters are disabled (-XX:-UsePerfData).
+jlong ClassLoader::classloader_time_ms() {
+  if (!UsePerfData) {
+    return -1;
+  }
+  return Management::ticks_to_ms(_perf_accumulated_time->get_value());
+}
+
+// Number of classes initialized so far, or -1 when perf counters are off.
+jlong ClassLoader::class_init_count() {
+  if (!UsePerfData) {
+    return -1;
+  }
+  return _perf_classes_inited->get_value();
+}
+
+// Accumulated class-initialization time in milliseconds, or -1 when
+// perf counters are off.
+jlong ClassLoader::class_init_time_ms() {
+  if (!UsePerfData) {
+    return -1;
+  }
+  return Management::ticks_to_ms(_perf_class_init_time->get_value());
+}
+
+// Accumulated class-verification time in milliseconds, or -1 when
+// perf counters are off.
+jlong ClassLoader::class_verify_time_ms() {
+  if (!UsePerfData) {
+    return -1;
+  }
+  return Management::ticks_to_ms(_perf_class_verify_time->get_value());
+}
+
+// Number of classes linked so far, or -1 when perf counters are off.
+jlong ClassLoader::class_link_count() {
+  if (!UsePerfData) {
+    return -1;
+  }
+  return _perf_classes_linked->get_value();
+}
+
+// Accumulated class-linking time in milliseconds, or -1 when perf
+// counters are off.
+jlong ClassLoader::class_link_time_ms() {
+  if (!UsePerfData) {
+    return -1;
+  }
+  return Management::ticks_to_ms(_perf_class_link_time->get_value());
+}
+
+// Returns the size (in words) of java/lang/Object's vtable.
+int ClassLoader::compute_Object_vtable() {
+  // Hardwired for JDK1.2 -- computing the actual value would require
+  // duplicating the class file parsing code here.
+  // The value would be '11' if finals were placed in the vtable.
+  const int jdk_1_2_object_vtable_entries = 5;
+  return jdk_1_2_object_vtable_entries * vtableEntry::size();
+}
+
+
+// VM-startup hook: initializes the bootstrap class loader subsystem.
+void classLoader_init() {
+  ClassLoader::initialize();
+}
+
+
+// Canonicalizes 'orig' into 'out' (capacity 'len') so path strings can be
+// compared with strcmp; this mainly avoids confusing the zip library.
+// Returns false only when the HPI canonicalize entry point reports failure.
+bool ClassLoader::get_canonical_path(char* orig, char* out, int len) {
+  assert(orig != NULL && out != NULL && len > 0, "bad arguments");
+  if (CanonicalizeEntry == NULL) {
+    // On JDK 1.2.2 the Canonicalize entry point does not exist, so just
+    // fall back to a plain (NUL-terminated, possibly truncating) copy.
+    strncpy(out, orig, len);
+    out[len - 1] = '\0';
+    return true;
+  }
+  JNIEnv* env = JavaThread::current()->jni_environment();
+  return (CanonicalizeEntry)(env, hpi::native_path(orig), out, len) >= 0;
+}
+
+#ifndef PRODUCT
+
+// Debugging support (non-product builds): verifies the invariants of the
+// loaded-package hash table.
+void ClassLoader::verify() {
+  _package_hash_table->verify();
+}
+
+
+// CompileTheWorld
+//
+// Iterates over all class path entries and forces compilation of all methods
+// in all classes found. Currently, only zip/jar archives are searched.
+// 
+// The classes are loaded by the Java level bootstrap class loader, and the
+// initializer is called. If DelayCompilationDuringStartup is true (default),
+// the interpreter will run the initialization code. Note that forcing 
+// initialization in this way could potentially lead to initialization order
+// problems, in which case we could just force the initialization bit to be set.
+
+
+// We need to iterate over the contents of a zip/jar file, so we replicate the
+// jzcell and jzfile definitions from zip_util.h but rename jzfile to real_jzfile,
+// since jzfile already has a void* definition.
+//
+// Note that this is only used in debug mode.
+//
+// HotSpot integration note:
+// Matches zip_util.h 1.14 99/06/01 from jdk1.3 beta H build
+
+
+// JDK 1.3 version
+// Replica of the JDK 1.3 zip_util.h 'jzentry' layout (see the HotSpot
+// integration note above); debug-only, used by CompileTheWorld iteration.
+typedef struct real_jzentry13 { 	/* Zip file entry */
+    char *name;	  	  	/* entry name */
+    jint time;            	/* modification time */
+    jint size;	  	  	/* size of uncompressed data */
+    jint csize;  	  	/* size of compressed data (zero if uncompressed) */
+    jint crc;		  	/* crc of uncompressed data */
+    char *comment;	  	/* optional zip file comment */
+    jbyte *extra;	  	/* optional extra data */
+    jint pos;	  	  	/* position of LOC header (if negative) or data */
+} real_jzentry13;
+
+// Replica of the JDK 1.3 zip_util.h 'jzfile' layout (renamed, since 'jzfile'
+// already has a void* definition in classLoader.hpp); debug-only.
+typedef struct real_jzfile13 {  /* Zip file */
+    char *name;	  	        /* zip file name */
+    jint refs;		        /* number of active references */
+    jint fd;		        /* open file descriptor */
+    void *lock;		        /* read lock */
+    char *comment; 	        /* zip file comment */
+    char *msg;		        /* zip error message */
+    void *entries;          	/* array of hash cells */
+    jint total;	  	        /* total number of entries */
+    unsigned short *table;      /* Hash chain heads: indexes into entries */
+    jint tablelen;	        /* number of hash chain heads */
+    real_jzfile13 *next;        /* next zip file in search list */
+    jzentry *cache;             /* we cache the most recently freed jzentry */
+    /* Information on metadata names in META-INF directory */
+    char **metanames;           /* array of meta names (may have null names) */
+    jint metacount;	        /* number of slots in metanames array */
+    /* If there are any per-entry comments, they are in the comments array */
+    char **comments;
+} real_jzfile13;
+
+// JDK 1.2 version
+// Replica of the JDK 1.2 zip_util.h 'jzentry' layout; debug-only.
+typedef struct real_jzentry12 {  /* Zip file entry */
+    char *name;                  /* entry name */
+    jint time;                   /* modification time */
+    jint size;                   /* size of uncompressed data */
+    jint csize;                  /* size of compressed data (zero if uncompressed) */
+    jint crc;                    /* crc of uncompressed data */
+    char *comment;               /* optional zip file comment */
+    jbyte *extra;                /* optional extra data */
+    jint pos;                    /* position of LOC header (if negative) or data */
+    struct real_jzentry12 *next; /* next entry in hash table */
+} real_jzentry12;
+
+// Replica of the JDK 1.2 zip_util.h 'jzfile' layout; debug-only.
+typedef struct real_jzfile12 {  /* Zip file */
+    char *name;                 /* zip file name */
+    jint refs;                  /* number of active references */
+    jint fd;                    /* open file descriptor */
+    void *lock;                 /* read lock */
+    char *comment;              /* zip file comment */
+    char *msg;                  /* zip error message */
+    real_jzentry12 *entries;    /* array of zip entries */
+    jint total;                 /* total number of entries */
+    real_jzentry12 **table;     /* hash table of entries */
+    jint tablelen;              /* number of buckets */
+    jzfile *next;               /* next zip file in search list */
+} real_jzfile12;
+
+
+// CompileTheWorld only walks zip/jar archives; directory entries are
+// skipped with a diagnostic message.
+void ClassPathDirEntry::compile_the_world(Handle loader, TRAPS) {
+  // For now we only compile all methods in all classes in zip/jar files
+  tty->print_cr("CompileTheWorld : Skipped classes in %s", _dir);
+  tty->cr();
+}
+
+
+// A directory class path entry can never be rt.jar.
+bool ClassPathDirEntry::is_rt_jar() {
+  return false;
+}
+
+// CompileTheWorld over a zip/jar archive: dispatches to the zip-structure
+// layout matching the running JDK, then reports either permanent-generation
+// exhaustion (with a sizing hint) or any other unexpected pending exception.
+void ClassPathZipEntry::compile_the_world(Handle loader, TRAPS) {
+  if (JDK_Version::is_jdk12x_version()) {
+    compile_the_world12(loader, THREAD);
+  } else {
+    compile_the_world13(loader, THREAD);
+  }
+  if (HAS_PENDING_EXCEPTION) {
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+      CLEAR_PENDING_EXCEPTION;
+      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
+      size_t used = Universe::heap()->permanent_used();
+      size_t capacity = Universe::heap()->permanent_capacity();
+      // Use SIZE_FORMAT: the arguments are size_t, and "%d" mismatches
+      // size_t in varargs on LP64 platforms.
+      tty->print_cr("Permanent generation used " SIZE_FORMAT "K of " SIZE_FORMAT "K",
+                    used/K, capacity/K);
+      tty->print_cr("Increase size by setting e.g. -XX:MaxPermSize=" SIZE_FORMAT "K\n",
+                    capacity*2/K);
+    } else {
+      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
+    }
+  }
+}
+
+// Version that works for JDK 1.3.x
+void ClassPathZipEntry::compile_the_world13(Handle loader, TRAPS) {
+  real_jzfile13* zip = (real_jzfile13*) _zip;
+  tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
+  tty->cr();
+  // Iterate over all entries in zip file
+  for (int n = 0; ; n++) {
+    real_jzentry13 * ze = (real_jzentry13 *)((*GetNextEntry)(_zip, n));
+    if (ze == NULL) break;
+    ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
+  }
+}
+
+
+// Version that works for JDK 1.2.x
+// Version that works for JDK 1.2.x: walks every entry of the archive and
+// hands each entry name to ClassLoader::compile_the_world_in().
+void ClassPathZipEntry::compile_the_world12(Handle loader, TRAPS) {
+  real_jzfile12* zip = (real_jzfile12*) _zip;
+  tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
+  tty->cr();
+  // Iterate over all entries in zip file
+  int n = 0;
+  real_jzentry12* ze;
+  while ((ze = (real_jzentry12*)((*GetNextEntry)(_zip, n))) != NULL) {
+    ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
+    n++;
+  }
+}
+
+// Dispatches the rt.jar check to the zip-structure layout matching the
+// running JDK version.
+bool ClassPathZipEntry::is_rt_jar() {
+  return JDK_Version::is_jdk12x_version() ? is_rt_jar12() : is_rt_jar13();
+}
+
+// JDK 1.3 version
+bool ClassPathZipEntry::is_rt_jar13() {
+  real_jzfile13* zip = (real_jzfile13*) _zip;
+  int len = (int)strlen(zip->name);
+  // Check whether zip name ends in "rt.jar"
+  // This will match other archives named rt.jar as well, but this is
+  // only used for debugging.
+  return (len >= 6) && (strcasecmp(zip->name + len - 6, "rt.jar") == 0);
+}
+
+// JDK 1.2 version
+bool ClassPathZipEntry::is_rt_jar12() {
+  real_jzfile12* zip = (real_jzfile12*) _zip;
+  int len = (int)strlen(zip->name);
+  // Check whether zip name ends in "rt.jar"
+  // This will match other archives named rt.jar as well, but this is
+  // only used for debugging.
+  return (len >= 6) && (strcasecmp(zip->name + len - 6, "rt.jar") == 0);
+}
+
+// Resolves the lazily-opened entry, then delegates CompileTheWorld to it.
+void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
+  resolve_entry()->compile_the_world(loader, CHECK);
+}
+
+// Resolves the lazily-opened entry, then delegates the rt.jar check to it.
+bool LazyClassPathEntry::is_rt_jar() {
+  return resolve_entry()->is_rt_jar();
+}
+
+// CompileTheWorld driver: forces compilation of all methods in all classes
+// on the bootstrap class path (stopping after rt.jar unless rt.jar is the
+// first entry), prints compiler statistics, and exits the VM.
+void ClassLoader::compile_the_world() {
+  EXCEPTION_MARK;
+  HandleMark hm(THREAD);
+  ResourceMark rm(THREAD);
+  // Make sure we don't run with background compilation
+  BackgroundCompilation = false;
+  // Find bootstrap loader
+  Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
+  // Iterate over all bootstrap class path entries
+  ClassPathEntry* e = _first_entry;
+  while (e != NULL) {
+    // We stop at rt.jar, unless it is the first bootstrap path entry
+    if (e->is_rt_jar() && e != _first_entry) break;
+    e->compile_the_world(system_class_loader, CATCH);
+    e = e->next();
+  }
+  tty->print_cr("CompileTheWorld : Done");
+  {
+    // Print statistics as if before normal exit:
+    extern void print_statistics();
+    print_statistics();
+  }
+  vm_exit(0);
+}
+
+// Count of .class entries processed so far; compared against the
+// CompileTheWorldStartAt/StopAt bounds in compile_the_world_in().
+int ClassLoader::_compile_the_world_counter = 0;
+
+// Processes one zip/jar entry name for CompileTheWorld.  If 'name' denotes a
+// plausible top-level .class file, the class is loaded and initialized via
+// 'loader' and every compilable method is force-compiled (and compiled a
+// second time under TieredCompilation).  _compile_the_world_counter gates
+// which entries run via CompileTheWorldStartAt/StopAt.
+void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
+  int len = (int)strlen(name);
+  if (len > 6 && strcmp(".class", name + len - 6) == 0) {
+    // We have a .class file
+    char buffer[2048];
+    if (len - 6 >= (int)sizeof(buffer)) {
+      // The stripped name would overflow 'buffer'; no real class name is
+      // this long, so skip the entry instead of overrunning the stack.
+      return;
+    }
+    strncpy(buffer, name, len - 6);
+    buffer[len-6] = 0;
+    // If the file has a period after removing .class, it's not really a
+    // valid class file.  The class loader will check everything else.
+    if (strchr(buffer, '.') == NULL) {
+      _compile_the_world_counter++;
+      if (_compile_the_world_counter >= CompileTheWorldStartAt && _compile_the_world_counter <= CompileTheWorldStopAt) {
+        // Construct name without extension
+        symbolHandle sym = oopFactory::new_symbol_handle(buffer, CHECK);
+        // Use loader to load and initialize class
+        klassOop ik = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD);
+        instanceKlassHandle k (THREAD, ik);
+        if (k.not_null() && !HAS_PENDING_EXCEPTION) {
+          k->initialize(THREAD);
+        }
+        bool exception_occurred = HAS_PENDING_EXCEPTION;
+        CLEAR_PENDING_EXCEPTION;
+        if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) {
+          // If something went wrong (e.g. ExceptionInInitializerError) we skip this class
+          tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer);
+        } else {
+          tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer);
+          // Preload all classes to get around uncommon traps
+          if (CompileTheWorldPreloadClasses) {
+            constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD);
+            if (HAS_PENDING_EXCEPTION) {
+              // If something went wrong in preloading we just ignore it
+              CLEAR_PENDING_EXCEPTION;
+              tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer);
+            }
+          }
+          // Iterate over all methods in class
+          for (int n = 0; n < k->methods()->length(); n++) {
+            methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
+            if (CompilationPolicy::canBeCompiled(m)) {
+              // Force compilation
+              CompileBroker::compile_method(m, InvocationEntryBci,
+                                            methodHandle(), 0, "CTW", THREAD);
+              if (HAS_PENDING_EXCEPTION) {
+                CLEAR_PENDING_EXCEPTION;
+                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
+              }
+              if (TieredCompilation) {
+                // Clobber the first compile and force second tier compilation
+                m->clear_code();
+                CompileBroker::compile_method(m, InvocationEntryBci,
+                                              methodHandle(), 0, "CTW", THREAD);
+                if (HAS_PENDING_EXCEPTION) {
+                  CLEAR_PENDING_EXCEPTION;
+                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+#endif //PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,305 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)classLoader.hpp	1.64 07/05/05 17:06:45 JVM"
+#endif 
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// The VM class loader.
+#include <sys/stat.h>
+
+
+// Meta-index (optional, to be able to skip opening boot classpath jar files)
+class MetaIndex: public CHeapObj {
+ private:
+  char** _meta_package_names;     // package names known to live in the indexed jar
+  int    _num_meta_package_names; // number of entries in _meta_package_names
+ public:
+  MetaIndex(char** meta_package_names, int num_meta_package_names);
+  ~MetaIndex();
+  // Returns true if the indexed jar file may contain the named class
+  // (false means the jar can be skipped without opening it).
+  bool may_contain(const char* class_name);
+};
+
+
+// Class path entry (directory or zip file)
+
+// Abstract base for one element of the bootstrap class path
+// (directory, zip/jar file, or a lazily-resolved wrapper).
+class ClassPathEntry: public CHeapObj {
+ private:
+  ClassPathEntry* _next;   // singly-linked list maintained by ClassLoader
+ public:
+  // Next entry in class path
+  ClassPathEntry* next()              { return _next; }
+  void set_next(ClassPathEntry* next) { _next = next; }
+  virtual bool is_jar_file() = 0;
+  virtual const char* name() = 0;
+  virtual bool is_lazy();
+  // Constructor
+  ClassPathEntry();
+  // Attempt to locate file_name through this class path entry.
+  // Returns a class file parsing stream if successful.
+  virtual ClassFileStream* open_stream(const char* name) = 0;
+  // Debugging
+  NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
+  NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
+};
+
+
+// Class path entry backed by a file-system directory.
+class ClassPathDirEntry: public ClassPathEntry {
+ private:
+  char* _dir;           // Name of directory
+ public:
+  bool is_jar_file()  { return false;  }
+  const char* name()  { return _dir; }
+  ClassPathDirEntry(char* dir);
+  ClassFileStream* open_stream(const char* name);
+  // Debugging
+  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
+  NOT_PRODUCT(bool is_rt_jar();)
+};
+
+
+// Type definitions for zip file and zip file entry
+// NOTE(review): jzentry presumably mirrors the JDK zip library's entry
+// layout -- confirm against the JDK's zip_util.h before changing fields.
+typedef void* jzfile;
+typedef struct {
+  char *name;	  	  	/* entry name */
+  jlong time;            	/* modification time */
+  jlong size;	  	  	/* size of uncompressed data */
+  jlong csize;  	  	/* size of compressed data (zero if uncompressed) */
+  jint crc;		  	/* crc of uncompressed data */
+  char *comment;	  	/* optional zip file comment */
+  jbyte *extra;	  		/* optional extra data */
+  jlong pos;	  	  	/* position of LOC header (if negative) or data */
+} jzentry;
+
+
+// Class path entry backed by a zip/jar archive.
+class ClassPathZipEntry: public ClassPathEntry {
+ private:
+  jzfile* _zip;        // The zip archive
+  char*   _zip_name;   // Name of zip archive
+ public:
+  bool is_jar_file()  { return true;  }
+  const char* name()  { return _zip_name; }
+  ClassPathZipEntry(jzfile* zip, const char* zip_name);
+  ~ClassPathZipEntry();
+  ClassFileStream* open_stream(const char* name);
+  // Calls f(entry_name, context) for every entry in the archive.
+  void contents_do(void f(const char* name, void* context), void* context);
+  // Debugging
+  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
+  NOT_PRODUCT(void compile_the_world12(Handle loader, TRAPS);) // JDK 1.2 version
+  NOT_PRODUCT(void compile_the_world13(Handle loader, TRAPS);) // JDK 1.3 version
+  NOT_PRODUCT(bool is_rt_jar();)
+  NOT_PRODUCT(bool is_rt_jar12();)
+  NOT_PRODUCT(bool is_rt_jar13();)
+};
+
+
+// For lazier loading of boot class path entries
+class LazyClassPathEntry: public ClassPathEntry {
+ private:
+  char* _path; // dir or file
+  struct stat _st;
+  MetaIndex* _meta_index;
+  // Cached result of resolve_entry(); volatile -- NOTE(review): appears to
+  // be read without a lock, confirm the publication protocol in the .cpp.
+  volatile ClassPathEntry* _resolved_entry;
+  ClassPathEntry* resolve_entry();
+ public:
+  bool is_jar_file();
+  const char* name()  { return _path; }
+  LazyClassPathEntry(char* path, struct stat st);
+  ClassFileStream* open_stream(const char* name);
+  void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
+  virtual bool is_lazy();
+  // Debugging
+  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
+  NOT_PRODUCT(bool is_rt_jar();)
+};
+
+class PackageHashtable;
+class PackageInfo;
+class HashtableBucket;
+
+// The VM bootstrap class loader: all-static holder of the boot class path,
+// the loaded-package table, and the class-loading performance counters.
+class ClassLoader: AllStatic {
+ public:
+  enum SomeConstants {
+    package_hash_table_size = 31  // Number of buckets
+  };
+ private:
+  friend class LazyClassPathEntry;
+  
+  // Performance counters
+  static PerfCounter* _perf_accumulated_time;
+  static PerfCounter* _perf_classes_inited;
+  static PerfCounter* _perf_class_init_time;
+  static PerfCounter* _perf_class_verify_time;
+  static PerfCounter* _perf_classes_linked;
+  static PerfCounter* _perf_class_link_time;
+  
+  // Counters added for the 6365597 fix: loader-lock contention behavior.
+  static PerfCounter* _sync_systemLoaderLockContentionRate;
+  static PerfCounter* _sync_nonSystemLoaderLockContentionRate;
+  static PerfCounter* _sync_JVMFindLoadedClassLockFreeCounter;
+  static PerfCounter* _sync_JVMDefineClassLockFreeCounter;
+  static PerfCounter* _sync_JNIDefineClassLockFreeCounter;
+  
+  static PerfCounter* _unsafe_defineClassCallCounter;
+  static PerfCounter* _isUnsyncloadClass;
+  static PerfCounter* _load_instance_class_failCounter;
+
+  // First entry in linked list of ClassPathEntry instances
+  static ClassPathEntry* _first_entry;
+  // Last entry in linked list of ClassPathEntry instances
+  static ClassPathEntry* _last_entry;
+  // Hash table used to keep track of loaded packages
+  static PackageHashtable* _package_hash_table;
+  static const char* _shared_archive;
+
+  // Hash function
+  static unsigned int hash(const char *s, int n);
+  // Returns the package file name corresponding to the specified package 
+  // or class name, or null if not found.
+  static PackageInfo* lookup_package(const char *pkgname);
+  // Adds a new package entry for the specified class or package name and
+  // corresponding directory or jar file name.
+  static bool add_package(const char *pkgname, int classpath_index, TRAPS);
+
+  // Initialization
+  static void setup_meta_index();
+  static void setup_bootstrap_search_path();
+  static void load_zip_library();
+  static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
+  static void update_class_path_entry_list(const char *path);
+
+  // Canonicalizes path names, so strcmp will work properly. This is mainly
+  // to avoid confusing the zip library
+  static bool get_canonical_path(char* orig, char* out, int len);
+ public:
+  // Timing
+  static PerfCounter* perf_accumulated_time()  { return _perf_accumulated_time; }
+  static PerfCounter* perf_classes_inited()    { return _perf_classes_inited; }
+  static PerfCounter* perf_class_init_time()   { return _perf_class_init_time; }
+  static PerfCounter* perf_class_verify_time() { return _perf_class_verify_time; }
+  static PerfCounter* perf_classes_linked()    { return _perf_classes_linked; }
+  static PerfCounter* perf_class_link_time() { return _perf_class_link_time; }
+
+  // Record how often system loader lock object is contended
+  static PerfCounter* sync_systemLoaderLockContentionRate() {
+    return _sync_systemLoaderLockContentionRate;
+  }
+
+  // Record how often non system loader lock object is contended
+  static PerfCounter* sync_nonSystemLoaderLockContentionRate() {
+    return _sync_nonSystemLoaderLockContentionRate;
+  }
+
+  // Record how many calls to JVM_FindLoadedClass w/o holding a lock
+  static PerfCounter* sync_JVMFindLoadedClassLockFreeCounter() {
+    return _sync_JVMFindLoadedClassLockFreeCounter;
+  }
+  
+  // Record how many calls to JVM_DefineClass w/o holding a lock
+  static PerfCounter* sync_JVMDefineClassLockFreeCounter() {
+    return _sync_JVMDefineClassLockFreeCounter;
+  }
+
+  // Record how many calls to jni_DefineClass w/o holding a lock
+  static PerfCounter* sync_JNIDefineClassLockFreeCounter() {
+    return _sync_JNIDefineClassLockFreeCounter;
+  }
+
+  // Record how many calls to Unsafe_DefineClass
+  static PerfCounter* unsafe_defineClassCallCounter() {
+    return _unsafe_defineClassCallCounter;
+  }
+
+  // Record how many times SystemDictionary::load_instance_class call
+  // fails with linkageError when Unsyncloadclass flag is set.
+  static PerfCounter* load_instance_class_failCounter() {
+    return _load_instance_class_failCounter;
+  }
+  
+  // Load individual .class file
+  static instanceKlassHandle load_classfile(symbolHandle h_name, TRAPS);  
+
+  // If the specified package has been loaded by the system, then returns
+  // the name of the directory or ZIP file that the package was loaded from.
+  // Returns null if the package was not loaded.
+  // Note: The specified name can either be the name of a class or package.
+  // If a package name is specified, then it must be "/"-separated and also
+  // end with a trailing "/".
+  static oop get_system_package(const char* name, TRAPS);
+
+  // Returns an array of Java strings representing all of the currently
+  // loaded system packages.
+  // Note: The package names returned are "/"-separated and end with a
+  // trailing "/".
+  static objArrayOop get_system_packages(TRAPS);
+
+  // Initialization
+  static void initialize();
+  static void create_package_info_table();
+  static void create_package_info_table(HashtableBucket *t, int length,
+                                        int number_of_entries);
+  static int compute_Object_vtable();
+
+  // Returns the n'th entry of the boot class path list (asserts in debug
+  // builds if fewer than n+1 entries exist).
+  static ClassPathEntry* classpath_entry(int n) {
+    ClassPathEntry* e = ClassLoader::_first_entry;
+    while (--n >= 0) {
+      assert(e != NULL, "Not that many classpath entries.");
+      e = e->next();
+    }
+    return e;
+  }
+
+  // Sharing dump and restore
+  static void copy_package_info_buckets(char** top, char* end);
+  static void copy_package_info_table(char** top, char* end);
+
+  // VM monitoring and management support
+  static jlong classloader_time_ms();
+  static jlong class_method_total_size();
+  static jlong class_init_count();
+  static jlong class_init_time_ms();
+  static jlong class_verify_time_ms();
+  static jlong class_link_count();
+  static jlong class_link_time_ms();
+
+  // indicates if class path already contains an entry (exact match by name)
+  static bool contains_entry(ClassPathEntry* entry);
+
+  // adds a class path list
+  static void add_to_list(ClassPathEntry* new_entry);
+
+  // creates a class path zip entry (returns NULL if JAR file cannot be opened)
+  static ClassPathZipEntry* create_class_path_zip_entry(const char *apath);   
+
+  // Debugging
+  static void verify()              PRODUCT_RETURN;
+
+  // Force compilation of all methods in all classes in bootstrap class path (stress test)
+#ifndef PRODUCT
+ private:
+  static int _compile_the_world_counter;
+ public:
+  static void compile_the_world();
+  static void compile_the_world_in(char* name, Handle loader, TRAPS);
+  static int  compile_the_world_counter() { return _compile_the_world_counter; }
+#endif //PRODUCT
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,612 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)dictionary.cpp	1.26 07/05/17 15:50:16 JVM"
+#endif
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_dictionary.cpp.incl"
+
+
+// Cursor (bucket index + entry within that bucket) over the dictionary.
+// NOTE(review): the iteration that uses this cursor is not visible in this
+// file excerpt -- confirm its purpose against the rest of dictionary.cpp.
+DictionaryEntry*  Dictionary::_current_class_entry = NULL;
+int               Dictionary::_current_class_index =    0;
+
+
+// Constructs an empty system dictionary with the given number of buckets
+// and resets the static iteration cursor.
+Dictionary::Dictionary(int table_size)
+  : TwoOopHashtable(table_size, sizeof(DictionaryEntry)) {
+  _current_class_index = 0;
+  _current_class_entry = NULL;
+}  // removed stray ';' after the body (ill-formed under strict C++98)
+
+
+
+// Constructs a dictionary over a pre-existing bucket array (used when
+// restoring the shared dictionary) and resets the static iteration cursor.
+Dictionary::Dictionary(int table_size, HashtableBucket* t,
+                       int number_of_entries)
+  : TwoOopHashtable(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
+  _current_class_index = 0;
+  _current_class_entry = NULL;
+}  // removed stray ';' after the body (ill-formed under strict C++98)
+
+
+// Allocates a dictionary entry for (hash, klass), recording its defining
+// loader and starting with an empty protection-domain set.
+DictionaryEntry* Dictionary::new_entry(unsigned int hash, klassOop klass,
+                                       oop loader) {
+  DictionaryEntry* e = (DictionaryEntry*)Hashtable::new_entry(hash, klass);
+  e->set_loader(loader);
+  e->set_pd_set(NULL);
+  return e;
+}
+
+
+// Allocates a blank dictionary entry: hash 0, no klass, no loader, and an
+// empty protection-domain set.
+DictionaryEntry* Dictionary::new_entry() {
+  DictionaryEntry* e = (DictionaryEntry*)Hashtable::new_entry(0L, NULL);
+  e->set_loader(NULL);
+  e->set_pd_set(NULL);
+  return e;
+}
+
+
+// Frees a dictionary entry, first deleting its protection-domain list
+// iteratively (rather than via destructor recursion) to bound stack use.
+void Dictionary::free_entry(DictionaryEntry* entry) {
+  // avoid recursion when deleting linked list
+  while (entry->pd_set() != NULL) {
+    ProtectionDomainEntry* to_delete = entry->pd_set();
+    entry->set_pd_set(to_delete->next());
+    delete to_delete;
+  }
+  Hashtable::free_entry(entry);
+}
+
+
+// Returns true if 'protection_domain' is either the klass's own protection
+// domain or already present in this entry's pd_set.  May be called without
+// the SystemDictionary lock (the pd_set list is published with a release
+// store in add_protection_domain), so the list is only walked forward.
+bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
+#ifdef ASSERT
+  if (protection_domain == instanceKlass::cast(klass())->protection_domain()) {
+    // Ensure this doesn't show up in the pd_set (invariant)
+    bool in_pd_set = false;
+    for (ProtectionDomainEntry* current = _pd_set; 
+                                current != NULL; 
+                                current = current->next()) {
+      if (current->protection_domain() == protection_domain) {
+	in_pd_set = true;
+	break;
+      }
+    }
+    if (in_pd_set) {
+      assert(false, "A klass's protection domain should not show up "
+                    "in its sys. dict. PD set");
+    }
+  }
+#endif /* ASSERT */
+
+  if (protection_domain == instanceKlass::cast(klass())->protection_domain()) {
+    // Succeeds trivially
+    return true;
+  }
+
+  for (ProtectionDomainEntry* current = _pd_set; 
+                              current != NULL; 
+                              current = current->next()) {
+    if (current->protection_domain() == protection_domain) return true;
+  }
+  return false;
+}
+
+
+// Prepends 'protection_domain' to this entry's pd_set if not already
+// present.  Caller must hold SystemDictionary_lock or be at a safepoint;
+// the release store below makes the new node safe for lock-free readers.
+void DictionaryEntry::add_protection_domain(oop protection_domain) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  if (!contains_protection_domain(protection_domain)) {
+    ProtectionDomainEntry* new_head =
+                new ProtectionDomainEntry(protection_domain, _pd_set);
+    // Warning: Preserve store ordering.  The SystemDictionary is read
+    //          without locks.  The new ProtectionDomainEntry must be
+    //          complete before other threads can be allowed to see it
+    //          via a store to _pd_set.
+    OrderAccess::release_store_ptr(&_pd_set, new_head);
+  }
+  if (TraceProtectionDomainVerification && WizardMode) {
+    print();
+  }
+}
+
+
+bool Dictionary::do_unloading(BoolObjectClosure* is_alive) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  bool class_was_unloaded = false;
+  int  index = 0; // Defined here for portability! Do not move
+
+  // Remove unloadable entries and classes from system dictionary
+  // The placeholder array has been handled in always_strong_oops_do.
+  DictionaryEntry* probe = NULL;
+  for (index = 0; index < table_size(); index++) {
+    for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
+      probe = *p;
+      klassOop e = probe->klass();
+      oop class_loader = probe->loader();
+
+      instanceKlass* ik = instanceKlass::cast(e);
+      if (ik->previous_versions() != NULL) {
+        // This klass has previous versions so see what we can cleanup
+        // while it is safe to do so.
+
+        int gc_count = 0;    // leave debugging breadcrumbs
+        int live_count = 0;
+
+        // RC_TRACE macro has an embedded ResourceMark
+        RC_TRACE(0x00000200, ("unload: %s: previous version length=%d",
+          ik->external_name(), ik->previous_versions()->length()));
+
+        for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) {
+          // check the previous versions array for GC'ed weak refs
+          PreviousVersionNode * pv_node = ik->previous_versions()->at(i);
+          jweak cp_ref = pv_node->prev_constant_pool();
+          assert(cp_ref != NULL, "weak cp ref was unexpectedly cleared");
+          if (cp_ref == NULL) {
+            delete pv_node;
+            ik->previous_versions()->remove_at(i);
+            // Since we are traversing the array backwards, we don't have to
+            // do anything special with the index.
+            continue;  // robustness
+          }
+      
+          constantPoolOop pvcp = (constantPoolOop)JNIHandles::resolve(cp_ref);
+          if (pvcp == NULL) {
+            // this entry has been GC'ed so remove it
+            delete pv_node;
+            ik->previous_versions()->remove_at(i);
+            // Since we are traversing the array backwards, we don't have to
+            // do anything special with the index.
+            gc_count++;
+            continue;
+          } else {
+            RC_TRACE(0x00000200, ("unload: previous version @%d is alive", i));
+            if (is_alive->do_object_b(pvcp)) {
+              live_count++;
+            } else {
+              guarantee(false, "sanity check");
+            }
+          }
+      
+          GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
+          if (method_refs != NULL) {
+            RC_TRACE(0x00000200, ("unload: previous methods length=%d",
+              method_refs->length()));
+            for (int j = method_refs->length() - 1; j >= 0; j--) {
+              jweak method_ref = method_refs->at(j);
+              assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
+              if (method_ref == NULL) {
+                method_refs->remove_at(j);
+                // Since we are traversing the array backwards, we don't have to
+                // do anything special with the index.
+                continue;  // robustness
+              }
+            
+              methodOop method = (methodOop)JNIHandles::resolve(method_ref);
+              if (method == NULL) {
+                // this method entry has been GC'ed so remove it
+                JNIHandles::destroy_weak_global(method_ref);
+                method_refs->remove_at(j);
+              } else {
+                // RC_TRACE macro has an embedded ResourceMark
+                RC_TRACE(0x00000200,
+                  ("unload: %s(%s): prev method @%d in version @%d is alive",
+                  method->name()->as_C_string(),
+                  method->signature()->as_C_string(), j, i));
+              }
+            }
+          }
+        }
+        assert(ik->previous_versions()->length() == live_count, "sanity check");
+        RC_TRACE(0x00000200,
+          ("unload: previous version stats: live=%d, GC'ed=%d", live_count,
+          gc_count));
+      }
+
+      // Non-unloadable classes were handled in always_strong_oops_do
+      if (!is_strongly_reachable(class_loader, e)) {
+        // Entry was not visited in phase1 (negated test from phase1)
+        assert(class_loader != NULL, "unloading entry with null class loader");
+        oop k_def_class_loader = ik->class_loader();
+
+        // Do we need to delete this system dictionary entry?
+        bool purge_entry = false;
+
+        // Do we need to delete this system dictionary entry?
+        if (!is_alive->do_object_b(class_loader)) {
+          // If the loader is not live this entry should always be
+          // removed (will never be looked up again). Note that this is
+          // not the same as unloading the referred class.
+          if (k_def_class_loader == class_loader) {
+            // This is the defining entry, so the referred class is about
+            // to be unloaded.
+            // Notify the debugger and clean up the class.
+            guarantee(!is_alive->do_object_b(e),
+                      "klass should not be live if defining loader is not");
+            class_was_unloaded = true;
+            // notify the debugger
+            if (JvmtiExport::should_post_class_unload()) {
+              JvmtiExport::post_class_unload(ik->as_klassOop());
+            }
+
+            // notify ClassLoadingService of class unload
+            ClassLoadingService::notify_class_unloaded(ik);
+
+            // Clean up C heap
+            ik->release_C_heap_structures();
+          }
+          // Also remove this system dictionary entry.
+          purge_entry = true;
+
+        } else {
+          // The loader in this entry is alive. If the klass is dead,
+          // the loader must be an initiating loader (rather than the
+          // defining loader). Remove this entry.
+          if (!is_alive->do_object_b(e)) {
+            guarantee(!is_alive->do_object_b(k_def_class_loader),
+                      "defining loader should not be live if klass is not");
+            // If we get here, the class_loader must not be the defining
+            // loader, it must be an initiating one.
+            assert(k_def_class_loader != class_loader,
+                   "cannot have live defining loader and unreachable klass");
+
+            // Loader is live, but class and its defining loader are dead.
+            // Remove the entry. The class is going away.
+            purge_entry = true;
+          }
+        }
+
+        if (purge_entry) {
+          *p = probe->next();
+          if (probe == _current_class_entry) {
+            _current_class_entry = NULL;
+          }
+          free_entry(probe);
+          continue;
+        }
+      }
+      p = probe->next_addr();
+    }
+  }
+  return class_was_unloaded;
+}
+
+
+void Dictionary::always_strong_classes_do(OopClosure* blk) {
+  // Visit the oops of every dictionary entry that can never be unloaded:
+  // bootstrap-loaded classes, or all classes when class unloading is off
+  // (see is_strongly_reachable).
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    while (entry != NULL) {
+      oop k = entry->klass();
+      oop loader = entry->loader();
+      if (is_strongly_reachable(loader, k)) {
+        blk->do_oop((oop*)entry->klass_addr());
+        if (loader != NULL) {
+          blk->do_oop(entry->loader_addr());
+        }
+        entry->protection_domain_set_oops_do(blk);
+      }
+      entry = entry->next();
+    }
+  }
+}
+
+
+//   Just the classes from defining class loaders
+void Dictionary::classes_do(void f(klassOop)) {
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    for (; entry != NULL; entry = entry->next()) {
+      klassOop k = entry->klass();
+      // Skip initiating-loader entries so each class is visited exactly once.
+      if (instanceKlass::cast(k)->class_loader() == entry->loader()) {
+        f(k);
+      }
+    }
+  }
+}
+
+// Added for initialize_itable_for_klass to handle exceptions
+//   Just the classes from defining class loaders
+void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) {
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    for (; entry != NULL; entry = entry->next()) {
+      klassOop k = entry->klass();
+      // Visit each class once via its defining-loader entry; f may throw,
+      // in which case CHECK propagates the pending exception to our caller.
+      if (instanceKlass::cast(k)->class_loader() == entry->loader()) {
+        f(k, CHECK);
+      }
+    }
+  }
+}
+
+
+//   All classes, and their class loaders
+//   (added for helpers that use HandleMarks and ResourceMarks)
+// Don't iterate over placeholders
+void Dictionary::classes_do(void f(klassOop, oop, TRAPS), TRAPS) {
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    for (; entry != NULL; entry = entry->next()) {
+      // Every entry is visited, including initiating-loader entries;
+      // CHECK propagates any pending exception raised by f.
+      f(entry->klass(), entry->loader(), CHECK);
+    }
+  }
+}
+
+
+//   All classes, and their class loaders
+// Don't iterate over placeholders
+void Dictionary::classes_do(void f(klassOop, oop)) {
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    for (; entry != NULL; entry = entry->next()) {
+      // Every entry is visited, including initiating-loader entries.
+      f(entry->klass(), entry->loader());
+    }
+  }
+}
+
+
+// GC support: apply f to every oop the dictionary holds — each entry's
+// klass, its (non-NULL) loader, and its approved protection domains.
+void Dictionary::oops_do(OopClosure* f) {
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    while (entry != NULL) {
+      f->do_oop((oop*)entry->klass_addr());
+      if (entry->loader() != NULL) {
+        f->do_oop(entry->loader_addr());
+      }
+      entry->protection_domain_set_oops_do(f);
+      entry = entry->next();
+    }
+  }
+}
+
+
+// Apply f to every method of every class in the dictionary.
+void Dictionary::methods_do(void f(methodOop)) {
+  for (int i = 0; i < table_size(); i++) {
+    DictionaryEntry* entry = bucket(i);
+    for (; entry != NULL; entry = entry->next()) {
+      klassOop k = entry->klass();
+      // Only process the entry with the defining class loader, so each
+      // klass's methods are visited exactly once.
+      if (instanceKlass::cast(k)->class_loader() == entry->loader()) {
+        instanceKlass::cast(k)->methods_do(f);
+      }
+    }
+  }
+}
+
+
+// Return the next class in a round-robin walk over the dictionary (compiler
+// support).  The walk position persists across calls in the static
+// _current_class_index / _current_class_entry pair.
+// NOTE(review): loops forever if the dictionary is empty — presumably callers
+// only invoke this once at least one class has been loaded; confirm before
+// reusing elsewhere.
+klassOop Dictionary::try_get_next_class() {
+  while (true) {
+    if (_current_class_entry != NULL) {
+      klassOop k = _current_class_entry->klass();
+      _current_class_entry = _current_class_entry->next();
+      return k;
+    }
+    // Current bucket exhausted: advance (with wrap-around) to the next one.
+    _current_class_index = (_current_class_index + 1) % table_size();
+    _current_class_entry = bucket(_current_class_index);
+  }
+  // never reached
+}
+
+
+// Add a loaded class to the system dictionary.
+// Readers of the SystemDictionary aren't always locked, so _buckets
+// is volatile. The store of the next field in the constructor is
+// also cast to volatile;  we do this to ensure store order is maintained
+// by the compilers.
+
+void Dictionary::add_klass(symbolHandle class_name, Handle class_loader,
+                           KlassHandle obj) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert(obj() != NULL, "adding NULL obj");
+  assert(Klass::cast(obj())->name() == class_name(), "sanity check on name");
+
+  // Hash on (name, loader) to pick the bucket, then link in a fresh entry.
+  unsigned int hash = compute_hash(class_name, class_loader);
+  DictionaryEntry* entry = new_entry(hash, obj(), class_loader());
+  add_entry(hash_to_index(hash), entry);
+}
+
+
+// This routine does not lock the system dictionary.
+//
+// Since readers don't hold a lock, we must make sure that system
+// dictionary entries are only removed at a safepoint (when only one
+// thread is running), and are added to in a safe way (all links must
+// be updated in an MT-safe manner).
+//
+// Callers should be aware that an entry could be added just after
+// _buckets[index] is read here, so the caller will not see the new entry.
+DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash,
+                                       symbolHandle class_name,
+                                       Handle class_loader) {
+  symbolOop name = class_name();
+  oop loader = class_loader();
+  debug_only(_lookup_count++);
+  DictionaryEntry* entry = bucket(index);
+  while (entry != NULL) {
+    if (entry->hash() == hash && entry->equals(name, loader)) {
+      return entry;
+    }
+    debug_only(_lookup_length++);  // count probes past a non-matching entry
+    entry = entry->next();
+  }
+  return NULL;
+}
+
+
+// Lookup with a protection-domain check: a hit only counts if the caller's
+// protection domain has already been validated for the entry; otherwise
+// report "not found" so the slow path re-validates.
+klassOop Dictionary::find(int index, unsigned int hash, symbolHandle name,
+                          Handle loader, Handle protection_domain, TRAPS) {
+  DictionaryEntry* entry = get_entry(index, hash, name, loader);
+  if (entry == NULL) {
+    return NULL;
+  }
+  return entry->is_valid_protection_domain(protection_domain)
+       ? entry->klass() : (klassOop)NULL;
+}
+
+
+// Internal lookup (no protection-domain check); caller must hold the
+// SystemDictionary lock or be at a safepoint.
+klassOop Dictionary::find_class(int index, unsigned int hash,
+                                symbolHandle name, Handle loader) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert (index == index_for(name, loader), "incorrect index?");
+
+  DictionaryEntry* entry = get_entry(index, hash, name, loader);
+  if (entry == NULL) return (klassOop)NULL;
+  return entry->klass();
+}
+
+
+// Variant of find_class for shared classes.  No locking required, as
+// that table is static.
+klassOop Dictionary::find_shared_class(int index, unsigned int hash,
+                                       symbolHandle name) {
+  assert (index == index_for(name, Handle()), "incorrect index?");
+
+  // Shared classes are always recorded under the bootstrap (NULL) loader.
+  DictionaryEntry* entry = get_entry(index, hash, name, Handle());
+  if (entry == NULL) return (klassOop)NULL;
+  return entry->klass();
+}
+
+
+// Record that protection_domain has been approved for the (klass, loader)
+// entry, which the caller has just created.
+void Dictionary::add_protection_domain(int index, unsigned int hash,
+                                       instanceKlassHandle klass,
+                                       Handle loader, Handle protection_domain,
+                                       TRAPS) {
+  symbolHandle name(THREAD, klass->name());
+  DictionaryEntry* entry = get_entry(index, hash, name, loader);
+
+  assert(entry != NULL,"entry must be present, we just created it");
+  assert(protection_domain() != NULL, 
+         "real protection domain should be present");
+
+  entry->add_protection_domain(protection_domain());
+
+  assert(entry->contains_protection_domain(protection_domain()), 
+         "now protection domain should be present");
+}
+
+
+// Returns whether the given protection domain has already been validated
+// against the named class's dictionary entry.
+// NOTE(review): assumes the (name, loader) entry exists; get_entry() can
+// return NULL, which would be dereferenced here — confirm callers only ask
+// about classes already present in the dictionary.
+bool Dictionary::is_valid_protection_domain(int index, unsigned int hash,
+                                            symbolHandle name,
+                                            Handle loader,
+                                            Handle protection_domain) {
+  DictionaryEntry* entry = get_entry(index, hash, name, loader);
+  return entry->is_valid_protection_domain(protection_domain);
+}
+
+
+// Rehash every entry and rebuild the bucket chains.  Used by the sharing
+// support path, where entries' hashes must be recomputed for this VM's
+// hash function before the table can be used.
+void Dictionary::reorder_dictionary() {
+
+  // Copy all the dictionary entries into a single master list.
+
+  DictionaryEntry* master_list = NULL;
+  for (int i = 0; i < table_size(); ++i) {
+    DictionaryEntry* p = bucket(i);
+    while (p != NULL) {
+      DictionaryEntry* tmp;
+      tmp = p->next();
+      // Unlink p from its bucket and push it onto the master list.
+      p->set_next(master_list);
+      master_list = p;
+      p = tmp;
+    }
+    set_entry(i, NULL);
+  }
+
+  // Add the dictionary entries back to the list in the correct buckets.
+  Thread *thread = Thread::current();
+
+  while (master_list != NULL) {
+    DictionaryEntry* p = master_list;
+    master_list = master_list->next();
+    p->set_next(NULL);
+    symbolHandle class_name (thread, instanceKlass::cast((klassOop)(p->klass()))->name());
+    // Recompute the hash from (name, loader) and relink into its new bucket.
+    unsigned int hash = compute_hash(class_name, Handle(thread, p->loader()));
+    int index = hash_to_index(hash);
+    p->set_hash(hash);
+    p->set_next(bucket(index));
+    set_entry(index, p);
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+#ifndef PRODUCT
+
+// Debug-only dump of the dictionary, one line per entry.  Entries whose
+// initiating loader differs from the defining loader are marked with '^'.
+void Dictionary::print() {
+  ResourceMark rm;
+  HandleMark   hm;
+
+  tty->print_cr("Java system dictionary (classes=%d)", number_of_entries());
+  tty->print_cr("^ indicates that initiating loader is different from "
+                "defining loader");
+
+  for (int index = 0; index < table_size(); index++) {
+    DictionaryEntry* entry = bucket(index);
+    for (; entry != NULL; entry = entry->next()) {
+      if (Verbose) tty->print("%4d: ", index);
+      klassOop k = entry->klass();
+      oop loader = entry->loader();
+      bool defining = (loader == instanceKlass::cast(k)->class_loader());
+      tty->print("%s%s", defining ? " " : "^", 
+                   Klass::cast(k)->external_name());
+      if (loader != NULL) {
+        tty->print(", loader ");
+        loader->print_value();
+      }
+      tty->cr();
+    }
+  }
+}
+
+#endif
+
+// Consistency check: every entry holds an instance klass, every non-NULL
+// loader is an instance, and the counted entries match number_of_entries().
+void Dictionary::verify() {
+  guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
+  int element_count = 0;
+  for (int index = 0; index < table_size(); index++) {
+    DictionaryEntry* entry = bucket(index);
+    for (; entry != NULL; entry = entry->next()) {
+      klassOop k = entry->klass();
+      oop loader = entry->loader();
+      guarantee(Klass::cast(k)->oop_is_instance(), 
+                              "Verify of system dictionary failed");
+      // A NULL class loader denotes the bootstrap loader; anything else
+      // must itself be an instance.
+      guarantee(loader == NULL || loader->is_instance(), 
+                "checking type of class_loader");
+      k->verify();
+      entry->verify_protection_domain_set();
+      element_count++;
+    }
+  }
+  guarantee(number_of_entries() == element_count,
+            "Verify of system dictionary failed");
+  debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,223 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)dictionary.hpp	1.15 07/05/05 17:05:47 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class DictionaryEntry;
+
+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// The data structure for the system dictionary (and the shared system
+// dictionary): a hashtable mapping (class name, initiating class loader)
+// pairs to klassOops, where each entry also tracks the protection domains
+// approved to access the class (see DictionaryEntry below).
+
+class Dictionary : public TwoOopHashtable {
+  friend class VMStructs;
+private:
+  // current iteration index.
+  static int                    _current_class_index;
+  // pointer to the current hash table entry.
+  static DictionaryEntry*       _current_class_entry;
+
+  // Core lookup: scans bucket `index` for an entry matching
+  // (hash, name, loader); returns NULL on a miss.
+  DictionaryEntry* get_entry(int index, unsigned int hash,
+                             symbolHandle name, Handle loader);
+
+  DictionaryEntry* bucket(int i) {
+    return (DictionaryEntry*)Hashtable::bucket(i);
+  }
+
+  // The following method is not MT-safe and must be done under lock.
+  DictionaryEntry** bucket_addr(int i) {
+    return (DictionaryEntry**)Hashtable::bucket_addr(i);
+  }
+
+  void add_entry(int index, DictionaryEntry* new_entry) {
+    Hashtable::add_entry(index, (HashtableEntry*)new_entry);
+  }
+
+
+public:
+  Dictionary(int table_size);
+  // Reconstitute a dictionary over a preexisting (e.g. shared-archive)
+  // bucket array.
+  Dictionary(int table_size, HashtableBucket* t, int number_of_entries);
+  
+  DictionaryEntry* new_entry(unsigned int hash, klassOop klass, oop loader);
+
+  DictionaryEntry* new_entry();
+
+  void free_entry(DictionaryEntry* entry);
+
+  // Add a loaded class; caller must hold SystemDictionary_lock or be at a
+  // safepoint.
+  void add_klass(symbolHandle class_name, Handle class_loader,KlassHandle obj);
+
+  klassOop find_class(int index, unsigned int hash,
+                      symbolHandle name, Handle loader);
+
+  klassOop find_shared_class(int index, unsigned int hash, symbolHandle name);
+
+  // Compiler support
+  klassOop try_get_next_class();
+
+  // GC support
+
+  void oops_do(OopClosure* f);
+  void always_strong_classes_do(OopClosure* blk);
+  void classes_do(void f(klassOop));
+  void classes_do(void f(klassOop, TRAPS), TRAPS);
+  void classes_do(void f(klassOop, oop));
+  void classes_do(void f(klassOop, oop, TRAPS), TRAPS);
+
+  void methods_do(void f(methodOop));
+
+
+  // Classes loaded by the bootstrap loader are always strongly reachable.
+  // If we're not doing class unloading, all classes are strongly reachable.
+  static bool is_strongly_reachable(oop class_loader, oop klass) {
+    assert (klass != NULL, "should have non-null klass");
+    return (class_loader == NULL || !ClassUnloading);
+  }
+
+  // Unload (that is, break root links to) all unmarked classes and
+  // loaders.  Returns "true" iff something was unloaded.
+  bool do_unloading(BoolObjectClosure* is_alive);
+
+  // Protection domains
+  klassOop find(int index, unsigned int hash, symbolHandle name,
+                Handle loader, Handle protection_domain, TRAPS);
+  bool is_valid_protection_domain(int index, unsigned int hash,
+                                  symbolHandle name, Handle class_loader,
+                                  Handle protection_domain);
+  void add_protection_domain(int index, unsigned int hash,
+                             instanceKlassHandle klass, Handle loader,
+                             Handle protection_domain, TRAPS);
+
+  // Sharing support
+  void dump(SerializeOopClosure* soc);
+  void restore(SerializeOopClosure* soc);
+  void reorder_dictionary();
+
+  
+#ifndef PRODUCT
+  void print();
+#endif
+  void verify();
+};
+
+// The following classes can be in dictionary.cpp, but we need these
+// to be in header file so that SA's vmStructs can access.
+
+// A single link in a DictionaryEntry's singly-linked list of approved
+// protection domains.  Fields are public so the SA (vmStructs) can read them.
+class ProtectionDomainEntry :public CHeapObj {
+  friend class VMStructs;
+ public:
+  ProtectionDomainEntry* _next;
+  oop                    _protection_domain;
+
+  ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
+    _protection_domain = protection_domain;
+    _next              = next;
+  }
+
+  ProtectionDomainEntry* next() { return _next; }
+  oop protection_domain() { return _protection_domain; }
+};
+
+// An entry in the system dictionary, this describes a class as
+// { klassOop, loader, protection_domain }.
+
+class DictionaryEntry : public HashtableEntry {
+  friend class VMStructs;
+ private:
+  // Contains the set of approved protection domains that can access
+  // this system dictionary entry.
+  ProtectionDomainEntry* _pd_set;
+  // Initiating class loader for this entry (NULL denotes the bootstrap
+  // loader); may differ from the klass's defining loader.
+  oop                    _loader;
+
+
+ public:
+  // Tells whether a protection is in the approved set.
+  bool contains_protection_domain(oop protection_domain) const;
+  // Adds a protection domain to the approved set.
+  void add_protection_domain(oop protection_domain);
+
+  // The hashtable literal slot holds the klassOop itself.
+  klassOop klass() const { return (klassOop)literal(); }
+  klassOop* klass_addr() { return (klassOop*)literal_addr(); }
+
+  DictionaryEntry* next() const {
+    return (DictionaryEntry*)HashtableEntry::next();
+  }
+
+  DictionaryEntry** next_addr() {
+    return (DictionaryEntry**)HashtableEntry::next_addr();
+  }
+
+  oop loader() const { return _loader; }
+  void set_loader(oop loader) { _loader = loader; }
+  oop* loader_addr() { return &_loader; }
+
+  ProtectionDomainEntry* pd_set() const { return _pd_set; }
+  void set_pd_set(ProtectionDomainEntry* pd_set) { _pd_set = pd_set; }
+
+  bool has_protection_domain() { return _pd_set != NULL; }
+
+  // Tells whether the initiating class' protection domain can access this
+  // entry's klass.  Trivially true when the checks are globally disabled or
+  // when no protection domain was supplied.
+  bool is_valid_protection_domain(Handle protection_domain) {
+    if (!ProtectionDomainVerification) return true;
+    if (!SystemDictionary::has_checkPackageAccess()) return true;
+
+    return protection_domain() == NULL
+         ? true
+         : contains_protection_domain(protection_domain());
+  }
+
+
+  // GC support: visit every protection-domain oop in this entry's set.
+  void protection_domain_set_oops_do(OopClosure* f) {
+    for (ProtectionDomainEntry* current = _pd_set;
+                                current != NULL;
+                                current = current->_next) {
+      f->do_oop(&(current->_protection_domain));
+    }
+  }
+
+  void verify_protection_domain_set() {
+    for (ProtectionDomainEntry* current = _pd_set;
+                                current != NULL;
+                                current = current->_next) {
+      current->_protection_domain->verify();
+    }
+  }
+
+  // True if this entry describes class_name as initiated by class_loader.
+  bool equals(symbolOop class_name, oop class_loader) const {
+    klassOop klass = (klassOop)literal();
+    return (instanceKlass::cast(klass)->name() == class_name &&
+            _loader == class_loader);
+  }
+
+  // Debugging: report the size of the protection-domain set.
+  void print() {
+    int count = 0;
+    for (ProtectionDomainEntry* current = _pd_set;
+                                current != NULL;
+                                current = current->_next) {
+      count++;
+    }
+    tty->print_cr("pd set = #%d", count);
+  }
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/javaAssertions.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,210 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)javaAssertions.cpp	1.14 07/05/05 17:06:50 JVM"
+#endif
+/*
+ * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_javaAssertions.cpp.incl"
+
+// Assertion state accumulated from the command line during VM startup:
+// per-class and per-package option lists plus the two defaults.
+bool				JavaAssertions::_userDefault = false;
+bool				JavaAssertions::_sysDefault = false;
+JavaAssertions::OptionList*	JavaAssertions::_classes = 0;
+JavaAssertions::OptionList*	JavaAssertions::_packages = 0;
+
+// Build one option-list node; `name` must outlive the node (it is stored,
+// not copied).
+JavaAssertions::OptionList::OptionList(const char* name, bool enabled,
+  OptionList* next) {
+  assert(name != 0, "need a name");
+  _name    = name;
+  _enabled = enabled;
+  _next    = next;
+}
+
+// Number of nodes in the list starting at p (0 for an empty list).
+int JavaAssertions::OptionList::count(OptionList* p) {
+  int n = 0;
+  while (p != 0) {
+    ++n;
+    p = p->next();
+  }
+  return n;
+}
+
+// Record one -ea/-da command-line option for a class or package subtree.
+void JavaAssertions::addOption(const char* name, bool enable) {
+  assert(name != 0, "must have a name");
+
+  // Duplicate the name into C heap; it must live for the lifetime of the VM
+  // and is deliberately never freed, so it leaks (along with other option
+  // strings - e.g., bootclasspath) if a process creates/destroys multiple
+  // VMs.
+  int len = (int)strlen(name);
+  char* copy = NEW_C_HEAP_ARRAY(char, len + 1);
+  strcpy(copy, name);
+
+  // Names ending in "..." denote a package subtree and go on the package
+  // list with the suffix stripped; everything else names a single class.
+  OptionList** head = &_classes;
+  if (len >= 3 && strcmp(copy + len - 3, "...") == 0) {
+    len -= 3;
+    copy[len] = '\0';
+    head = &_packages;
+  }
+
+  // Store names in internal ('/'-separated) format so that
+  // JVM_DesiredAssertionStatus can hand class names straight to
+  // JavaAssertions::enabled(); they are converted back to external form
+  // only once, in createJavaAssertionStatusDirectives.
+  for (int i = 0; i < len; ++i) {
+    if (copy[i] == '.') copy[i] = '/';
+  }
+
+  if (TraceJavaAssertions) {
+    tty->print_cr("JavaAssertions: adding %s %s=%d",
+      head == &_classes ? "class" : "package",
+      copy[0] != '\0' ? copy : "'default'",
+      enable);
+  }
+
+  // Later options take precedence; prepending lets lookups stop at the
+  // first match.
+  *head = new OptionList(copy, enable, *head);
+}
+
+// Create and populate a java.lang.AssertionStatusDirectives instance
+// mirroring the command-line assertion options.  Returns NULL with a
+// pending exception on failure (CHECK_NULL after each allocating call).
+oop JavaAssertions::createAssertionStatusDirectives(TRAPS) {
+  symbolHandle asd_sym = vmSymbolHandles::java_lang_AssertionStatusDirectives();
+  klassOop k = SystemDictionary::resolve_or_fail(asd_sym, true, CHECK_NULL);
+  instanceKlassHandle asd_klass (THREAD, k);
+  asd_klass->initialize(CHECK_NULL);
+  Handle h = asd_klass->allocate_instance_handle(CHECK_NULL);
+
+  int len;
+  typeArrayOop t;
+  // Parallel arrays: package names and their enabled flags.
+  len = OptionList::count(_packages);
+  objArrayOop pn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+  objArrayHandle pkgNames (THREAD, pn);
+  t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
+  typeArrayHandle pkgEnabled(THREAD, t);
+  fillJavaArrays(_packages, len, pkgNames, pkgEnabled, CHECK_NULL);
+
+  // Parallel arrays: class names and their enabled flags.
+  len = OptionList::count(_classes);
+  objArrayOop cn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+  objArrayHandle classNames (THREAD, cn);
+  t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
+  typeArrayHandle classEnabled(THREAD, t);
+  fillJavaArrays(_classes, len, classNames, classEnabled, CHECK_NULL);
+
+  // Install the arrays and the user-class default into the new object.
+  java_lang_AssertionStatusDirectives::set_packages(h(), pkgNames());
+  java_lang_AssertionStatusDirectives::set_packageEnabled(h(), pkgEnabled());
+  java_lang_AssertionStatusDirectives::set_classes(h(), classNames());
+  java_lang_AssertionStatusDirectives::set_classEnabled(h(), classEnabled());
+  java_lang_AssertionStatusDirectives::set_deflt(h(), userClassDefault());
+  return h();
+}
+
+// Fill the parallel Java arrays (names, enabled) from the option list.
+// The list is in reverse command-line order (built by prepending), so the
+// arrays are populated from the back to restore the original ordering.
+// Names are converted from internal '/' form back to '.' form.
+void JavaAssertions::fillJavaArrays(const OptionList* p, int len,
+objArrayHandle names, typeArrayHandle enabled, TRAPS) {
+  int i = len - 1;
+  while (p != 0) {
+    assert(i >= 0, "length does not match list");
+    Handle s = java_lang_String::create_from_str(p->name(), CHECK);
+    s = java_lang_String::char_converter(s, '/', '.', CHECK);
+    names->obj_at_put(i, s());
+    enabled->bool_at_put(i, p->enabled());
+    p = p->next();
+    --i;
+  }
+  assert(i == -1, "length does not match list");
+}
+
+// Return the class-option node whose name equals classname, or 0 if none.
+// First hit wins: later command-line options were prepended, so they take
+// precedence.
+inline JavaAssertions::OptionList*
+JavaAssertions::match_class(const char* classname) {
+  OptionList* p = _classes;
+  while (p != 0) {
+    if (strcmp(p->name(), classname) == 0) {
+      return p;
+    }
+    p = p->next();
+  }
+  return 0;
+}
+
+// Return the most-specific package option that applies to classname, or 0
+// if none does.
+JavaAssertions::OptionList*
+JavaAssertions::match_package(const char* classname) {
+  // Search the package list for any items that apply to classname.  Each
+  // sub-package in classname is checked, from most-specific to least, until one
+  // is found.
+  if (_packages == 0) return 0;
+
+  // Find the length of the "most-specific" package in classname.  If classname
+  // does not include a package, length will be 0 which will match items for the
+  // default package (from options "-ea:..."  or "-da:...").
+  size_t len = strlen(classname);
+  for (/* empty */; len > 0 && classname[len] != '/'; --len) /* empty */;
+
+  do {
+    assert(len == 0 || classname[len] == '/', "not a package name");
+    for (OptionList* p = _packages; p != 0; p = p->next()) {
+      // Match requires the classname prefix AND that the option name ends
+      // exactly at that prefix (no partial-component matches).
+      if (strncmp(p->name(), classname, len) == 0 && p->name()[len] == '\0') {
+	return p;
+      }
+    }
+
+    // Find the length of the next package, taking care to avoid decrementing
+    // past 0 (len is unsigned).
+    while (len > 0 && classname[--len] != '/') /* empty */;
+  } while (len > 0);
+
+  return 0;
+}
+
+// Debug aid: report the outcome of an assertion-status lookup.  Active only
+// under -XX:+TraceJavaAssertions.
+inline void JavaAssertions::trace(const char* name,
+const char* typefound, const char* namefound, bool enabled) {
+  if (!TraceJavaAssertions) return;
+  tty->print_cr("JavaAssertions:  search for %s found %s %s=%d",
+    name, typefound, namefound[0] != '\0' ? namefound : "'default'", enabled);
+}
+
+// Returns whether command-line options enabled assertions for classname
+// (given in internal '/'-separated form).  Precedence: class options, then
+// package options (most specific first), then the system/user default.
+bool JavaAssertions::enabled(const char* classname, bool systemClass) {
+  assert(classname != 0, "must have a classname");
+
+  // This will be slow if the number of assertion options on the command line is
+  // large--it traverses two lists, one of them multiple times.  Could use a
+  // single n-ary tree instead of lists if someone ever notices.
+
+  // First check options that apply to classes.  If we find a match we're done.
+  // (Explicit comparison rather than `if (p = ...)`: the assignment-as-
+  // condition form draws compiler warnings and invites =/== mistakes.)
+  OptionList* p = match_class(classname);
+  if (p != 0) {
+    trace(classname, "class", p->name(), p->enabled());
+    return p->enabled();
+  }
+
+  // Now check packages, from most specific to least.
+  p = match_package(classname);
+  if (p != 0) {
+    trace(classname, "package", p->name(), p->enabled());
+    return p->enabled();
+  }
+
+  // No match.  Return the default status.
+  bool result = systemClass ? systemClassDefault() : userClassDefault();
+  trace(classname, systemClass ? "system" : "user", "default", result);
+  return result;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/javaAssertions.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,100 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)javaAssertions.hpp	1.11 07/05/05 17:06:50 JVM"
+#endif
+/*
+ * Copyright 2000 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// Records the assertion status specified on the command line
+// (-ea/-da/-esa/-dsa and their per-class / per-package forms) so it can be
+// queried during class loading and later mirrored into an instance of
+// java.lang.AssertionStatusDirectives.  All state is static (AllStatic).
+class JavaAssertions: AllStatic {
+public:
+  static inline bool userClassDefault();
+  static inline void setUserClassDefault(bool enabled);
+  static inline bool systemClassDefault();
+  static inline void setSystemClassDefault(bool enabled);
+
+  // Add a command-line option.  A name ending in "..." applies to a package and
+  // any subpackages; other names apply to a single class.
+  static void addOption(const char* name, bool enable);
+
+  // Return true if command-line options have enabled assertions for the named
+  // class.  Should be called only after all command-line options have been
+  // processed.  Note:  this only consults command-line options and does not
+  // account for any dynamic changes to assertion status.
+  static bool enabled(const char* classname, bool systemClass);
+
+  // Create an instance of java.lang.AssertionStatusDirectives and fill in the
+  // fields based on the command-line assertion options.
+  static oop createAssertionStatusDirectives(TRAPS);
+
+private:
+  class OptionList;
+  // Copies the recorded class/package options into the parallel Java arrays
+  // used by AssertionStatusDirectives.
+  static void fillJavaArrays(const OptionList* p, int len, objArrayHandle names,
+    typeArrayHandle status, TRAPS);
+
+  static inline void trace(const char* name, const char* typefound,
+    const char* namefound, bool enabled);
+
+  static inline OptionList*	match_class(const char* classname);
+  static OptionList*		match_package(const char* classname);
+
+  static bool		_userDefault;	// User class default (-ea/-da).
+  static bool		_sysDefault;	// System class default (-esa/-dsa).
+  static OptionList*	_classes;	// Options for classes.
+  static OptionList*	_packages;	// Options for package trees.
+};
+
+// Singly-linked list node recording one command-line assertion option: the
+// class or package name, its enabled flag, and the next node.  Nodes are
+// C-heap allocated (CHeapObj) and live for the life of the VM.
+class JavaAssertions::OptionList: public CHeapObj {
+public:
+  inline OptionList(const char* name, bool enable, OptionList* next);
+
+  inline const char*	name() const	{ return _name; }
+  inline bool		enabled() const	{ return _enabled; }
+  inline OptionList*	next() const	{ return _next; }
+
+  // Number of nodes reachable from p (including p itself).
+  static int count(OptionList* p);
+
+private:
+  const char*	_name;
+  OptionList*	_next;
+  bool		_enabled;
+};
+
+// Accessors for the user (-ea/-da) and system (-esa/-dsa) default settings.
+// The setters log when -XX:+TraceJavaAssertions is enabled.
+inline bool JavaAssertions::userClassDefault() {
+  return _userDefault;
+}
+
+inline void JavaAssertions::setUserClassDefault(bool enabled) {
+  if (TraceJavaAssertions)
+    tty->print_cr("JavaAssertions::setUserClassDefault(%d)", enabled);
+  _userDefault = enabled;
+}
+
+inline bool JavaAssertions::systemClassDefault() {
+  return _sysDefault;
+}
+
+inline void JavaAssertions::setSystemClassDefault(bool enabled) {
+  if (TraceJavaAssertions)
+    tty->print_cr("JavaAssertions::setSystemClassDefault(%d)", enabled);
+  _sysDefault = enabled;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,2518 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)javaClasses.cpp	1.247 07/05/17 15:50:20 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_javaClasses.cpp.incl"
+
+// Helpful macro for computing field offsets at run time rather than hardcoding them
+// Looks up (name_symbol, signature_symbol) as a field declared directly on
+// klass_oop and stores its byte offset into dest_offset; a missing field is a
+// fatal error because the VM's view of the class layout would be wrong.
+#define COMPUTE_OFFSET(klass_name_as_C_str, dest_offset, klass_oop, name_symbol, signature_symbol) \
+{                                                                                                  \
+  fieldDescriptor fd;                                                                              \
+  instanceKlass* ik = instanceKlass::cast(klass_oop);                                              \
+  if (!ik->find_local_field(name_symbol, signature_symbol, &fd)) {                                 \
+    fatal("Invalid layout of " klass_name_as_C_str);                                               \
+  }                                                                                                \
+  dest_offset = fd.offset();                                                                       \
+}
+
+// Same as above but for "optional" offsets that might not be present in certain JDK versions
+// If the field is absent, dest_offset is left untouched (callers rely on its
+// initial sentinel value, typically 0 or -1, to detect "not present").
+#define COMPUTE_OPTIONAL_OFFSET(klass_name_as_C_str, dest_offset, klass_oop, name_symbol, signature_symbol) \
+{                                                                                                  \
+  fieldDescriptor fd;                                                                              \
+  instanceKlass* ik = instanceKlass::cast(klass_oop);                                              \
+  if (ik->find_local_field(name_symbol, signature_symbol, &fd)) {                                  \
+    dest_offset = fd.offset();                                                                     \
+  }                                                                                                \
+}
+
+// Allocates an empty java.lang.String of 'length' characters together with its
+// backing char array, optionally in the permanent generation ('tenured').
+// The returned String has value/count set; the caller fills in the characters.
+Handle java_lang_String::basic_create(int length, bool tenured, TRAPS) {
+  // Create the String object first, so there's a chance that the String
+  // and the char array it points to end up in the same cache line.
+  oop obj;
+  if (tenured) {
+    obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_permanent_instance(CHECK_NH);
+  } else {
+    obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_instance(CHECK_NH);
+  }
+
+  // Create the char array.  The String object must be handlized here
+  // because GC can happen as a result of the allocation attempt.
+  Handle h_obj(THREAD, obj);
+  typeArrayOop buffer;
+  if (tenured) {
+    buffer = oopFactory::new_permanent_charArray(length, CHECK_NH);
+  } else {
+    buffer = oopFactory::new_charArray(length, CHECK_NH);
+  }
+
+  // Point the String at the char array
+  obj = h_obj();
+  set_value(obj, buffer);
+  // No need to zero the offset, allocation zero'ed the entire String object
+  assert(offset(obj) == 0, "initial String offset should be zero");
+//set_offset(obj, 0);
+  set_count(obj, length);
+
+  return h_obj;
+}
+
+// Creates a String whose characters are copied from 'unicode' (UTF-16 units).
+Handle java_lang_String::basic_create_from_unicode(jchar* unicode, int length, bool tenured, TRAPS) {
+  Handle h_obj = basic_create(length, tenured, CHECK_NH);
+  typeArrayOop buffer = value(h_obj());
+  for (int index = 0; index < length; index++) {
+    buffer->char_at_put(index, unicode[index]);
+  }
+  return h_obj;
+}
+
+// Convenience wrappers: ordinary vs. permanent-generation allocation, and a
+// variant returning a raw oop instead of a Handle.
+Handle java_lang_String::create_from_unicode(jchar* unicode, int length, TRAPS) {
+  return basic_create_from_unicode(unicode, length, false, CHECK_NH);
+}
+
+Handle java_lang_String::create_tenured_from_unicode(jchar* unicode, int length, TRAPS) {
+  return basic_create_from_unicode(unicode, length, true, CHECK_NH);
+}
+
+oop java_lang_String::create_oop_from_unicode(jchar* unicode, int length, TRAPS) {
+  Handle h_obj = basic_create_from_unicode(unicode, length, false, CHECK_0);
+  return h_obj();
+}
+
+// Creates a String from a NUL-terminated UTF-8 C string; NULL maps to a null
+// Handle rather than an exception.
+Handle java_lang_String::create_from_str(const char* utf8_str, TRAPS) {
+  if (utf8_str == NULL) {
+    return Handle();
+  }
+  int length = UTF8::unicode_length(utf8_str);
+  Handle h_obj = basic_create(length, false, CHECK_NH);
+  if (length > 0) {
+    UTF8::convert_to_unicode(utf8_str, value(h_obj())->char_at_addr(0), length);
+  }
+  return h_obj;
+}
+
+oop java_lang_String::create_oop_from_str(const char* utf8_str, TRAPS) {
+  Handle h_obj = create_from_str(utf8_str, CHECK_0);
+  return h_obj();
+}
+
+// Creates a String from a VM symbol (UTF-8 bytes with an explicit length, so
+// embedded NULs are preserved, unlike create_from_str).
+Handle java_lang_String::create_from_symbol(symbolHandle symbol, TRAPS) {
+  int length = UTF8::unicode_length((char*)symbol->bytes(), symbol->utf8_length());
+  Handle h_obj = basic_create(length, false, CHECK_NH);
+  if (length > 0) {
+    UTF8::convert_to_unicode((char*)symbol->bytes(), value(h_obj())->char_at_addr(0), length);
+  }
+  return h_obj;
+}
+
+// Converts a C string to a Java String based on current encoding
+// Lazily resolves libjava's NewStringPlatform entry point (cached in a
+// function-local static) and invokes it through JNI.  The thread must be a
+// JavaThread; it transitions to native state for the duration of the call.
+Handle java_lang_String::create_from_platform_dependent_str(const char* str, TRAPS) {
+  assert(str != NULL, "bad arguments");
+
+  typedef jstring (*to_java_string_fn_t)(JNIEnv*, const char *);
+  static to_java_string_fn_t _to_java_string_fn = NULL;
+
+  if (_to_java_string_fn == NULL) {
+    void *lib_handle = os::native_java_library();
+    _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, hpi::dll_lookup(lib_handle, "NewStringPlatform"));
+    if (_to_java_string_fn == NULL) {
+      fatal("NewStringPlatform missing");
+    }
+  }
+
+  jstring js = NULL;
+  { JavaThread* thread = (JavaThread*)THREAD;
+    assert(thread->is_Java_thread(), "must be java thread");
+    ThreadToNativeFromVM ttn(thread);
+    HandleMark hm(thread);    
+    js = (_to_java_string_fn)(thread->jni_environment(), str);
+  }
+  return Handle(THREAD, JNIHandles::resolve(js));
+}
+
+// Returns a String equal to 'java_string' with every occurrence of from_char
+// replaced by to_char.  If no occurrence exists, the original Handle is
+// returned unchanged (no copy is made).
+Handle java_lang_String::char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS) {
+  oop          obj    = java_string();
+  // Typical usage is to convert all '/' to '.' in string.
+  typeArrayOop value  = java_lang_String::value(obj);
+  int          offset = java_lang_String::offset(obj);
+  int          length = java_lang_String::length(obj);
+
+  // First check if any from_char exist
+  int index; // Declared outside, used later
+  for (index = 0; index < length; index++) {
+    if (value->char_at(index + offset) == from_char) {
+      break;
+    }
+  }
+  if (index == length) {
+    // No from_char, so do not copy.
+    return java_string;
+  }
+
+  // Create new UNICODE buffer. Must handlize value because GC
+  // may happen during String and char array creation.
+  typeArrayHandle h_value(THREAD, value);
+  Handle string = basic_create(length, false, CHECK_NH);
+
+  typeArrayOop from_buffer = h_value();
+  typeArrayOop to_buffer   = java_lang_String::value(string());
+
+  // Copy contents
+  for (index = 0; index < length; index++) {
+    jchar c = from_buffer->char_at(index + offset);
+    if (c == from_char) {
+      c = to_char;
+    }
+    to_buffer->char_at_put(index, c);
+  }  
+  return string;
+}
+
+// Copies the String's characters into a resource-area jchar array and reports
+// the length through the out-parameter.  Caller must have a ResourceMark.
+jchar* java_lang_String::as_unicode_string(oop java_string, int& length) {
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+               length = java_lang_String::length(java_string);
+
+  jchar* result = NEW_RESOURCE_ARRAY(jchar, length);
+  for (int index = 0; index < length; index++) {
+    result[index] = value->char_at(index + offset);
+  }
+  return result;
+}
+
+// Converts the String to UTF-8 and interns it as a VM symbol.
+symbolHandle java_lang_String::as_symbol(Handle java_string, TRAPS) {
+  oop          obj    = java_string();
+  typeArrayOop value  = java_lang_String::value(obj);
+  int          offset = java_lang_String::offset(obj);
+  int          length = java_lang_String::length(obj);
+
+  ResourceMark rm(THREAD);
+  symbolHandle result;
+
+  if (length > 0) {
+    int utf8_length = UNICODE::utf8_length(value->char_at_addr(offset), length);
+    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
+    UNICODE::convert_to_utf8(value->char_at_addr(offset), length, chars);
+    // Allocate the symbol
+    result = oopFactory::new_symbol_handle(chars, utf8_length, CHECK_(symbolHandle()));  
+  } else {
+    result = oopFactory::new_symbol_handle("", 0, CHECK_(symbolHandle()));  
+  }
+  return result;
+}
+
+// Number of bytes the String would occupy when encoded as UTF-8.
+int java_lang_String::utf8_length(oop java_string) {
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
+  jchar* position = (length == 0) ? NULL : value->char_at_addr(offset);
+  return UNICODE::utf8_length(position, length);
+}
+
+// UTF-8 encoding of the whole String (resource-area allocated by UNICODE).
+char* java_lang_String::as_utf8_string(oop java_string) {
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
+  jchar* position = (length == 0) ? NULL : value->char_at_addr(offset);
+  return UNICODE::as_utf8(position, length);
+}
+
+// UTF-8 encoding of the substring [start, start + len).
+char* java_lang_String::as_utf8_string(oop java_string, int start, int len) {
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
+  assert(start + len <= length, "just checking");
+  jchar* position = value->char_at_addr(offset + start);
+  return UNICODE::as_utf8(position, len);
+}
+
+// Character-by-character comparison of a java.lang.String against a raw
+// jchar buffer of length 'len'.
+bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
+  assert(SharedSkipVerify ||
+         java_string->klass() == SystemDictionary::string_klass(),
+         "must be java_string");
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
+  if (length != len) {
+    return false;
+  }
+  for (int i = 0; i < len; i++) {
+    if (value->char_at(i + offset) != chars[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Prints the String's characters, quoted, to 'st'.
+void java_lang_String::print(Handle java_string, outputStream* st) {
+  oop          obj    = java_string();
+  assert(obj->klass() == SystemDictionary::string_klass(), "must be java_string");
+  typeArrayOop value  = java_lang_String::value(obj);
+  int          offset = java_lang_String::offset(obj);
+  int          length = java_lang_String::length(obj);
+
+  // NOTE(review): 'end' is computed but never used -- the loop below iterates
+  // over the full 'length'.  Truncating output to 100 chars looks intended.
+  int end = MIN2(length, 100); 
+  if (value == NULL) {
+    // This can happen if, e.g., printing a String
+    // object before its initializer has been called
+    st->print_cr("NULL");
+  } else {
+    st->print("\"");
+    for (int index = 0; index < length; index++) {
+      st->print("%c", value->char_at(index + offset));
+    }
+    st->print("\"");
+  }
+}
+
+
+// Allocates the java.lang.Class mirror for klass 'k', links mirror<->klass in
+// both directions, and for array klasses also links the component mirror.
+// Returns NULL if java.lang.Class itself is not yet loaded (early bootstrap);
+// in that case the mirror is created later.
+oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
+  assert(k->java_mirror() == NULL, "should only assign mirror once");
+  // Use this moment of initialization to cache modifier_flags also,
+  // to support Class.getModifiers().  Instance classes recalculate
+  // the cached flags after the class file is parsed, but before the
+  // class is put into the system dictionary.
+  int computed_modifiers = k->compute_modifier_flags(CHECK_0);
+  k->set_modifier_flags(computed_modifiers);
+  if (SystemDictionary::class_klass_loaded()) {
+    // Allocate mirror (java.lang.Class instance)
+    Handle mirror = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+    // Setup indirections
+    mirror->obj_field_put(klass_offset,  k());
+    k->set_java_mirror(mirror());
+    // It might also have a component mirror.  This mirror must already exist.
+    if (k->oop_is_javaArray()) {
+      Handle comp_mirror;
+      if (k->oop_is_typeArray()) {
+        BasicType type = typeArrayKlass::cast(k->as_klassOop())->element_type();
+        comp_mirror = SystemDictionary::java_mirror(type);
+        assert(comp_mirror.not_null(), "must have primitive mirror");
+      } else if (k->oop_is_objArray()) {
+        klassOop element_klass = objArrayKlass::cast(k->as_klassOop())->element_klass();
+        if (element_klass != NULL
+            && (Klass::cast(element_klass)->oop_is_instance() ||
+                Klass::cast(element_klass)->oop_is_javaArray())) {
+          comp_mirror = Klass::cast(element_klass)->java_mirror();
+          assert(comp_mirror.not_null(), "must have element mirror");
+        }
+        // else some object array internal to the VM, like systemObjArrayKlassObj
+      }
+      if (comp_mirror.not_null()) {
+        // Two-way link between the array klass and its component mirror:
+        arrayKlass::cast(k->as_klassOop())->set_component_mirror(comp_mirror());
+        set_array_klass(comp_mirror(), k->as_klassOop());
+      }
+    }
+    return mirror();
+  } else {
+    return NULL;
+  }
+}
+
+
+// Mirror for a primitive type (int, boolean, ..., void).  Primitive mirrors
+// have a NULL klass_offset field; non-void ones record the corresponding
+// type-array klass so primitive_type() can recover the BasicType.
+oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
+  // This should be improved by adding a field at the Java level or by
+  // introducing a new VM klass (see comment in ClassFileParser)
+  oop java_class = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+  if (type != T_VOID) {
+    klassOop aklass = Universe::typeArrayKlassObj(type);
+    assert(aklass != NULL, "correct bootstrap");
+    set_array_klass(java_class, aklass);
+  }
+  return java_class;
+}
+
+
+// The klass this mirror represents; NULL for primitive-type mirrors.
+klassOop java_lang_Class::as_klassOop(oop java_class) {
+  //%note memory_2
+  klassOop k = klassOop(java_class->obj_field(klass_offset));
+  assert(k == NULL || k->is_klass(), "type check");
+  return k;
+}
+
+
+// Array klass whose component type is this mirror (may be NULL if none
+// has been created/linked yet).
+klassOop java_lang_Class::array_klass(oop java_class) {
+  klassOop k = klassOop(java_class->obj_field(array_klass_offset));
+  assert(k == NULL || k->is_klass() && Klass::cast(k)->oop_is_javaArray(), "should be array klass");
+  return k;
+}
+
+
+void java_lang_Class::set_array_klass(oop java_class, klassOop klass) {
+  assert(klass->is_klass() && Klass::cast(klass)->oop_is_javaArray(), "should be array klass");
+  java_class->obj_field_put(array_klass_offset, klass);
+}
+
+
+// Cached resolved constructor used by reflection (may be NULL).
+methodOop java_lang_Class::resolved_constructor(oop java_class) {
+  oop constructor = java_class->obj_field(resolved_constructor_offset);
+  assert(constructor == NULL || constructor->is_method(), "should be method");
+  return methodOop(constructor);
+}
+
+
+void java_lang_Class::set_resolved_constructor(oop java_class, methodOop constructor) {
+  assert(constructor->is_method(), "should be method");
+  java_class->obj_field_put(resolved_constructor_offset, constructor);
+}
+
+
+// Primitive mirrors are distinguished by a NULL klass_offset field.
+bool java_lang_Class::is_primitive(oop java_class) {
+  klassOop k = klassOop(java_class->obj_field(klass_offset)); 
+  return k == NULL;
+}
+
+
+// Recovers the BasicType of a primitive mirror from its linked type-array
+// klass; the void mirror has no such klass and maps to T_VOID.
+BasicType java_lang_Class::primitive_type(oop java_class) {
+  assert(java_lang_Class::is_primitive(java_class), "just checking");
+  klassOop ak = klassOop(java_class->obj_field(array_klass_offset));
+  BasicType type = T_VOID;
+  if (ak != NULL) {
+    // Note: create_basic_type_mirror above initializes ak to a non-null value.
+    type = arrayKlass::cast(ak)->element_type();
+  } else {
+    assert(java_class == SystemDictionary::void_mirror(), "only valid non-array primitive");
+  }
+  assert(SystemDictionary::java_mirror(type) == java_class, "must be consistent");
+  return type;
+}
+
+
+// Checked lookup of the mirror for primitive type 't'.
+oop java_lang_Class::primitive_mirror(BasicType t) {
+  oop mirror = SystemDictionary::java_mirror(t);
+  assert(mirror != NULL && mirror->is_a(SystemDictionary::class_klass()), "must be a Class");
+  assert(java_lang_Class::is_primitive(mirror), "must be primitive");
+  return mirror;
+}
+
+// Runtime-discovered offset of java.lang.Class.classRedefinedCount
+// (-1 until computed, or when the field is absent pre-1.5).
+bool java_lang_Class::offsets_computed = false;
+int  java_lang_Class::classRedefinedCount_offset = -1;
+
+void java_lang_Class::compute_offsets() {
+  assert(!offsets_computed, "offsets should be initialized only once");
+  offsets_computed = true;
+
+  klassOop k = SystemDictionary::class_klass();
+  // The classRedefinedCount field is only present starting in 1.5,
+  // so don't go fatal. 
+  COMPUTE_OPTIONAL_OFFSET("java.lang.Class", classRedefinedCount_offset,
+    k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
+}
+
+// Returns the mirror's classRedefinedCount, or -1 when the field does not
+// exist (pre-1.5 class library).
+int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
+  if (!JDK_Version::is_gte_jdk15x_version()
+      || classRedefinedCount_offset == -1) {
+    // The classRedefinedCount field is only present starting in 1.5.
+    // If we don't have an offset for it then just return -1 as a marker.
+    return -1;
+  }
+
+  return the_class_mirror->int_field(classRedefinedCount_offset);
+}
+
+// Silently does nothing when the field is absent (pre-1.5 class library).
+void java_lang_Class::set_classRedefinedCount(oop the_class_mirror, int value) {
+  if (!JDK_Version::is_gte_jdk15x_version()
+      || classRedefinedCount_offset == -1) {
+    // The classRedefinedCount field is only present starting in 1.5.
+    // If we don't have an offset for it then nothing to set.
+    return;
+  }
+
+  the_class_mirror->int_field_put(classRedefinedCount_offset, value);
+}
+
+
+// Note: JDK1.1 and before had a privateInfo_offset field which was used for the
+//       platform thread structure, and a eetop offset which was used for thread
+//       local storage (and unused by the HotSpot VM). In JDK1.2 the two structures 
+//       merged, so in the HotSpot VM we just use the eetop field for the thread 
+//       instead of the privateInfo_offset.
+//
+// Note: The stackSize field is only present starting in 1.4.
+
+// Field offsets into java.lang.Thread, discovered at runtime by
+// compute_offsets() below.  0 means "not yet computed, or field not present
+// in this JDK version" (the optional fields are version-dependent).
+int java_lang_Thread::_name_offset = 0;
+int java_lang_Thread::_group_offset = 0;
+int java_lang_Thread::_contextClassLoader_offset = 0;
+int java_lang_Thread::_inheritedAccessControlContext_offset = 0;
+int java_lang_Thread::_priority_offset = 0;
+int java_lang_Thread::_eetop_offset = 0;
+int java_lang_Thread::_daemon_offset = 0;
+int java_lang_Thread::_stillborn_offset = 0;
+int java_lang_Thread::_stackSize_offset = 0;
+int java_lang_Thread::_tid_offset = 0;
+int java_lang_Thread::_thread_status_offset = 0;
+int java_lang_Thread::_park_blocker_offset = 0;
+int java_lang_Thread::_park_event_offset = 0 ; 
+
+
+void java_lang_Thread::compute_offsets() {
+  assert(_group_offset == 0, "offsets should be initialized only once");
+
+  klassOop k = SystemDictionary::thread_klass();
+  COMPUTE_OFFSET("java.lang.Thread", _name_offset,      k, vmSymbols::name_name(),      vmSymbols::char_array_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _group_offset,     k, vmSymbols::group_name(),     vmSymbols::threadgroup_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), vmSymbols::accesscontrolcontext_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _priority_offset,  k, vmSymbols::priority_name(),  vmSymbols::int_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _daemon_offset,    k, vmSymbols::daemon_name(),    vmSymbols::bool_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _eetop_offset,     k, vmSymbols::eetop_name(),     vmSymbols::long_signature());
+  COMPUTE_OFFSET("java.lang.Thread", _stillborn_offset, k, vmSymbols::stillborn_name(), vmSymbols::bool_signature());
+  // The stackSize field is only present starting in 1.4, so don't go fatal. 
+  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _stackSize_offset, k, vmSymbols::stackSize_name(), vmSymbols::long_signature());
+  // The tid and thread_status fields are only present starting in 1.5, so don't go fatal. 
+  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _tid_offset, k, vmSymbols::thread_id_name(), vmSymbols::long_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _thread_status_offset, k, vmSymbols::thread_status_name(), vmSymbols::int_signature());
+  // The parkBlocker field is only present starting in 1.6, so don't go fatal. 
+  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_blocker_offset, k, vmSymbols::park_blocker_name(), vmSymbols::object_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_event_offset, k, vmSymbols::park_event_name(),
+ vmSymbols::long_signature());
+}
+
+
+// --- java.lang.Thread field accessors -------------------------------------
+
+// The native JavaThread* stored in the eetop field; NULL before the thread
+// has started or after it has terminated.
+JavaThread* java_lang_Thread::thread(oop java_thread) {
+  return (JavaThread*) java_thread->obj_field(_eetop_offset);
+}
+
+
+void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
+  // We are storing a JavaThread* (malloc'ed data) into a long field in the thread 
+  // object. The store has to be 64-bit wide so we use a pointer store, but we 
+  // cannot call oopDesc::obj_field_put since it includes a write barrier!
+  oop* addr = java_thread->obj_field_addr(_eetop_offset);
+  *addr = (oop) thread;
+}
+
+
+// The thread's name as a char[] (may be NULL).
+typeArrayOop java_lang_Thread::name(oop java_thread) {
+  oop name = java_thread->obj_field(_name_offset);  
+  assert(name == NULL || (name->is_typeArray() && typeArrayKlass::cast(name->klass())->element_type() == T_CHAR), "just checking");
+  return typeArrayOop(name);
+}
+
+
+void java_lang_Thread::set_name(oop java_thread, typeArrayOop name) {
+  assert(java_thread->obj_field(_name_offset) == NULL, "name should be NULL");
+  java_thread->obj_field_put(_name_offset, name);
+}
+
+
+ThreadPriority java_lang_Thread::priority(oop java_thread) {
+  return (ThreadPriority)java_thread->int_field(_priority_offset);
+}
+
+
+void java_lang_Thread::set_priority(oop java_thread, ThreadPriority priority) {
+  java_thread->int_field_put(_priority_offset, priority);
+}
+
+
+oop java_lang_Thread::threadGroup(oop java_thread) {
+  return java_thread->obj_field(_group_offset);
+}
+
+
+bool java_lang_Thread::is_stillborn(oop java_thread) {
+  return java_thread->bool_field(_stillborn_offset) != 0;
+}
+
+
+// We never have reason to turn the stillborn bit off
+void java_lang_Thread::set_stillborn(oop java_thread) {
+  java_thread->bool_field_put(_stillborn_offset, true);
+}
+
+
+// Alive == the eetop field still points at a native JavaThread.
+bool java_lang_Thread::is_alive(oop java_thread) {
+  JavaThread* thr = java_lang_Thread::thread(java_thread);
+  return (thr != NULL);
+}
+
+
+bool java_lang_Thread::is_daemon(oop java_thread) {
+  return java_thread->bool_field(_daemon_offset) != 0;
+}
+
+
+void java_lang_Thread::set_daemon(oop java_thread) {
+  java_thread->bool_field_put(_daemon_offset, true);
+}
+
+oop java_lang_Thread::context_class_loader(oop java_thread) {
+  return java_thread->obj_field(_contextClassLoader_offset);
+}
+
+oop java_lang_Thread::inherited_access_control_context(oop java_thread) {
+  return java_thread->obj_field(_inheritedAccessControlContext_offset);
+}
+
+
+// Requested stack size, or 0 when the field is absent (pre-1.4).
+jlong java_lang_Thread::stackSize(oop java_thread) {
+  // The stackSize field is only present starting in 1.4
+  if (_stackSize_offset > 0) {
+    assert(JDK_Version::is_gte_jdk14x_version(), "sanity check");
+    return java_thread->long_field(_stackSize_offset);
+  } else {
+    return 0;
+  }
+}
+
+// Write the thread status value to threadStatus field in java.lang.Thread java class.
+// No-op when the field is absent (pre-1.5).
+void java_lang_Thread::set_thread_status(oop java_thread,
+                                         java_lang_Thread::ThreadStatus status) {
+  assert(JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm");
+  // The threadStatus is only present starting in 1.5
+  if (_thread_status_offset > 0) {
+    java_thread->int_field_put(_thread_status_offset, status);
+  }
+}
+
+// Read thread status value from threadStatus field in java.lang.Thread java class.
+java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
+  assert(Thread::current()->is_VM_thread() ||
+         JavaThread::current()->thread_state() == _thread_in_vm,
+         "Java Thread is not running in vm");
+  // The threadStatus is only present starting in 1.5
+  if (_thread_status_offset > 0) {
+    return (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
+  } else {
+    // All we can easily figure out is if it is alive, but that is
+    // enough info for a valid unknown status.
+    // These aren't restricted to valid set ThreadStatus values, so
+    // use JVMTI values and cast.
+    JavaThread* thr = java_lang_Thread::thread(java_thread);
+    if (thr == NULL) {
+      // the thread hasn't run yet or is in the process of exiting
+      return NEW;
+    } 
+    return (java_lang_Thread::ThreadStatus)JVMTI_THREAD_STATE_ALIVE;
+  }
+}
+
+
+jlong java_lang_Thread::thread_id(oop java_thread) {
+  // The thread ID field is only present starting in 1.5
+  if (_tid_offset > 0) {
+    return java_thread->long_field(_tid_offset);
+  } else {
+    return 0;
+  }
+}
+
+// The object this thread is parked on (java.util.concurrent); NULL if the
+// parkBlocker field is unavailable.
+oop java_lang_Thread::park_blocker(oop java_thread) {
+  assert(JDK_Version::supports_thread_park_blocker() && _park_blocker_offset != 0, 
+         "Must support parkBlocker field");
+
+  if (_park_blocker_offset > 0) {
+    return java_thread->obj_field(_park_blocker_offset);
+  }
+
+  return NULL;
+}
+
+// Raw park-event pointer value stored on the Thread; 0 when unavailable.
+jlong java_lang_Thread::park_event(oop java_thread) {
+  if (_park_event_offset > 0) {
+    return java_thread->long_field(_park_event_offset);
+  }
+  return 0;
+}
+ 
+// Returns false when the field is absent, so the caller knows the value was
+// not stored.
+bool java_lang_Thread::set_park_event(oop java_thread, jlong ptr) {
+  if (_park_event_offset > 0) {
+    java_thread->long_field_put(_park_event_offset, ptr);
+    return true;
+  }
+  return false;
+}
+
+
+// Human-readable name for the thread's status, matching the strings used in
+// thread dumps.  Requires the 1.5+ threadStatus field.
+const char* java_lang_Thread::thread_status_name(oop java_thread) {
+  assert(JDK_Version::is_gte_jdk15x_version() && _thread_status_offset != 0, "Must have thread status");
+  ThreadStatus status = (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
+  switch (status) {
+    case NEW                      : return "NEW";
+    case RUNNABLE                 : return "RUNNABLE";
+    case SLEEPING                 : return "TIMED_WAITING (sleeping)";
+    case IN_OBJECT_WAIT           : return "WAITING (on object monitor)";
+    case IN_OBJECT_WAIT_TIMED     : return "TIMED_WAITING (on object monitor)";
+    case PARKED                   : return "WAITING (parking)";
+    case PARKED_TIMED             : return "TIMED_WAITING (parking)";
+    case BLOCKED_ON_MONITOR_ENTER : return "BLOCKED (on object monitor)";
+    case TERMINATED               : return "TERMINATED";
+    default                       : return "UNKNOWN";
+  };
+}
+// Field offsets into java.lang.ThreadGroup, discovered at runtime
+// (0 = not yet computed), followed by the corresponding accessors.
+int java_lang_ThreadGroup::_parent_offset = 0;
+int java_lang_ThreadGroup::_name_offset = 0;
+int java_lang_ThreadGroup::_threads_offset = 0;
+int java_lang_ThreadGroup::_groups_offset = 0;
+int java_lang_ThreadGroup::_maxPriority_offset = 0;
+int java_lang_ThreadGroup::_destroyed_offset = 0;
+int java_lang_ThreadGroup::_daemon_offset = 0;
+int java_lang_ThreadGroup::_vmAllowSuspension_offset = 0;
+int java_lang_ThreadGroup::_nthreads_offset = 0;
+int java_lang_ThreadGroup::_ngroups_offset = 0;
+
+// Parent group; the root ("system") group's parent field is what this
+// returns for it -- callers must tolerate NULL.
+oop  java_lang_ThreadGroup::parent(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return java_thread_group->obj_field(_parent_offset);
+}
+
+// ("name as oop" accessor is not necessary)
+
+// The group's name as the char[] backing its String; NULL if unnamed.
+typeArrayOop java_lang_ThreadGroup::name(oop java_thread_group) {
+  oop name = java_thread_group->obj_field(_name_offset);
+  // ThreadGroup.name can be null
+  return name == NULL ? (typeArrayOop)NULL : java_lang_String::value(name);
+}
+
+// Count of threads directly in this group (not including subgroups).
+int java_lang_ThreadGroup::nthreads(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return java_thread_group->int_field(_nthreads_offset);
+}
+
+objArrayOop java_lang_ThreadGroup::threads(oop java_thread_group) {
+  oop threads = java_thread_group->obj_field(_threads_offset);
+  assert(threads != NULL, "threadgroups should have threads");
+  assert(threads->is_objArray(), "just checking"); // Todo: Add better type checking code
+  return objArrayOop(threads);
+}
+
+// Count of direct subgroups.
+int java_lang_ThreadGroup::ngroups(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return java_thread_group->int_field(_ngroups_offset);
+}
+
+objArrayOop java_lang_ThreadGroup::groups(oop java_thread_group) {
+  oop groups = java_thread_group->obj_field(_groups_offset);
+  assert(groups == NULL || groups->is_objArray(), "just checking"); // Todo: Add better type checking code
+  return objArrayOop(groups);
+}
+
+ThreadPriority java_lang_ThreadGroup::maxPriority(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return (ThreadPriority) java_thread_group->int_field(_maxPriority_offset);
+}
+
+bool java_lang_ThreadGroup::is_destroyed(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return java_thread_group->bool_field(_destroyed_offset) != 0;
+}
+
+bool java_lang_ThreadGroup::is_daemon(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return java_thread_group->bool_field(_daemon_offset) != 0;
+}
+
+bool java_lang_ThreadGroup::is_vmAllowSuspension(oop java_thread_group) {
+  assert(java_thread_group->is_oop(), "thread group must be oop");
+  return java_thread_group->bool_field(_vmAllowSuspension_offset) != 0;
+}
+
+void java_lang_ThreadGroup::compute_offsets() {
+  assert(_parent_offset == 0, "offsets should be initialized only once");
+
+  klassOop k = SystemDictionary::threadGroup_klass();
+
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _parent_offset,      k, vmSymbols::parent_name(),      vmSymbols::threadgroup_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _name_offset,        k, vmSymbols::name_name(),        vmSymbols::string_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _threads_offset,     k, vmSymbols::threads_name(),     vmSymbols::thread_array_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _groups_offset,      k, vmSymbols::groups_name(),      vmSymbols::threadgroup_array_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _maxPriority_offset, k, vmSymbols::maxPriority_name(), vmSymbols::int_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _destroyed_offset,   k, vmSymbols::destroyed_name(),   vmSymbols::bool_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _daemon_offset,      k, vmSymbols::daemon_name(),      vmSymbols::bool_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _vmAllowSuspension_offset, k, vmSymbols::vmAllowSuspension_name(), vmSymbols::bool_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _nthreads_offset,    k, vmSymbols::nthreads_name(),    vmSymbols::int_signature());
+  COMPUTE_OFFSET("java.lang.ThreadGroup", _ngroups_offset,     k, vmSymbols::ngroups_name(),     vmSymbols::int_signature());
+}
+
+// The backtrace field is read with acquire semantics to pair with the
+// release store in set_backtrace below.
+oop java_lang_Throwable::backtrace(oop throwable) {
+  return throwable->obj_field_acquire(backtrace_offset);
+}
+
+
+// Release store: publishes a fully-constructed backtrace before the
+// reference becomes visible through backtrace().
+void java_lang_Throwable::set_backtrace(oop throwable, oop value) {
+  throwable->release_obj_field_put(backtrace_offset, value);
+}
+
+
+oop java_lang_Throwable::message(oop throwable) {
+  return throwable->obj_field(detailMessage_offset);
+}
+
+
+oop java_lang_Throwable::message(Handle throwable) {
+  return throwable->obj_field(detailMessage_offset);
+}
+
+
+void java_lang_Throwable::set_message(oop throwable, oop value) {
+  throwable->obj_field_put(detailMessage_offset, value);
+}
+
+
+// Clear the lazily-built Java-level stackTrace field (exists only in 1.4+).
+void java_lang_Throwable::clear_stacktrace(oop throwable) {
+  assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
+  throwable->obj_field_put(stackTrace_offset, NULL);
+}
+
+
+// Print "ClassName: message" (message omitted when null) to the stream.
+void java_lang_Throwable::print(oop throwable, outputStream* st) {
+  ResourceMark rm;
+  klassOop k = throwable->klass();
+  assert(k != NULL, "just checking");
+  st->print("%s", instanceKlass::cast(k)->external_name());
+  oop msg = message(throwable);
+  if (msg != NULL) {
+    st->print(": %s", java_lang_String::as_utf8_string(msg));
+  }
+}
+
+
+// Handle-taking variant of print() above; same output format.
+void java_lang_Throwable::print(Handle throwable, outputStream* st) {
+  ResourceMark rm;
+  klassOop k = throwable->klass();
+  assert(k != NULL, "just checking");
+  st->print("%s", instanceKlass::cast(k)->external_name());
+  oop msg = message(throwable);
+  if (msg != NULL) {
+    st->print(": %s", java_lang_String::as_utf8_string(msg));
+  }
+}
+
+// Print stack trace element to resource allocated buffer
+char* java_lang_Throwable::print_stack_element_to_buffer(methodOop method, int bci) { 
+  // Get strings and string lengths
+  instanceKlass* klass = instanceKlass::cast(method->method_holder());
+  const char* klass_name  = klass->external_name();
+  int buf_len = (int)strlen(klass_name);
+  char* source_file_name;
+  if (klass->source_file_name() == NULL) {
+    source_file_name = NULL;
+  } else {
+    source_file_name = klass->source_file_name()->as_C_string();
+    buf_len += (int)strlen(source_file_name);
+  }
+  char* method_name = method->name()->as_C_string();
+  buf_len += (int)strlen(method_name);
+
+  // Allocate temporary buffer with extra space for formatting and line number
+  char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
+
+  // Print stack trace line in buffer
+  sprintf(buf, "\tat %s.%s", klass_name, method_name);
+  if (method->is_native()) {
+    strcat(buf, "(Native Method)");
+  } else {    
+    int line_number = method->line_number_from_bci(bci);
+    if (source_file_name != NULL && (line_number != -1)) {
+      // Sourcename and linenumber
+      sprintf(buf + (int)strlen(buf), "(%s:%d)", source_file_name, line_number);
+    } else if (source_file_name != NULL) {
+      // Just sourcename
+      sprintf(buf + (int)strlen(buf), "(%s)", source_file_name);      
+    } else {
+      // Neither soucename and linenumber
+      sprintf(buf + (int)strlen(buf), "(Unknown Source)");
+    }
+    nmethod* nm = method->code();
+    if (WizardMode && nm != NULL) {
+      sprintf(buf + (int)strlen(buf), "(nmethod %#x)", nm);
+    }
+  }
+
+  return buf;
+}
+
+
+// Print one stack trace element to a Java PrintStream (via print_to_stream).
+void java_lang_Throwable::print_stack_element(Handle stream, methodOop method, int bci) {  
+  ResourceMark rm;
+  char* buf = print_stack_element_to_buffer(method, bci);
+  print_to_stream(stream, buf);
+}
+
+// Print one stack trace element to a VM outputStream.
+void java_lang_Throwable::print_stack_element(outputStream *st, methodOop method, int bci) {  
+  ResourceMark rm;
+  char* buf = print_stack_element_to_buffer(method, bci);
+  st->print_cr("%s", buf);
+}
+
+// Print 'str' to the given Java stream by calling its println(char[])
+// method; falls back to tty when the stream handle is null. Any exception
+// raised by the upcall is deliberately discarded — we are already in the
+// middle of exception handling (same policy as the classic VM).
+void java_lang_Throwable::print_to_stream(Handle stream, const char* str) {
+  if (stream.is_null()) {
+    tty->print_cr("%s", str);
+  } else {
+    EXCEPTION_MARK;
+    JavaValue result(T_VOID);
+    Handle arg (THREAD, oopFactory::new_charArray(str, THREAD));
+    if (!HAS_PENDING_EXCEPTION) {
+      JavaCalls::call_virtual(&result, 
+                              stream, 
+                              KlassHandle(THREAD, stream->klass()),
+                              vmSymbolHandles::println_name(), 
+                              vmSymbolHandles::char_array_void_signature(), 
+                              arg, 
+                              THREAD);
+    }
+    // Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
+    if (HAS_PENDING_EXCEPTION) CLEAR_PENDING_EXCEPTION;
+  }
+
+}
+
+
+// Placeholder line printed when a throwable has no backtrace.
+const char* java_lang_Throwable::no_stack_trace_message() {
+  return "\t<<no stack trace available>>";
+}
+
+
+// Currently used only for exceptions occurring during startup
+void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
+  Thread *THREAD = Thread::current();
+  Handle h_throwable(THREAD, throwable);
+  while (h_throwable.not_null()) {
+    objArrayHandle result (THREAD, objArrayOop(backtrace(h_throwable())));
+    if (result.is_null()) {
+      st->print_cr(no_stack_trace_message());
+      return;
+    }
+  
+    while (result.not_null()) {
+      objArrayHandle methods (THREAD,
+                              objArrayOop(result->obj_at(trace_methods_offset)));
+      typeArrayHandle bcis (THREAD, 
+                            typeArrayOop(result->obj_at(trace_bcis_offset)));
+
+      if (methods.is_null() || bcis.is_null()) {
+        st->print_cr(no_stack_trace_message());
+        return;
+      }
+
+      int length = methods()->length();
+      for (int index = 0; index < length; index++) {
+        methodOop method = methodOop(methods()->obj_at(index));
+        if (method == NULL) goto handle_cause;
+        int bci = bcis->ushort_at(index);
+        print_stack_element(st, method, bci);
+      }
+      result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
+    }
+  handle_cause:
+    {
+      EXCEPTION_MARK;
+      JavaValue result(T_OBJECT);
+      JavaCalls::call_virtual(&result,
+                              h_throwable,
+                              KlassHandle(THREAD, h_throwable->klass()),
+                              vmSymbolHandles::getCause_name(),
+                              vmSymbolHandles::void_throwable_signature(),
+                              THREAD);
+      // Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
+      if (HAS_PENDING_EXCEPTION) {
+        CLEAR_PENDING_EXCEPTION;
+        h_throwable = Handle();
+      } else {
+        h_throwable = Handle(THREAD, (oop) result.get_jobject());
+        if (h_throwable.not_null()) {
+          st->print("Caused by: ");
+          print(h_throwable, st); 
+          st->cr();
+        }
+      }
+    }
+  }
+}
+
+
+void java_lang_Throwable::print_stack_trace(oop throwable, oop print_stream) {
+  // Note: this is no longer used in Merlin, but we support it for compatibility.
+  Thread *thread = Thread::current();
+  Handle stream(thread, print_stream);
+  objArrayHandle result (thread, objArrayOop(backtrace(throwable)));
+  if (result.is_null()) {
+    print_to_stream(stream, no_stack_trace_message());
+    return;
+  }
+  
+  while (result.not_null()) {
+    objArrayHandle methods (thread,
+                            objArrayOop(result->obj_at(trace_methods_offset)));
+    typeArrayHandle bcis (thread, 
+                          typeArrayOop(result->obj_at(trace_bcis_offset)));
+
+    if (methods.is_null() || bcis.is_null()) {
+      print_to_stream(stream, no_stack_trace_message());
+      return;
+    }
+
+    int length = methods()->length();
+    for (int index = 0; index < length; index++) {
+      methodOop method = methodOop(methods()->obj_at(index));
+      if (method == NULL) return;
+      int bci = bcis->ushort_at(index);
+      print_stack_element(stream, method, bci);
+    }
+    result = objArrayHandle(thread, objArrayOop(result->obj_at(trace_next_offset)));
+  }
+}
+
+// This class provides a simple wrapper over the internal structure of
+// exception backtrace to insulate users of the backtrace from needing
+// to know what it looks like.
+// Stack-allocated; holds raw oops (_head/_methods/_bcis) guarded by a
+// No_Safepoint_Verifier, which expand() temporarily pauses around its
+// allocations. Method-array stores bypass the per-store write barrier and
+// are batched via write_ref_array in flush() (see push()).
+class BacktraceBuilder: public StackObj {
+ private:
+  Handle          _backtrace;
+  objArrayOop     _head;
+  objArrayOop     _methods;
+  typeArrayOop    _bcis;
+  int             _index;
+  bool            _dirty;   // true when raw stores need a barrier flush
+  bool            _done;
+  No_Safepoint_Verifier _nsv;
+
+ public:
+
+  enum {
+    trace_methods_offset = java_lang_Throwable::trace_methods_offset,
+    trace_bcis_offset    = java_lang_Throwable::trace_bcis_offset,
+    trace_next_offset    = java_lang_Throwable::trace_next_offset,
+    trace_size           = java_lang_Throwable::trace_size,
+    trace_chunk_size     = java_lang_Throwable::trace_chunk_size
+  };
+
+  // constructor for new backtrace
+  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
+    expand(CHECK);
+    _backtrace = _head;
+    _index = 0;
+    _dirty = false;
+    _done = false;
+  }
+
+  // Apply the deferred write barrier for raw stores made by push().
+  void flush() {
+    if (_dirty && _methods != NULL) {
+      BarrierSet* bs = Universe::heap()->barrier_set();
+      assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+      bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0),
+                                    _methods->length() * HeapWordsPerOop));
+      _dirty = false;
+    }
+  }
+
+  // Allocate and link a new chunk (head record + methods + bcis arrays),
+  // chaining it from the previous chunk via trace_next_offset.
+  void expand(TRAPS) {
+    flush();
+
+    objArrayHandle old_head(THREAD, _head);
+    // Allocation below may safepoint/GC; handles above keep oops alive.
+    Pause_No_Safepoint_Verifier pnsv(&_nsv);
+
+    objArrayOop head = oopFactory::new_objectArray(trace_size, CHECK);
+    objArrayHandle new_head(THREAD, head);
+
+    objArrayOop methods = oopFactory::new_objectArray(trace_chunk_size, CHECK);
+    objArrayHandle new_methods(THREAD, methods);
+
+    typeArrayOop bcis = oopFactory::new_shortArray(trace_chunk_size, CHECK);
+    typeArrayHandle new_bcis(THREAD, bcis);
+
+    if (!old_head.is_null()) {
+      old_head->obj_at_put(trace_next_offset, new_head());
+    }
+    new_head->obj_at_put(trace_methods_offset, new_methods());
+    new_head->obj_at_put(trace_bcis_offset, new_bcis());
+
+    _head    = new_head();
+    _methods = new_methods();
+    _bcis    = new_bcis();
+    _index = 0;
+  }
+
+  oop backtrace() {
+    flush();
+    return _backtrace();
+  }
+
+  // Append one (method, bci) pair, expanding into a new chunk when full.
+  inline void push(methodOop method, short bci, TRAPS) {
+    if (_index >= trace_chunk_size) {
+      methodHandle mhandle(THREAD, method);
+      expand(CHECK);
+      method = mhandle();
+    }
+
+    // _methods->obj_at_put(_index, method);
+    // Raw store without write barrier; flush() applies the barrier in bulk.
+    *_methods->obj_at_addr(_index) = method;
+    _bcis->ushort_at_put(_index, bci);
+    _index++;
+    _dirty = true;
+  }
+
+  methodOop current_method() {
+    assert(_index >= 0 && _index < trace_chunk_size, "out of range");
+    return methodOop(_methods->obj_at(_index));
+  }
+
+  jushort current_bci() {
+    assert(_index >= 0 && _index < trace_chunk_size, "out of range");
+    return _bcis->ushort_at(_index);
+  }
+};
+
+
+// Fill the throwable's backtrace by walking raw frames by hand instead of
+// using vframe (see the speed note below); in debug builds a parallel
+// vframeStream cross-checks every (method, bci) produced. Skips the
+// fillInStackTrace frame and any <init> frames belonging to the
+// throwable's own class hierarchy, matching classic-VM output.
+void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
+  if (!StackTraceInThrowable) return;
+  ResourceMark rm(THREAD);
+
+  // Start out by clearing the backtrace for this object, in case the VM
+  // runs out of memory while allocating the stack trace
+  set_backtrace(throwable(), NULL);
+  if (JDK_Version::is_gte_jdk14x_version()) {
+    // New since 1.4, clear lazily constructed Java level stacktrace if
+    // refilling occurs
+    clear_stacktrace(throwable());
+  }
+
+  int max_depth = MaxJavaStackTraceDepth;
+  JavaThread* thread = (JavaThread*)THREAD;
+  BacktraceBuilder bt(CHECK);
+
+  // Instead of using vframe directly, this version of fill_in_stack_trace 
+  // basically handles everything by hand. This significantly improved the 
+  // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows.  
+  // See bug 6333838 for  more details.
+  // The "ASSERT" here is to verify this method generates the exactly same stack
+  // trace as utilizing vframe.
+#ifdef ASSERT 
+  vframeStream st(thread);
+  methodHandle st_method(THREAD, st.method());
+#endif
+  int total_count = 0;
+  RegisterMap map(thread, false);
+  int decode_offset = 0;
+  nmethod* nm = NULL;
+  bool skip_fillInStackTrace_check = false;
+  bool skip_throwableInit_check = false;
+ 
+  for (frame fr = thread->last_frame(); max_depth != total_count;) {
+    methodOop method = NULL;
+    int bci = 0;
+    
+    // Compiled java method case.
+    // decode_offset != 0 means the previous iteration left us inside an
+    // inlined scope of 'nm'; decode its next scope entry directly.
+    if (decode_offset != 0) {
+      DebugInfoReadStream stream(nm, decode_offset);
+      decode_offset = stream.read_int();
+      method = (methodOop)nm->oop_at(stream.read_int());
+      bci = stream.read_bci();
+    } else {
+      if (fr.is_first_frame()) break;      
+      address pc = fr.pc();
+      if (AbstractInterpreter::contains(pc)) {
+        intptr_t bcx = fr.interpreter_frame_bcx();
+        method = fr.interpreter_frame_method();
+        bci =  fr.is_bci(bcx) ? bcx : method->bci_from((address)bcx);
+        fr = fr.sender(&map);
+      } else {
+        CodeBlob* cb = fr.cb();
+        // HMMM QQQ might be nice to have frame return nm as NULL if cb is non-NULL
+        // but non nmethod
+        fr = fr.sender(&map);
+        if (cb == NULL || !cb->is_nmethod()) {
+          continue;
+        }
+        nm = (nmethod*)cb;
+        if (nm->method()->is_native()) {
+          method = nm->method();
+          bci = 0;
+        } else {
+          PcDesc* pd = nm->pc_desc_at(pc);
+          decode_offset = pd->scope_decode_offset();
+          // if decode_offset is not equal to 0, it will execute the 
+          // "compiled java method case" at the beginning of the loop.
+          continue;
+        }
+      }
+    }  
+#ifdef ASSERT
+  assert(st_method() == method && st.bci() == bci,
+         "Wrong stack trace");
+  st.next();
+  // vframeStream::method isn't GC-safe so store off a copy
+  // of the methodOop in case we GC.
+  if (!st.at_end()) {
+    st_method = st.method();
+  }
+#endif
+    if (!skip_fillInStackTrace_check) {
+      // check "fillInStackTrace" only once, so we negate the flag
+      // after the first time check.
+      skip_fillInStackTrace_check = true;
+      if (method->name() == vmSymbols::fillInStackTrace_name()) {
+        continue;
+      }
+    }
+    // skip <init> methods of the exceptions klass. If there is <init> methods
+    // that belongs to a superclass of the exception  we are going to skipping
+    // them in stack trace. This is simlar to classic VM.
+    if (!skip_throwableInit_check) {
+      if (method->name() == vmSymbols::object_initializer_name() &&  
+          throwable->is_a(method->method_holder())) {
+        continue;
+      } else {
+        // if no "Throwable.init()" method found, we stop checking it next time.
+        skip_throwableInit_check = true;
+      }
+    }
+    bt.push(method, bci, CHECK);
+    total_count++;
+  }
+
+  // Put completed stack trace into throwable object
+  set_backtrace(throwable(), bt.backtrace());
+}
+
+// Convenience entry point: fills the stack trace while preserving any
+// pending exception, and swallows exceptions raised during the fill.
+void java_lang_Throwable::fill_in_stack_trace(Handle throwable) {
+  // No-op if stack trace is disabled
+  if (!StackTraceInThrowable) {
+    return;
+  }
+ 
+  // Disable stack traces for some preallocated out of memory errors
+  if (!Universe::should_fill_in_stack_trace(throwable)) {
+    return;
+  }
+ 
+  PRESERVE_EXCEPTION_MARK;
+ 
+  JavaThread* thread = JavaThread::active();
+  fill_in_stack_trace(throwable, thread);
+  // ignore exceptions thrown during stack trace filling
+  CLEAR_PENDING_EXCEPTION;  
+}
+
+void java_lang_Throwable::allocate_backtrace(Handle throwable, TRAPS) {
+  // Allocate stack trace - backtrace is created but not filled in
+
+  // No-op if stack trace is disabled 
+  if (!StackTraceInThrowable) return;
+
+  objArrayOop h_oop = oopFactory::new_objectArray(trace_size, CHECK);
+  objArrayHandle backtrace  (THREAD, h_oop);
+  objArrayOop m_oop = oopFactory::new_objectArray(trace_chunk_size, CHECK);
+  objArrayHandle methods (THREAD, m_oop);
+  typeArrayOop b = oopFactory::new_shortArray(trace_chunk_size, CHECK);
+  typeArrayHandle bcis(THREAD, b);
+  
+  // backtrace has space for one chunk (next is NULL)
+  backtrace->obj_at_put(trace_methods_offset, methods());
+  backtrace->obj_at_put(trace_bcis_offset, bcis());
+  set_backtrace(throwable(), backtrace());
+}
+
+
+void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle throwable) {
+  // Fill in stack trace into preallocated backtrace (no GC)
+
+  // No-op if stack trace is disabled
+  if (!StackTraceInThrowable) return;
+
+  assert(throwable->is_a(SystemDictionary::throwable_klass()), "sanity check");
+
+  oop backtrace = java_lang_Throwable::backtrace(throwable());
+  assert(backtrace != NULL, "backtrace not preallocated");
+
+  oop m = objArrayOop(backtrace)->obj_at(trace_methods_offset);
+  objArrayOop methods = objArrayOop(m);
+  assert(methods != NULL && methods->length() > 0, "method array not preallocated");
+  
+  oop b = objArrayOop(backtrace)->obj_at(trace_bcis_offset);
+  typeArrayOop bcis = typeArrayOop(b);
+  assert(bcis != NULL, "bci array not preallocated");
+
+  assert(methods->length() == bcis->length(), "method and bci arrays should match");
+
+  JavaThread* thread = JavaThread::current();
+  ResourceMark rm(thread);
+  vframeStream st(thread); 
+
+  // Unlike fill_in_stack_trace we do not skip fillInStackTrace or throwable init 
+  // methods as preallocated errors aren't created by "java" code. 
+
+  // fill in as much stack trace as possible
+  int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth);
+  int chunk_count = 0;
+
+  for (;!st.at_end(); st.next()) {    
+    // add element
+    bcis->ushort_at_put(chunk_count, st.bci());
+    methods->obj_at_put(chunk_count, st.method());
+
+    chunk_count++;
+
+    // Bail-out for deep stacks
+    if (chunk_count >= max_chunks) break;
+  }
+}
+
+
+// Count the frames stored in the throwable's backtrace: full chunks
+// contribute trace_chunk_size each, the last chunk is scanned up to its
+// first NULL method slot. Throws NullPointerException for a null throwable.
+int java_lang_Throwable::get_stack_trace_depth(oop throwable, TRAPS) {
+  if (throwable == NULL) {
+    THROW_0(vmSymbols::java_lang_NullPointerException());
+  }
+  objArrayOop chunk = objArrayOop(backtrace(throwable));
+  int depth = 0;
+  if (chunk != NULL) {
+    // Iterate over chunks and count full ones
+    while (true) {
+      objArrayOop next = objArrayOop(chunk->obj_at(trace_next_offset));
+      if (next == NULL) break;
+      depth += trace_chunk_size;
+      chunk = next;
+    }
+    assert(chunk != NULL && chunk->obj_at(trace_next_offset) == NULL, "sanity check");
+    // Count element in remaining partial chunk
+    objArrayOop methods = objArrayOop(chunk->obj_at(trace_methods_offset));
+    typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
+    assert(methods != NULL && bcis != NULL, "sanity check");
+    for (int i = 0; i < methods->length(); i++) {
+      if (methods->obj_at(i) == NULL) break;
+      depth++;
+    }
+  }
+  return depth;
+}
+
+
+// Build and return a java.lang.StackTraceElement for the frame at 'index'
+// in the throwable's chunked backtrace. Throws NullPointerException for a
+// null throwable and IndexOutOfBoundsException for a negative or
+// out-of-range index (including indices past the filled portion of the
+// last, partially-full chunk).
+oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS) {
+  if (throwable == NULL) {
+    THROW_0(vmSymbols::java_lang_NullPointerException());
+  }
+  if (index < 0) {
+    THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
+  }
+  // Compute how many chunks to skip and index into actual chunk
+  objArrayOop chunk = objArrayOop(backtrace(throwable));
+  int skip_chunks = index / trace_chunk_size;
+  int chunk_index = index % trace_chunk_size;
+  while (chunk != NULL && skip_chunks > 0) {
+    chunk = objArrayOop(chunk->obj_at(trace_next_offset));
+    skip_chunks--;
+  }
+  if (chunk == NULL) {
+    THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
+  }
+  // Get method,bci from chunk
+  objArrayOop methods = objArrayOop(chunk->obj_at(trace_methods_offset));
+  typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
+  assert(methods != NULL && bcis != NULL, "sanity check");
+  methodHandle method(THREAD, methodOop(methods->obj_at(chunk_index)));
+  int bci = bcis->ushort_at(chunk_index);
+  // Chunk can be partially full; a NULL method slot means index is past the end
+  if (method.is_null()) {
+    THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
+  }
+
+  oop element = java_lang_StackTraceElement::create(method, bci, CHECK_0);
+  return element;
+}
+
+// Allocate and populate a java.lang.StackTraceElement (class name, method
+// name, file name, line number) for the given method and bci. 1.4+ only —
+// the StackTraceElement class does not exist in earlier JDKs.
+oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
+  // SystemDictionary::stackTraceElement_klass() will be null for pre-1.4 JDKs
+  assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
+
+  // Allocate java.lang.StackTraceElement instance
+  klassOop k = SystemDictionary::stackTraceElement_klass();
+  instanceKlassHandle ik (THREAD, k);
+  if (ik->should_be_initialized()) {
+    ik->initialize(CHECK_0);
+  }
+
+  Handle element = ik->allocate_instance_handle(CHECK_0);
+  // Fill in class name
+  ResourceMark rm(THREAD);
+  const char* str = instanceKlass::cast(method->method_holder())->external_name();
+  oop classname = StringTable::intern((char*) str, CHECK_0);
+  java_lang_StackTraceElement::set_declaringClass(element(), classname);
+  // Fill in method name
+  oop methodname = StringTable::intern(method->name(), CHECK_0);
+  java_lang_StackTraceElement::set_methodName(element(), methodname);
+  // Fill in source file name
+  symbolOop source = instanceKlass::cast(method->method_holder())->source_file_name();
+  oop filename = StringTable::intern(source, CHECK_0);
+  java_lang_StackTraceElement::set_fileName(element(), filename);
+  // Fill in source line number
+  int line_number;
+  if (method->is_native()) {
+    // Negative value different from -1 below, enabling Java code in 
+    // class java.lang.StackTraceElement to distinguish "native" from
+    // "no LineNumberTable".
+    line_number = -2;
+  } else {
+    // Returns -1 if no LineNumberTable, and otherwise actual line number
+    line_number = method->line_number_from_bci(bci);
+  }
+  java_lang_StackTraceElement::set_lineNumber(element(), line_number);
+
+  return element();
+}
+
+
+// Resolve the single 'override' field offset for AccessibleObject.
+void java_lang_reflect_AccessibleObject::compute_offsets() {
+  klassOop k = SystemDictionary::reflect_accessible_object_klass();
+  COMPUTE_OFFSET("java.lang.reflect.AccessibleObject", override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature());
+}
+
+jboolean java_lang_reflect_AccessibleObject::override(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return (jboolean) reflect->bool_field(override_offset);
+}
+
+void java_lang_reflect_AccessibleObject::set_override(oop reflect, jboolean value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  reflect->bool_field_put(override_offset, (int) value);
+}
+
+// Resolve java.lang.reflect.Method field offsets. The 1.5-only fields
+// (signature, annotations, parameter annotations, annotation default) are
+// optional: their offsets stay -1 when the fields are absent.
+void java_lang_reflect_Method::compute_offsets() {
+  klassOop k = SystemDictionary::reflect_method_klass();
+  COMPUTE_OFFSET("java.lang.reflect.Method", clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Method", name_offset,           k, vmSymbols::name_name(),           vmSymbols::string_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Method", returnType_offset,     k, vmSymbols::returnType_name(),     vmSymbols::class_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Method", parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Method", exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Method", slot_offset,           k, vmSymbols::slot_name(),           vmSymbols::int_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Method", modifiers_offset,      k, vmSymbols::modifiers_name(),      vmSymbols::int_signature());
+  // The generic signature and annotations fields are only present in 1.5
+  signature_offset = -1;
+  annotations_offset = -1;
+  parameter_annotations_offset = -1;
+  annotation_default_offset = -1;
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", signature_offset,             k, vmSymbols::signature_name(),             vmSymbols::string_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotations_offset,           k, vmSymbols::annotations_name(),           vmSymbols::byte_array_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotation_default_offset,    k, vmSymbols::annotation_default_name(),    vmSymbols::byte_array_signature());
+}
+
+// Allocate a new (uninitialized-fields) java.lang.reflect.Method instance.
+Handle java_lang_reflect_Method::create(TRAPS) {  
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  klassOop klass = SystemDictionary::reflect_method_klass();
+  // This class is eagerly initialized during VM initialization, since we keep a reference
+  // to one of the methods
+  assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");  
+  return instanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+}
+
+// Plain field accessors for java.lang.reflect.Method. Every one asserts
+// Universe::is_fully_initialized() because the offsets are only valid
+// after reflection classes are loaded. The has_*_field() predicates guard
+// the 1.5-only optional fields (offset == -1 when absent).
+oop java_lang_reflect_Method::clazz(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->obj_field(clazz_offset);
+}
+
+void java_lang_reflect_Method::set_clazz(oop reflect, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+   reflect->obj_field_put(clazz_offset, value);
+}
+
+int java_lang_reflect_Method::slot(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->int_field(slot_offset);
+}
+
+void java_lang_reflect_Method::set_slot(oop reflect, int value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  reflect->int_field_put(slot_offset, value);
+}
+
+oop java_lang_reflect_Method::name(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return method->obj_field(name_offset);
+}
+
+void java_lang_reflect_Method::set_name(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  method->obj_field_put(name_offset, value);
+}
+
+oop java_lang_reflect_Method::return_type(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return method->obj_field(returnType_offset);
+}
+
+void java_lang_reflect_Method::set_return_type(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  method->obj_field_put(returnType_offset, value);
+}
+
+oop java_lang_reflect_Method::parameter_types(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return method->obj_field(parameterTypes_offset);
+}
+
+void java_lang_reflect_Method::set_parameter_types(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  method->obj_field_put(parameterTypes_offset, value);
+}
+
+oop java_lang_reflect_Method::exception_types(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return method->obj_field(exceptionTypes_offset);
+}
+
+void java_lang_reflect_Method::set_exception_types(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  method->obj_field_put(exceptionTypes_offset, value);
+}
+
+int java_lang_reflect_Method::modifiers(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return method->int_field(modifiers_offset);
+}
+
+void java_lang_reflect_Method::set_modifiers(oop method, int value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  method->int_field_put(modifiers_offset, value);
+}
+
+bool java_lang_reflect_Method::has_signature_field() {
+  return (signature_offset >= 0);
+}
+
+oop java_lang_reflect_Method::signature(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_signature_field(), "signature field must be present");
+  return method->obj_field(signature_offset);
+}
+
+void java_lang_reflect_Method::set_signature(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_signature_field(), "signature field must be present");
+  method->obj_field_put(signature_offset, value);
+}
+
+bool java_lang_reflect_Method::has_annotations_field() {
+  return (annotations_offset >= 0);
+}
+
+oop java_lang_reflect_Method::annotations(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotations_field(), "annotations field must be present");
+  return method->obj_field(annotations_offset);
+}
+
+void java_lang_reflect_Method::set_annotations(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotations_field(), "annotations field must be present");
+  method->obj_field_put(annotations_offset, value);
+}
+
+bool java_lang_reflect_Method::has_parameter_annotations_field() {
+  return (parameter_annotations_offset >= 0);
+}
+
+oop java_lang_reflect_Method::parameter_annotations(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
+  return method->obj_field(parameter_annotations_offset);
+}
+
+void java_lang_reflect_Method::set_parameter_annotations(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
+  method->obj_field_put(parameter_annotations_offset, value);
+}
+
+bool java_lang_reflect_Method::has_annotation_default_field() {
+  return (annotation_default_offset >= 0);
+}
+
+oop java_lang_reflect_Method::annotation_default(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotation_default_field(), "annotation default field must be present");
+  return method->obj_field(annotation_default_offset);
+}
+
+void java_lang_reflect_Method::set_annotation_default(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotation_default_field(), "annotation default field must be present");
+  method->obj_field_put(annotation_default_offset, value);
+}
+
+// Computes and caches the field offsets of java.lang.reflect.Constructor by
+// looking each field up in the loaded mirror class.  Mandatory fields use
+// COMPUTE_OFFSET (fatal if the layout is unexpected); the 1.5-only generic
+// signature and annotation fields use COMPUTE_OPTIONAL_OFFSET and leave the
+// -1 sentinel in place when the field is absent.
+void java_lang_reflect_Constructor::compute_offsets() {
+  klassOop k = SystemDictionary::reflect_constructor_klass();
+  COMPUTE_OFFSET("java.lang.reflect.Constructor", clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Constructor", parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Constructor", exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Constructor", slot_offset,           k, vmSymbols::slot_name(),           vmSymbols::int_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Constructor", modifiers_offset,      k, vmSymbols::modifiers_name(),      vmSymbols::int_signature());
+  // The generic signature and annotations fields are only present in 1.5
+  signature_offset = -1;
+  annotations_offset = -1;
+  parameter_annotations_offset = -1;
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", signature_offset,             k, vmSymbols::signature_name(),             vmSymbols::string_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", annotations_offset,           k, vmSymbols::annotations_name(),           vmSymbols::byte_array_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
+}
+
+// Allocates a fresh, blank java.lang.reflect.Constructor instance.  Resolves
+// the class (failing hard via resolve_or_fail) and runs its static
+// initializer first; any pending exception propagates through the CHECK_NH
+// macros as a null Handle.
+Handle java_lang_reflect_Constructor::create(TRAPS) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  symbolHandle name = vmSymbolHandles::java_lang_reflect_Constructor();
+  klassOop k = SystemDictionary::resolve_or_fail(name, true, CHECK_NH);
+  instanceKlassHandle klass (THREAD, k);
+  // Ensure it is initialized
+  klass->initialize(CHECK_NH);
+  return klass->allocate_instance_handle(CHECK_NH);
+}
+
+// --- java_lang_reflect_Constructor: field accessors -------------------------
+// Typed getters/setters for java.lang.reflect.Constructor instances, mirroring
+// the java_lang_reflect_Method accessors above.  All require a fully
+// initialized VM; the optional 1.5 fields are guarded by has_*_field().
+oop java_lang_reflect_Constructor::clazz(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->obj_field(clazz_offset);
+}
+
+void java_lang_reflect_Constructor::set_clazz(oop reflect, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+   reflect->obj_field_put(clazz_offset, value);
+}
+
+oop java_lang_reflect_Constructor::parameter_types(oop constructor) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return constructor->obj_field(parameterTypes_offset);
+}
+
+void java_lang_reflect_Constructor::set_parameter_types(oop constructor, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  constructor->obj_field_put(parameterTypes_offset, value);
+}
+
+oop java_lang_reflect_Constructor::exception_types(oop constructor) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return constructor->obj_field(exceptionTypes_offset);
+}
+
+void java_lang_reflect_Constructor::set_exception_types(oop constructor, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  constructor->obj_field_put(exceptionTypes_offset, value);
+}
+
+int java_lang_reflect_Constructor::slot(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->int_field(slot_offset);
+}
+
+void java_lang_reflect_Constructor::set_slot(oop reflect, int value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  reflect->int_field_put(slot_offset, value);
+}
+
+int java_lang_reflect_Constructor::modifiers(oop constructor) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return constructor->int_field(modifiers_offset);
+}
+
+void java_lang_reflect_Constructor::set_modifiers(oop constructor, int value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  constructor->int_field_put(modifiers_offset, value);
+}
+
+// True if the loaded Constructor class declares the 1.5-only generic
+// "signature" field (offset is not the -1 sentinel).
+bool java_lang_reflect_Constructor::has_signature_field() {
+  return (signature_offset >= 0);
+}
+
+oop java_lang_reflect_Constructor::signature(oop constructor) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_signature_field(), "signature field must be present");
+  return constructor->obj_field(signature_offset);
+}
+
+void java_lang_reflect_Constructor::set_signature(oop constructor, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_signature_field(), "signature field must be present");
+  constructor->obj_field_put(signature_offset, value);
+}
+
+// True if the loaded Constructor class declares the 1.5-only "annotations"
+// field.
+bool java_lang_reflect_Constructor::has_annotations_field() {
+  return (annotations_offset >= 0);
+}
+
+oop java_lang_reflect_Constructor::annotations(oop constructor) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotations_field(), "annotations field must be present");
+  return constructor->obj_field(annotations_offset);
+}
+
+void java_lang_reflect_Constructor::set_annotations(oop constructor, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotations_field(), "annotations field must be present");
+  constructor->obj_field_put(annotations_offset, value);
+}
+
+// True if the loaded Constructor class declares the 1.5-only
+// "parameterAnnotations" field.
+bool java_lang_reflect_Constructor::has_parameter_annotations_field() {
+  return (parameter_annotations_offset >= 0);
+}
+
+oop java_lang_reflect_Constructor::parameter_annotations(oop method) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
+  return method->obj_field(parameter_annotations_offset);
+}
+
+void java_lang_reflect_Constructor::set_parameter_annotations(oop method, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
+  method->obj_field_put(parameter_annotations_offset, value);
+}
+
+// Computes and caches the field offsets of java.lang.reflect.Field.
+// Mandatory fields are fatal if missing; the 1.5-only signature and
+// annotations fields are optional and retain -1 when absent.
+void java_lang_reflect_Field::compute_offsets() {
+  klassOop k = SystemDictionary::reflect_field_klass();
+  COMPUTE_OFFSET("java.lang.reflect.Field", clazz_offset,     k, vmSymbols::clazz_name(),     vmSymbols::class_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Field", name_offset,      k, vmSymbols::name_name(),      vmSymbols::string_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Field", type_offset,      k, vmSymbols::type_name(),      vmSymbols::class_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Field", slot_offset,      k, vmSymbols::slot_name(),      vmSymbols::int_signature());
+  COMPUTE_OFFSET("java.lang.reflect.Field", modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature());
+  // The generic signature and annotations fields are only present in 1.5
+  signature_offset = -1;
+  annotations_offset = -1;
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature());
+  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", annotations_offset,  k, vmSymbols::annotations_name(),  vmSymbols::byte_array_signature());
+}
+
+// Allocates a fresh, blank java.lang.reflect.Field instance after resolving
+// and initializing the class; exceptions propagate as a null Handle via
+// CHECK_NH.
+Handle java_lang_reflect_Field::create(TRAPS) {  
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  symbolHandle name = vmSymbolHandles::java_lang_reflect_Field();
+  klassOop k = SystemDictionary::resolve_or_fail(name, true, CHECK_NH);
+  instanceKlassHandle klass (THREAD, k);
+  // Ensure it is initialized
+  klass->initialize(CHECK_NH);
+  return klass->allocate_instance_handle(CHECK_NH);
+}
+
+// --- java_lang_reflect_Field: field accessors -------------------------------
+// Typed getters/setters for java.lang.reflect.Field instances; same
+// conventions as the Method/Constructor accessors above (VM must be fully
+// initialized, optional 1.5 fields guarded by has_*_field()).
+oop java_lang_reflect_Field::clazz(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->obj_field(clazz_offset);
+}
+
+void java_lang_reflect_Field::set_clazz(oop reflect, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+   reflect->obj_field_put(clazz_offset, value);
+}
+
+oop java_lang_reflect_Field::name(oop field) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return field->obj_field(name_offset);
+}
+
+void java_lang_reflect_Field::set_name(oop field, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  field->obj_field_put(name_offset, value);
+}
+
+oop java_lang_reflect_Field::type(oop field) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return field->obj_field(type_offset);
+}
+
+void java_lang_reflect_Field::set_type(oop field, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  field->obj_field_put(type_offset, value);
+}
+
+int java_lang_reflect_Field::slot(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->int_field(slot_offset);
+}
+
+void java_lang_reflect_Field::set_slot(oop reflect, int value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  reflect->int_field_put(slot_offset, value);
+}
+
+int java_lang_reflect_Field::modifiers(oop field) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return field->int_field(modifiers_offset);
+}
+
+void java_lang_reflect_Field::set_modifiers(oop field, int value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  field->int_field_put(modifiers_offset, value);
+}
+
+// True if the loaded Field class declares the 1.5-only generic "signature"
+// field.
+bool java_lang_reflect_Field::has_signature_field() {
+  return (signature_offset >= 0);
+}
+
+oop java_lang_reflect_Field::signature(oop field) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_signature_field(), "signature field must be present");
+  return field->obj_field(signature_offset);
+}
+
+void java_lang_reflect_Field::set_signature(oop field, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_signature_field(), "signature field must be present");
+  field->obj_field_put(signature_offset, value);
+}
+
+// True if the loaded Field class declares the 1.5-only "annotations" field.
+bool java_lang_reflect_Field::has_annotations_field() {
+  return (annotations_offset >= 0);
+}
+
+oop java_lang_reflect_Field::annotations(oop field) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotations_field(), "annotations field must be present");
+  return field->obj_field(annotations_offset);
+}
+
+void java_lang_reflect_Field::set_annotations(oop field, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  assert(has_annotations_field(), "annotations field must be present");
+  field->obj_field_put(annotations_offset, value);
+}
+
+
+// Computes the offset of sun.reflect.ConstantPool's constantPoolOop field.
+// The class may not be present in older (pre-beta) class libraries, hence
+// the NULL guard; in that case _cp_oop_offset is left untouched.
+void sun_reflect_ConstantPool::compute_offsets() {
+  klassOop k = SystemDictionary::reflect_constant_pool_klass();
+  // This null test can be removed post beta
+  if (k != NULL) {
+    COMPUTE_OFFSET("sun.reflect.ConstantPool", _cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature());
+  }
+}
+
+
+// Allocates a fresh sun.reflect.ConstantPool instance, initializing the
+// class first.  NOTE(review): unlike compute_offsets() above, there is no
+// NULL guard on k here -- presumably callers only reach this when the class
+// exists; confirm before relying on it pre-beta.
+Handle sun_reflect_ConstantPool::create(TRAPS) {  
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  klassOop k = SystemDictionary::reflect_constant_pool_klass();
+  instanceKlassHandle klass (THREAD, k);
+  // Ensure it is initialized
+  klass->initialize(CHECK_NH);
+  return klass->allocate_instance_handle(CHECK_NH);
+}
+
+
+// Getter/setter for the constantPoolOop field (the VM-side constant pool)
+// stashed inside a sun.reflect.ConstantPool mirror object.
+oop sun_reflect_ConstantPool::cp_oop(oop reflect) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  return reflect->obj_field(_cp_oop_offset);
+}
+
+
+void sun_reflect_ConstantPool::set_cp_oop(oop reflect, oop value) {
+  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
+  reflect->obj_field_put(_cp_oop_offset, value);
+}
+
+// Computes the offset of sun.reflect.UnsafeStaticFieldAccessorImpl's "base"
+// field.  The class may be absent in older class libraries, hence the
+// NULL guard.
+void sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
+  klassOop k = SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass();
+  // This null test can be removed post beta
+  if (k != NULL) {
+    COMPUTE_OFFSET("sun.reflect.UnsafeStaticFieldAccessorImpl", _base_offset, k,
+                   vmSymbols::base_name(), vmSymbols::object_signature());
+  }
+}
+
+// Ensures the given wrapper klass is initialized, then allocates one blank
+// instance.  Initialization failure returns NULL via CHECK_0; note the final
+// allocate_instance passes THREAD directly, so a pending exception from
+// allocation is left for the caller's CHECK macro to observe.
+oop java_lang_boxing_object::initialize_and_allocate(klassOop k, TRAPS) {
+ instanceKlassHandle h (THREAD, k);
+ if (!h->is_initialized()) h->initialize(CHECK_0);
+ return h->allocate_instance(THREAD);
+}
+
+
+// Boxes the primitive in *value into a newly allocated instance of the
+// matching java.lang wrapper class (Boolean, Character, ... Long), writing
+// the payload at the shared value_offset.  Returns NULL for any BasicType
+// without a wrapper (e.g. T_OBJECT, T_VOID); allocation failures propagate
+// via CHECK_0.
+oop java_lang_boxing_object::create(BasicType type, jvalue* value, TRAPS) {
+  oop box;
+  switch (type) {
+    case T_BOOLEAN:
+      box = initialize_and_allocate(SystemDictionary::boolean_klass(), CHECK_0);
+      box->bool_field_put(value_offset, value->z);
+      break;
+    case T_CHAR:
+      box = initialize_and_allocate(SystemDictionary::char_klass(), CHECK_0);
+      box->char_field_put(value_offset, value->c);
+      break;
+    case T_FLOAT:
+      box = initialize_and_allocate(SystemDictionary::float_klass(), CHECK_0);
+      box->float_field_put(value_offset, value->f);
+      break;
+    case T_DOUBLE:
+      box = initialize_and_allocate(SystemDictionary::double_klass(), CHECK_0);
+      box->double_field_put(value_offset, value->d);
+      break;
+    case T_BYTE:
+      box = initialize_and_allocate(SystemDictionary::byte_klass(), CHECK_0);
+      box->byte_field_put(value_offset, value->b);
+      break;
+    case T_SHORT:
+      box = initialize_and_allocate(SystemDictionary::short_klass(), CHECK_0);
+      box->short_field_put(value_offset, value->s);
+      break;
+    case T_INT:
+      box = initialize_and_allocate(SystemDictionary::int_klass(), CHECK_0);
+      box->int_field_put(value_offset, value->i);
+      break;
+    case T_LONG:
+      box = initialize_and_allocate(SystemDictionary::long_klass(), CHECK_0);
+      box->long_field_put(value_offset, value->j);
+      break;
+    default:
+      return NULL;
+  }
+  return box;
+}
+
+
+// Unboxes a wrapper object: copies its payload field into the matching slot
+// of *value and returns the corresponding BasicType.  Returns T_ILLEGAL
+// (leaving *value untouched) when box is not an instance of one of the eight
+// primitive wrapper classes.
+BasicType java_lang_boxing_object::get_value(oop box, jvalue* value) {
+  klassOop k = box->klass();
+  if (k == SystemDictionary::boolean_klass()) {
+    value->z = box->bool_field(value_offset);
+    return T_BOOLEAN;
+  }
+  if (k == SystemDictionary::char_klass()) {
+    value->c = box->char_field(value_offset);
+    return T_CHAR;
+  }
+  if (k == SystemDictionary::float_klass()) {
+    value->f = box->float_field(value_offset);
+    return T_FLOAT;
+  }
+  if (k == SystemDictionary::double_klass()) {
+    value->d = box->double_field(value_offset);
+    return T_DOUBLE;
+  }
+  if (k == SystemDictionary::byte_klass()) {
+    value->b = box->byte_field(value_offset);
+    return T_BYTE;
+  }
+  if (k == SystemDictionary::short_klass()) {
+    value->s = box->short_field(value_offset);
+    return T_SHORT;
+  }
+  if (k == SystemDictionary::int_klass()) {
+    value->i = box->int_field(value_offset);
+    return T_INT;
+  }
+  if (k == SystemDictionary::long_klass()) {
+    value->j = box->long_field(value_offset);
+    return T_LONG;
+  }
+  return T_ILLEGAL;
+}
+
+
+// Inverse of get_value(): overwrites the payload field of an existing
+// wrapper object with the matching member of *value and returns the
+// BasicType written.  Returns T_ILLEGAL without writing anything when box
+// is not one of the eight wrapper classes.
+BasicType java_lang_boxing_object::set_value(oop box, jvalue* value) {
+  klassOop k = box->klass();
+  if (k == SystemDictionary::boolean_klass()) {
+    box->bool_field_put(value_offset, value->z);
+    return T_BOOLEAN;
+  }
+  if (k == SystemDictionary::char_klass()) {
+    box->char_field_put(value_offset, value->c);
+    return T_CHAR;
+  }
+  if (k == SystemDictionary::float_klass()) {
+    box->float_field_put(value_offset, value->f);
+    return T_FLOAT;
+  }
+  if (k == SystemDictionary::double_klass()) {
+    box->double_field_put(value_offset, value->d);
+    return T_DOUBLE;
+  }
+  if (k == SystemDictionary::byte_klass()) {
+    box->byte_field_put(value_offset, value->b);
+    return T_BYTE;
+  }
+  if (k == SystemDictionary::short_klass()) {
+    box->short_field_put(value_offset, value->s);
+    return T_SHORT;
+  }
+  if (k == SystemDictionary::int_klass()) {
+    box->int_field_put(value_offset, value->i);
+    return T_INT;
+  }
+  if (k == SystemDictionary::long_klass()) {
+    box->long_field_put(value_offset, value->j);
+    return T_LONG;
+  }
+  return T_ILLEGAL;
+}
+
+
+// Support for java_lang_ref_Reference
+//
+// Field setters and interior-pointer accessors for java.lang.ref.Reference
+// instances.  The *_addr functions hand back the raw address of the oop
+// field inside the object -- presumably for in-place updates by reference
+// processing code; confirm against callers before reusing elsewhere.
+
+void java_lang_ref_Reference::set_referent(oop ref, oop value) {
+  ref->obj_field_put(referent_offset, value);
+}
+
+oop* java_lang_ref_Reference::referent_addr(oop ref) {
+  return ref->obj_field_addr(referent_offset);
+}
+
+void java_lang_ref_Reference::set_next(oop ref, oop value) {
+  ref->obj_field_put(next_offset, value);
+}
+
+oop* java_lang_ref_Reference::next_addr(oop ref) {
+  return ref->obj_field_addr(next_offset);
+}
+
+void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
+  ref->obj_field_put(discovered_offset, value);
+}
+
+oop* java_lang_ref_Reference::discovered_addr(oop ref) {
+  return ref->obj_field_addr(discovered_offset);
+}
+
+// Address of Reference's static "lock" field, located relative to the start
+// of the class's static-field block.
+oop* java_lang_ref_Reference::pending_list_lock_addr() {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+  return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset);
+}
+
+// Address of Reference's static "pending" field, computed the same way.
+oop* java_lang_ref_Reference::pending_list_addr() {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+  return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset);
+}
+
+
+// Support for java_lang_ref_SoftReference
+//
+// Per-instance timestamp getter plus get/set for the class's static "clock"
+// long field.  The static field is read/written through the klassOop using
+// an offset rebased onto offset_of_static_fields().
+
+jlong java_lang_ref_SoftReference::timestamp(oop ref) {
+  return ref->long_field(timestamp_offset);
+}
+
+jlong java_lang_ref_SoftReference::clock() {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+  int offset = ik->offset_of_static_fields() + static_clock_offset;
+
+  return SystemDictionary::soft_reference_klass()->long_field(offset);
+}
+
+void java_lang_ref_SoftReference::set_clock(jlong value) {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+  int offset = ik->offset_of_static_fields() + static_clock_offset;
+
+  SystemDictionary::soft_reference_klass()->long_field_put(offset, value);
+}
+
+
+// Support for java_security_AccessControlContext
+//
+// Offsets of the three fields the VM reads/writes on
+// java.security.AccessControlContext.  Zero doubles as the
+// "not-yet-computed" sentinel (see the asserts below).
+
+int java_security_AccessControlContext::_context_offset = 0;
+int java_security_AccessControlContext::_privilegedContext_offset = 0;
+int java_security_AccessControlContext::_isPrivileged_offset = 0;
+
+
+// Looks up context/privilegedContext/isPrivileged by name+signature in the
+// loaded class; any layout mismatch is fatal.  Must run exactly once.
+void java_security_AccessControlContext::compute_offsets() {
+  assert(_isPrivileged_offset == 0, "offsets should be initialized only once");
+  fieldDescriptor fd;
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::AccessControlContext_klass());
+
+  if (!ik->find_local_field(vmSymbols::context_name(), vmSymbols::protectiondomain_signature(), &fd)) {
+    fatal("Invalid layout of java.security.AccessControlContext");
+  }
+  _context_offset = fd.offset();
+
+  if (!ik->find_local_field(vmSymbols::privilegedContext_name(), vmSymbols::accesscontrolcontext_signature(), &fd)) {
+    fatal("Invalid layout of java.security.AccessControlContext");
+  }
+  _privilegedContext_offset = fd.offset();
+
+  if (!ik->find_local_field(vmSymbols::isPrivileged_name(), vmSymbols::bool_signature(), &fd)) {
+    fatal("Invalid layout of java.security.AccessControlContext");
+  }
+  _isPrivileged_offset = fd.offset();
+}
+
+
+// Allocates and fully populates an AccessControlContext from the given
+// protection-domain array, privileged flag and privileged context.
+// Requires compute_offsets() to have run; allocation/initialization
+// failures return NULL via CHECK_0.
+oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {  
+  assert(_isPrivileged_offset != 0, "offsets should have been initialized");
+  // Ensure klass is initialized
+  instanceKlass::cast(SystemDictionary::AccessControlContext_klass())->initialize(CHECK_0);
+  // Allocate result
+  oop result = instanceKlass::cast(SystemDictionary::AccessControlContext_klass())->allocate_instance(CHECK_0);
+  // Fill in values
+  result->obj_field_put(_context_offset, context());
+  result->obj_field_put(_privilegedContext_offset, privileged_context());
+  result->bool_field_put(_isPrivileged_offset, isPrivileged);
+  return result;
+}
+
+
+// Support for java_lang_ClassLoader
+
+// Returns the loader's "parent" field (may be NULL for a bootstrap-parented
+// loader).
+oop java_lang_ClassLoader::parent(oop loader) {
+  assert(loader->is_oop(), "loader must be oop");
+  return loader->obj_field(parent_offset);
+}
+
+
+// True if the (non-reflection-delegating form of the) given loader appears
+// anywhere on the system class loader's parent chain.
+bool java_lang_ClassLoader::is_trusted_loader(oop loader) {
+  // Fix for 4474172; see evaluation for more details
+  loader = non_reflection_class_loader(loader);
+
+  oop cl = SystemDictionary::java_system_loader();
+  while(cl != NULL) {
+    if (cl == loader) return true;
+    cl = parent(cl);
+  }
+  return false;
+}
+
+// Maps a reflection DelegatingClassLoader to its parent; any other loader
+// (including NULL) is returned unchanged.
+oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
+  if (loader != NULL) {
+    // See whether this is one of the class loaders associated with
+    // the generated bytecodes for reflection, and if so, "magically"
+    // delegate to its parent to prevent class loading from occurring
+    // in places where applications using reflection didn't expect it.
+    klassOop delegating_cl_class = SystemDictionary::reflect_delegating_classloader_klass();
+    // This might be null in non-1.4 JDKs
+    if (delegating_cl_class != NULL && loader->is_a(delegating_cl_class)) {
+      return parent(loader);
+    }
+  }
+  return loader;
+}
+
+
+// Support for java_lang_System
+
+// Caches the base offset of java.lang.System's static-field block; the
+// in/out/err accessors below add their hard-coded static_* offsets to it.
+void java_lang_System::compute_offsets() {
+  assert(offset_of_static_fields == 0, "offsets should be initialized only once");
+
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::system_klass());
+  offset_of_static_fields = ik->offset_of_static_fields();
+}
+
+// Absolute byte offset of System.in within the klass.
+int java_lang_System::in_offset_in_bytes() {
+  return (offset_of_static_fields + static_in_offset);
+}
+
+
+// Absolute byte offset of System.out within the klass.
+int java_lang_System::out_offset_in_bytes() {
+  return (offset_of_static_fields + static_out_offset);
+}
+
+
+// Absolute byte offset of System.err within the klass.
+int java_lang_System::err_offset_in_bytes() {
+  return (offset_of_static_fields + static_err_offset);
+}
+
+
+
+int java_lang_String::value_offset;
+int java_lang_String::offset_offset;
+int java_lang_String::count_offset;
+int java_lang_String::hash_offset;
+int java_lang_Class::klass_offset;
+int java_lang_Class::array_klass_offset;
+int java_lang_Class::resolved_constructor_offset;
+int java_lang_Class::number_of_fake_oop_fields;
+int java_lang_Throwable::backtrace_offset;
+int java_lang_Throwable::detailMessage_offset;
+int java_lang_Throwable::cause_offset;
+int java_lang_Throwable::stackTrace_offset;
+int java_lang_reflect_AccessibleObject::override_offset;
+int java_lang_reflect_Method::clazz_offset;
+int java_lang_reflect_Method::name_offset;
+int java_lang_reflect_Method::returnType_offset;
+int java_lang_reflect_Method::parameterTypes_offset;
+int java_lang_reflect_Method::exceptionTypes_offset;
+int java_lang_reflect_Method::slot_offset;
+int java_lang_reflect_Method::modifiers_offset;
+int java_lang_reflect_Method::signature_offset;
+int java_lang_reflect_Method::annotations_offset;
+int java_lang_reflect_Method::parameter_annotations_offset;
+int java_lang_reflect_Method::annotation_default_offset;
+int java_lang_reflect_Constructor::clazz_offset;
+int java_lang_reflect_Constructor::parameterTypes_offset;
+int java_lang_reflect_Constructor::exceptionTypes_offset;
+int java_lang_reflect_Constructor::slot_offset;
+int java_lang_reflect_Constructor::modifiers_offset;
+int java_lang_reflect_Constructor::signature_offset;
+int java_lang_reflect_Constructor::annotations_offset;
+int java_lang_reflect_Constructor::parameter_annotations_offset;
+int java_lang_reflect_Field::clazz_offset;
+int java_lang_reflect_Field::name_offset;
+int java_lang_reflect_Field::type_offset;
+int java_lang_reflect_Field::slot_offset;
+int java_lang_reflect_Field::modifiers_offset;
+int java_lang_reflect_Field::signature_offset;
+int java_lang_reflect_Field::annotations_offset;
+int java_lang_boxing_object::value_offset;
+int java_lang_ref_Reference::referent_offset;
+int java_lang_ref_Reference::queue_offset;
+int java_lang_ref_Reference::next_offset;
+int java_lang_ref_Reference::discovered_offset;
+int java_lang_ref_Reference::static_lock_offset;
+int java_lang_ref_Reference::static_pending_offset;
+int java_lang_ref_Reference::number_of_fake_oop_fields;
+int java_lang_ref_SoftReference::timestamp_offset;
+int java_lang_ref_SoftReference::static_clock_offset;
+int java_lang_ClassLoader::parent_offset;
+int java_lang_System::offset_of_static_fields;
+int java_lang_System::static_in_offset;
+int java_lang_System::static_out_offset;
+int java_lang_System::static_err_offset;
+int java_lang_StackTraceElement::declaringClass_offset;
+int java_lang_StackTraceElement::methodName_offset;
+int java_lang_StackTraceElement::fileName_offset;
+int java_lang_StackTraceElement::lineNumber_offset;
+int java_lang_AssertionStatusDirectives::classes_offset;
+int java_lang_AssertionStatusDirectives::classEnabled_offset;
+int java_lang_AssertionStatusDirectives::packages_offset;
+int java_lang_AssertionStatusDirectives::packageEnabled_offset;
+int java_lang_AssertionStatusDirectives::deflt_offset;
+int java_nio_Buffer::_limit_offset;
+int sun_misc_AtomicLongCSImpl::_value_offset;
+int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
+int sun_reflect_ConstantPool::_cp_oop_offset;
+int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
+
+
+// Support for java_lang_StackTraceElement
+//
+// Plain field setters used when the VM materializes stack-trace element
+// objects.
+
+void java_lang_StackTraceElement::set_fileName(oop element, oop value) {
+  element->obj_field_put(fileName_offset, value);
+}
+
+void java_lang_StackTraceElement::set_declaringClass(oop element, oop value) {
+  element->obj_field_put(declaringClass_offset, value);
+}
+
+void java_lang_StackTraceElement::set_methodName(oop element, oop value) {
+  element->obj_field_put(methodName_offset, value);
+}
+
+void java_lang_StackTraceElement::set_lineNumber(oop element, int value) {
+  element->int_field_put(lineNumber_offset, value);
+}
+  
+  
+// Support for java Assertions - java_lang_AssertionStatusDirectives.
+//
+// Field setters for the AssertionStatusDirectives object handed to the
+// class library (class/package assertion-status arrays plus the default).
+
+void java_lang_AssertionStatusDirectives::set_classes(oop o, oop val) {
+  o->obj_field_put(classes_offset, val);
+}
+
+void java_lang_AssertionStatusDirectives::set_classEnabled(oop o, oop val) {
+  o->obj_field_put(classEnabled_offset, val);
+}
+
+void java_lang_AssertionStatusDirectives::set_packages(oop o, oop val) {
+  o->obj_field_put(packages_offset, val);
+}
+
+void java_lang_AssertionStatusDirectives::set_packageEnabled(oop o, oop val) {
+  o->obj_field_put(packageEnabled_offset, val);
+}
+
+void java_lang_AssertionStatusDirectives::set_deflt(oop o, bool val) {
+  o->bool_field_put(deflt_offset, val);
+}
+
+
+// Support for intrinsification of java.nio.Buffer.checkIndex
+
+// Offset of java.nio.Buffer's "limit" field, for the checkIndex intrinsic.
+int java_nio_Buffer::limit_offset() {
+  return _limit_offset;
+}
+
+
+void java_nio_Buffer::compute_offsets() {
+  klassOop k = SystemDictionary::java_nio_Buffer_klass();
+  COMPUTE_OFFSET("java.nio.Buffer", _limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
+}
+
+// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
+
+// Offset of the "value" field; only valid once the class has been loaded
+// (asserted below).
+int sun_misc_AtomicLongCSImpl::value_offset() {
+  assert(SystemDictionary::sun_misc_AtomicLongCSImpl_klass() != NULL, "can't call this");
+  return _value_offset;
+}
+
+
+void sun_misc_AtomicLongCSImpl::compute_offsets() {
+  klassOop k = SystemDictionary::sun_misc_AtomicLongCSImpl_klass();
+  // If this class is not present, its value field offset won't be referenced.
+  if (k != NULL) {
+    COMPUTE_OFFSET("sun.misc.AtomicLongCSImpl", _value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());
+  }
+}
+
+// Lazily loads j.u.c.locks.AbstractOwnableSynchronizer (a 1.6+ class) and
+// computes the offset of its exclusiveOwnerThread field.  Idempotent: a
+// non-zero _owner_offset means initialization already ran.  Class-loading
+// exceptions propagate via CHECK.
+void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
+  if (_owner_offset != 0) return;
+
+  assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
+  SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK);
+  klassOop k = SystemDictionary::abstract_ownable_synchronizer_klass();
+  COMPUTE_OFFSET("java.util.concurrent.locks.AbstractOwnableSynchronizer", _owner_offset, k, 
+                 vmSymbols::exclusive_owner_thread_name(), vmSymbols::thread_signature());
+}
+
+// Returns the java.lang.Thread oop stored in the synchronizer's
+// exclusiveOwnerThread field; initialize() must have run first.
+oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(oop obj) {
+  assert(_owner_offset != 0, "Must be initialized");
+  return obj->obj_field(_owner_offset);
+}
+
+// Compute hard-coded offsets
+// Invoked before SystemDictionary::initialize, so pre-loaded classes
+// are not available to determine the offset_of_static_fields.
+// Conversion rule visible below: an instance-field slot index (hc_*_offset)
+// becomes a byte offset via  slot * wordSize + header;  static-field slots
+// are multiplied by wordSize only (no object header).
+void JavaClasses::compute_hard_coded_offsets() {
+  const int x = wordSize;  			
+  const int header = instanceOopDesc::header_size_in_bytes();
+
+  // Do the String Class
+  java_lang_String::value_offset  = java_lang_String::hc_value_offset  * x + header;
+  java_lang_String::offset_offset = java_lang_String::hc_offset_offset * x + header;
+  // count and hash are jint fields packed directly after 'offset'
+  java_lang_String::count_offset  = java_lang_String::offset_offset + sizeof (jint);
+  java_lang_String::hash_offset   = java_lang_String::count_offset + sizeof (jint);
+
+  // Do the Class Class
+  java_lang_Class::klass_offset = java_lang_Class::hc_klass_offset * x + header;
+  java_lang_Class::array_klass_offset = java_lang_Class::hc_array_klass_offset * x + header;
+  java_lang_Class::resolved_constructor_offset = java_lang_Class::hc_resolved_constructor_offset * x + header;
+
+  // This is NOT an offset
+  java_lang_Class::number_of_fake_oop_fields = java_lang_Class::hc_number_of_fake_oop_fields;
+
+  // Throwable Class
+  java_lang_Throwable::backtrace_offset  = java_lang_Throwable::hc_backtrace_offset  * x + header;
+  java_lang_Throwable::detailMessage_offset = java_lang_Throwable::hc_detailMessage_offset * x + header;
+  java_lang_Throwable::cause_offset      = java_lang_Throwable::hc_cause_offset      * x + header;
+  java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header;
+
+  // java_lang_boxing_object
+  java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset * x + header;
+
+  // java_lang_ref_Reference:
+  java_lang_ref_Reference::referent_offset = java_lang_ref_Reference::hc_referent_offset * x + header;
+  java_lang_ref_Reference::queue_offset = java_lang_ref_Reference::hc_queue_offset * x + header;
+  java_lang_ref_Reference::next_offset  = java_lang_ref_Reference::hc_next_offset * x + header;
+  java_lang_ref_Reference::discovered_offset  = java_lang_ref_Reference::hc_discovered_offset * x + header;
+  java_lang_ref_Reference::static_lock_offset = java_lang_ref_Reference::hc_static_lock_offset *  x;
+  java_lang_ref_Reference::static_pending_offset = java_lang_ref_Reference::hc_static_pending_offset * x;
+  // Artificial fields for java_lang_ref_Reference
+  // The first field is for the discovered field added in 1.4
+  java_lang_ref_Reference::number_of_fake_oop_fields = 1;
+
+  // java_lang_ref_SoftReference Class
+  java_lang_ref_SoftReference::timestamp_offset = java_lang_ref_SoftReference::hc_timestamp_offset * x + header;
+  // Don't multiply static fields because they are always in wordSize units
+  java_lang_ref_SoftReference::static_clock_offset = java_lang_ref_SoftReference::hc_static_clock_offset * x;
+
+  // java_lang_ClassLoader
+  java_lang_ClassLoader::parent_offset = java_lang_ClassLoader::hc_parent_offset * x + header;
+
+  // java_lang_System
+  java_lang_System::static_in_offset  = java_lang_System::hc_static_in_offset  * x;
+  java_lang_System::static_out_offset = java_lang_System::hc_static_out_offset * x;
+  java_lang_System::static_err_offset = java_lang_System::hc_static_err_offset * x;
+
+  // java_lang_StackTraceElement
+  java_lang_StackTraceElement::declaringClass_offset = java_lang_StackTraceElement::hc_declaringClass_offset  * x + header;
+  java_lang_StackTraceElement::methodName_offset = java_lang_StackTraceElement::hc_methodName_offset * x + header;
+  java_lang_StackTraceElement::fileName_offset   = java_lang_StackTraceElement::hc_fileName_offset   * x + header;
+  java_lang_StackTraceElement::lineNumber_offset = java_lang_StackTraceElement::hc_lineNumber_offset * x + header;
+  java_lang_AssertionStatusDirectives::classes_offset = java_lang_AssertionStatusDirectives::hc_classes_offset * x + header;
+  java_lang_AssertionStatusDirectives::classEnabled_offset = java_lang_AssertionStatusDirectives::hc_classEnabled_offset * x + header;
+  java_lang_AssertionStatusDirectives::packages_offset = java_lang_AssertionStatusDirectives::hc_packages_offset * x + header;
+  java_lang_AssertionStatusDirectives::packageEnabled_offset = java_lang_AssertionStatusDirectives::hc_packageEnabled_offset * x + header;
+  java_lang_AssertionStatusDirectives::deflt_offset = java_lang_AssertionStatusDirectives::hc_deflt_offset * x + header;
+
+}
+  
+
+// Compute non-hard-coded field offsets of all the classes in this file
+// Runs after the relevant classes are loaded (unlike compute_hard_coded_offsets),
+// gated on JDK version where a class only exists in later releases.
+void JavaClasses::compute_offsets() {
+
+  java_lang_Class::compute_offsets();
+  java_lang_System::compute_offsets();
+  java_lang_Thread::compute_offsets();
+  java_lang_ThreadGroup::compute_offsets();
+  java_security_AccessControlContext::compute_offsets();
+  // Initialize reflection classes. The layouts of these classes
+  // changed with the new reflection implementation in JDK 1.4, and
+  // since the Universe doesn't know what JDK version it is until this
+  // point we defer computation of these offsets until now.
+  java_lang_reflect_AccessibleObject::compute_offsets();
+  java_lang_reflect_Method::compute_offsets();
+  java_lang_reflect_Constructor::compute_offsets();
+  java_lang_reflect_Field::compute_offsets();
+  if (JDK_Version::is_gte_jdk14x_version()) {
+    java_nio_Buffer::compute_offsets();
+  }
+  if (JDK_Version::is_gte_jdk15x_version()) {
+    sun_reflect_ConstantPool::compute_offsets();
+    sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
+  }
+  // Tolerates the class being absent (handles NULL klass internally).
+  sun_misc_AtomicLongCSImpl::compute_offsets();
+}
+
+#ifndef PRODUCT
+
+// These functions exist to assert the validity of hard-coded field offsets to guard 
+// against changes in the class files
+
+// Verifies that a hard-coded offset for a nonstatic field matches the offset the
+// class file parser actually assigned. Returns false (after printing a diagnostic)
+// when the field is missing, is static, or the offsets disagree.
+bool JavaClasses::check_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) {
+  EXCEPTION_MARK;
+  fieldDescriptor fd;
+  symbolHandle klass_sym = oopFactory::new_symbol_handle(klass_name, CATCH);
+  klassOop k = SystemDictionary::resolve_or_fail(klass_sym, true, CATCH);
+  instanceKlassHandle h_klass (THREAD, k);
+  //instanceKlassHandle h_klass(klass);
+  symbolHandle f_name = oopFactory::new_symbol_handle(field_name, CATCH);
+  symbolHandle f_sig  = oopFactory::new_symbol_handle(field_sig, CATCH);
+  if (!h_klass->find_local_field(f_name(), f_sig(), &fd)) {
+    tty->print_cr("Nonstatic field %s.%s not found", klass_name, field_name);
+    return false;
+  }
+  if (fd.is_static()) {
+    tty->print_cr("Nonstatic field %s.%s appears to be static", klass_name, field_name);
+    return false;
+  }
+  if (fd.offset() == hardcoded_offset ) {
+    return true;
+  } else {
+    tty->print_cr("Offset of nonstatic field %s.%s is hardcoded as %d but should really be %d.", 
+                  klass_name, field_name, hardcoded_offset, fd.offset());
+    return false;
+  }
+}
+
+
+// Static-field counterpart of check_offset(): the hard-coded value is relative
+// to the start of the static-field area, so offset_of_static_fields() is added
+// before comparing against the field descriptor's absolute offset.
+bool JavaClasses::check_static_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) {
+  EXCEPTION_MARK;
+  fieldDescriptor fd;
+  symbolHandle klass_sym = oopFactory::new_symbol_handle(klass_name, CATCH);
+  klassOop k = SystemDictionary::resolve_or_fail(klass_sym, true, CATCH);
+  instanceKlassHandle h_klass (THREAD, k);
+  symbolHandle f_name = oopFactory::new_symbol_handle(field_name, CATCH);
+  symbolHandle f_sig  = oopFactory::new_symbol_handle(field_sig, CATCH);
+  if (!h_klass->find_local_field(f_name(), f_sig(), &fd)) {
+    tty->print_cr("Static field %s.%s not found", klass_name, field_name);
+    return false;
+  }
+  if (!fd.is_static()) {
+    tty->print_cr("Static field %s.%s appears to be nonstatic", klass_name, field_name);
+    return false;
+  }
+  if (fd.offset() == hardcoded_offset + h_klass->offset_of_static_fields()) {
+    return true;
+  } else {
+    tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - h_klass->offset_of_static_fields());
+    return false;
+  }
+}
+
+
+// Check the hard-coded field offsets of all the classes in this file
+// Debug-build-only sanity pass: accumulates a single 'valid' flag across all
+// checks so every mismatch is reported before the VM exits.
+
+void JavaClasses::check_offsets() {
+  bool valid = true;
+
+// CHECK_OFFSET expands to a check_offset() call for a nonstatic field; the
+// field_name token is both pasted into the C++ offset identifier and
+// stringized for the Java-side lookup, so the names must match exactly.
+#define CHECK_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \
+  valid &= check_offset(klass_name, cpp_klass_name :: field_name ## _offset, #field_name, field_sig)
+
+// Same for static fields, which use the static_<name>_offset naming scheme.
+#define CHECK_STATIC_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \
+  valid &= check_static_offset(klass_name, cpp_klass_name :: static_ ## field_name ## _offset, #field_name, field_sig)
+
+  // java.lang.String
+
+  CHECK_OFFSET("java/lang/String", java_lang_String, value, "[C");
+  CHECK_OFFSET("java/lang/String", java_lang_String, offset, "I");
+  CHECK_OFFSET("java/lang/String", java_lang_String, count, "I");
+  CHECK_OFFSET("java/lang/String", java_lang_String, hash, "I");
+  
+  // java.lang.Class
+
+  // Fake fields
+  // CHECK_OFFSET("java/lang/Class", java_lang_Class, klass); // %%% this needs to be checked
+  // CHECK_OFFSET("java/lang/Class", java_lang_Class, array_klass); // %%% this needs to be checked
+  // CHECK_OFFSET("java/lang/Class", java_lang_Class, resolved_constructor); // %%% this needs to be checked
+
+  // java.lang.Throwable
+
+  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, backtrace, "Ljava/lang/Object;");
+  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, detailMessage, "Ljava/lang/String;");
+  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, cause, "Ljava/lang/Throwable;");
+  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, stackTrace, "[Ljava/lang/StackTraceElement;");
+  
+  // Boxed primitive objects (java_lang_boxing_object)
+
+  CHECK_OFFSET("java/lang/Boolean",   java_lang_boxing_object, value, "Z");
+  CHECK_OFFSET("java/lang/Character", java_lang_boxing_object, value, "C");
+  CHECK_OFFSET("java/lang/Float",     java_lang_boxing_object, value, "F");
+  CHECK_OFFSET("java/lang/Double",    java_lang_boxing_object, value, "D");
+  CHECK_OFFSET("java/lang/Byte",      java_lang_boxing_object, value, "B");
+  CHECK_OFFSET("java/lang/Short",     java_lang_boxing_object, value, "S");
+  CHECK_OFFSET("java/lang/Integer",   java_lang_boxing_object, value, "I");
+  CHECK_OFFSET("java/lang/Long",      java_lang_boxing_object, value, "J");
+
+  // java.lang.ClassLoader
+
+  CHECK_OFFSET("java/lang/ClassLoader", java_lang_ClassLoader, parent,      "Ljava/lang/ClassLoader;");
+
+  // java.lang.System
+
+  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System,  in, "Ljava/io/InputStream;");
+  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, out, "Ljava/io/PrintStream;");
+  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, err, "Ljava/io/PrintStream;");
+
+  // java.lang.StackTraceElement
+
+  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement, declaringClass, "Ljava/lang/String;");
+  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement, methodName, "Ljava/lang/String;");
+  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement,   fileName, "Ljava/lang/String;");
+  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement, lineNumber, "I");
+
+  // java.lang.ref.Reference
+
+  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, referent, "Ljava/lang/Object;");
+  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, queue, "Ljava/lang/ref/ReferenceQueue;");
+  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, next, "Ljava/lang/ref/Reference;");
+  // Fake field
+  //CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, discovered, "Ljava/lang/ref/Reference;");
+  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, lock, "Ljava/lang/ref/Reference$Lock;");
+  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, pending, "Ljava/lang/ref/Reference;");
+
+  // java.lang.ref.SoftReference
+
+  CHECK_OFFSET("java/lang/ref/SoftReference", java_lang_ref_SoftReference, timestamp, "J");
+  CHECK_STATIC_OFFSET("java/lang/ref/SoftReference", java_lang_ref_SoftReference, clock, "J");
+
+  // java.lang.AssertionStatusDirectives
+  // 
+  // The CheckAssertionStatusDirectives boolean can be removed from here and
+  // globals.hpp after the AssertionStatusDirectives class has been integrated
+  // into merlin "for some time."  Without it, the vm will fail with early
+  // merlin builds.
+
+  if (CheckAssertionStatusDirectives && JDK_Version::is_gte_jdk14x_version()) {
+    const char* nm = "java/lang/AssertionStatusDirectives";
+    const char* sig = "[Ljava/lang/String;";
+    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, classes, sig);
+    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, classEnabled, "[Z");
+    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, packages, sig);
+    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, packageEnabled, "[Z");
+    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, deflt, "Z");
+  }
+
+  if (!valid) vm_exit_during_initialization("Hard-coded field offset verification failed");
+}
+
+#endif // PRODUCT
+
+// Module init entry point for javaClasses: computes all runtime field offsets,
+// verifies the hard-coded ones (non-product builds; see #ifndef PRODUCT above),
+// then sets up reflection field filtering, which depends on those offsets.
+void javaClasses_init() {
+  JavaClasses::compute_offsets();
+  JavaClasses::check_offsets();
+  FilteredFieldsMap::initialize();  // must be done after computing offsets.
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,907 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)javaClasses.hpp	1.157 07/05/05 17:05:52 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// Interface for manipulating the basic Java classes.
+//
+// All dependencies on layout of actual Java classes should be kept here.
+// If the layout of any of the classes above changes the offsets must be adjusted.
+//
+// For most classes we hardwire the offsets for performance reasons. In certain
+// cases (e.g. java.security.AccessControlContext) we compute the offsets at
+// startup since the layout here differs between JDK1.2 and JDK1.3.
+// 
+// Note that fields (static and non-static) are arranged with oops before non-oops
+// on a per class basis. The offsets below have to reflect this ordering.
+//
+// When editing the layouts please update the check_offset verification code 
+// correspondingly. The names in the enums must be identical to the actual field 
+// names in order for the verification code to work.
+
+
+// Interface to java.lang.String objects
+
+class java_lang_String : AllStatic {
+ private:
+  // Hard-coded field slot indices; converted to byte offsets in
+  // JavaClasses::compute_hard_coded_offsets().
+  enum {
+    hc_value_offset  = 0,
+    hc_offset_offset = 1,
+    hc_count_offset  = 2,
+    hc_hash_offset   = 3
+  };
+
+  static int value_offset;
+  static int offset_offset;
+  static int count_offset;
+  static int hash_offset;
+
+  static Handle basic_create(int length, bool tenured, TRAPS);
+  static Handle basic_create_from_unicode(jchar* unicode, int length, bool tenured, TRAPS);
+
+  static void set_value( oop string, typeArrayOop buffer) { string->obj_field_put(value_offset,  (oop)buffer); }
+  static void set_offset(oop string, int offset)          { string->int_field_put(offset_offset, offset); }
+  static void set_count( oop string, int count)           { string->int_field_put(count_offset,  count);  }
+
+ public:
+  // Instance creation
+  static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
+  static Handle create_tenured_from_unicode(jchar* unicode, int len, TRAPS);
+  static oop    create_oop_from_unicode(jchar* unicode, int len, TRAPS);
+  static Handle create_from_str(const char* utf8_str, TRAPS);
+  static oop    create_oop_from_str(const char* utf8_str, TRAPS);
+  static Handle create_from_symbol(symbolHandle symbol, TRAPS);  
+  static Handle create_from_platform_dependent_str(const char* str, TRAPS);
+  static Handle char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS);
+ 
+  static int value_offset_in_bytes()  { return value_offset;  }
+  static int count_offset_in_bytes()  { return count_offset;  }
+  static int offset_offset_in_bytes() { return offset_offset; }
+  static int hash_offset_in_bytes()   { return hash_offset;   }
+
+  // Accessors
+  static typeArrayOop value(oop java_string) {
+    assert(is_instance(java_string), "must be java_string")
;
+    return (typeArrayOop) java_string->obj_field(value_offset);
+  }
+  static int offset(oop java_string) {
+    assert(is_instance(java_string), "must be java_string");
+    return java_string->int_field(offset_offset);
+  }
+  static int length(oop java_string) {
+    assert(is_instance(java_string), "must be java_string");
+    return java_string->int_field(count_offset);
+  }
+  static int utf8_length(oop java_string);
+
+  // String converters
+  static char*  as_utf8_string(oop java_string);
+  static char*  as_utf8_string(oop java_string, int start, int len);
+  static jchar* as_unicode_string(oop java_string, int& length);
+
+  static bool equals(oop java_string, jchar* chars, int len);
+
+  // Conversion between '.' and '/' formats
+  static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
+  static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }    
+
+  // Conversion
+  static symbolHandle as_symbol(Handle java_string, TRAPS);
+
+  // Testers
+  static bool is_instance(oop obj) {
+    return obj != NULL && obj->klass() == SystemDictionary::string_klass();
+  }
+
+  // Debugging
+  static void print(Handle java_string, outputStream* st);
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.Class objects
+
+class java_lang_Class : AllStatic {
+   friend class VMStructs;
+ private:
+  // The fake offsets are added by the class loader when java.lang.Class is loaded 
+
+  enum {
+    hc_klass_offset                = 0,
+    hc_array_klass_offset          = 1,
+    hc_resolved_constructor_offset = 2,
+    hc_number_of_fake_oop_fields   = 3
+  };
+
+  static int klass_offset;
+  static int resolved_constructor_offset;
+  static int array_klass_offset;
+  static int number_of_fake_oop_fields;
+
+  // classRedefinedCount is a real Java field, so its offset is computed
+  // at startup rather than hard-coded.
+  static void compute_offsets();
+  static bool offsets_computed;
+  static int classRedefinedCount_offset;
+
+ public:
+  // Instance creation
+  static oop  create_mirror(KlassHandle k, TRAPS);
+  static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
+  // Conversion
+  static klassOop as_klassOop(oop java_class);
+  // Testing
+  static bool is_primitive(oop java_class);  
+  static BasicType primitive_type(oop java_class);  
+  static oop primitive_mirror(BasicType t);  
+  // JVM_NewInstance support
+  static methodOop resolved_constructor(oop java_class);
+  static void set_resolved_constructor(oop java_class, methodOop constructor);
+  // JVM_NewArray support
+  static klassOop array_klass(oop java_class);
+  static void set_array_klass(oop java_class, klassOop klass);
+  // compiler support for class operations
+  static int klass_offset_in_bytes() { return klass_offset; }
+  static int resolved_constructor_offset_in_bytes() { return resolved_constructor_offset; }
+  static int array_klass_offset_in_bytes() { return array_klass_offset; }
+  // Support for classRedefinedCount field
+  static int classRedefinedCount(oop the_class_mirror);
+  static void set_classRedefinedCount(oop the_class_mirror, int value);
+  // Debugging
+  friend class JavaClasses;
+  friend class instanceKlass;   // verification code accesses offsets
+  friend class ClassFileParser; // access to number_of_fake_fields
+};
+
+// Interface to java.lang.Thread objects
+
+class java_lang_Thread : AllStatic {
+ private:
+  // Note that for this class the layout changed between JDK1.2 and JDK1.3,
+  // so we compute the offsets at startup rather than hard-wiring them.
+  static int _name_offset;
+  static int _group_offset;
+  static int _contextClassLoader_offset;
+  static int _inheritedAccessControlContext_offset;
+  static int _priority_offset;
+  static int _eetop_offset;
+  static int _daemon_offset;
+  static int _stillborn_offset;
+  static int _stackSize_offset;
+  static int _tid_offset;
+  static int _thread_status_offset; 
+  static int _park_blocker_offset; 
+  static int _park_event_offset ; 
+
+  static void compute_offsets();
+
+ public:
+  // Instance creation
+  static oop create();
+  // Returns the JavaThread associated with the thread obj
+  static JavaThread* thread(oop java_thread);
+  // Set JavaThread for instance
+  static void set_thread(oop java_thread, JavaThread* thread);
+  // Name
+  static typeArrayOop name(oop java_thread);
+  static void set_name(oop java_thread, typeArrayOop name);
+  // Priority
+  static ThreadPriority priority(oop java_thread);
+  static void set_priority(oop java_thread, ThreadPriority priority);
+  // Thread group
+  static oop  threadGroup(oop java_thread);
+  // Stillborn
+  static bool is_stillborn(oop java_thread);
+  static void set_stillborn(oop java_thread);
+  // Alive (NOTE: this is not really a field, but provides the correct
+  // definition without doing a Java call)
+  static bool is_alive(oop java_thread);
+  // Daemon
+  static bool is_daemon(oop java_thread);
+  static void set_daemon(oop java_thread);
+  // Context ClassLoader
+  static oop context_class_loader(oop java_thread);
+  // Control context
+  static oop inherited_access_control_context(oop java_thread);
+  // Stack size hint
+  static jlong stackSize(oop java_thread);
+  // Thread ID
+  static jlong thread_id(oop java_thread);
+    
+  // Blocker object responsible for thread parking
+  static oop park_blocker(oop java_thread);
+
+  // Pointer to type-stable park handler, encoded as jlong. 
+  // Should be set when apparently null
+  // For details, see unsafe.cpp Unsafe_Unpark
+  static jlong park_event(oop java_thread);
+  static bool set_park_event(oop java_thread, jlong ptr);
+
+  // Java Thread Status for JVMTI and M&M use.
+  // This thread status info is saved in threadStatus field of
+  // java.lang.Thread java class.
+  // Each value is a composite of JVMTI_THREAD_STATE_* bit flags.
+  enum ThreadStatus {
+    NEW                      = 0,
+    RUNNABLE                 = JVMTI_THREAD_STATE_ALIVE +          // runnable / running
+                               JVMTI_THREAD_STATE_RUNNABLE,
+    SLEEPING                 = JVMTI_THREAD_STATE_ALIVE +          // Thread.sleep()
+                               JVMTI_THREAD_STATE_WAITING +
+                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT + 
+                               JVMTI_THREAD_STATE_SLEEPING,
+    IN_OBJECT_WAIT           = JVMTI_THREAD_STATE_ALIVE +          // Object.wait()
+                               JVMTI_THREAD_STATE_WAITING +
+                               JVMTI_THREAD_STATE_WAITING_INDEFINITELY +
+                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT, 
+    IN_OBJECT_WAIT_TIMED     = JVMTI_THREAD_STATE_ALIVE +          // Object.wait(long)
+                               JVMTI_THREAD_STATE_WAITING +
+                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
+                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT, 
+    PARKED                   = JVMTI_THREAD_STATE_ALIVE +          // LockSupport.park()
+                               JVMTI_THREAD_STATE_WAITING +
+                               JVMTI_THREAD_STATE_WAITING_INDEFINITELY +
+                               JVMTI_THREAD_STATE_PARKED,
+    PARKED_TIMED             = JVMTI_THREAD_STATE_ALIVE +          // LockSupport.park(long)
+                               JVMTI_THREAD_STATE_WAITING +
+                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
+                               JVMTI_THREAD_STATE_PARKED,  
+    BLOCKED_ON_MONITOR_ENTER = JVMTI_THREAD_STATE_ALIVE +          // (re-)entering a synchronization block 
+                               JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER,   
+    TERMINATED               = JVMTI_THREAD_STATE_TERMINATED
+  };
+  // Write thread status info to threadStatus field of java.lang.Thread.
+  static void set_thread_status(oop java_thread_oop, ThreadStatus status);
+  // Read thread status info from threadStatus field of java.lang.Thread. 
+  static ThreadStatus get_thread_status(oop java_thread_oop);
+
+  static const char*  thread_status_name(oop java_thread_oop);
+    
+  // Debugging
+  friend class JavaClasses;
+};
+
+// Interface to java.lang.ThreadGroup objects
+
+class java_lang_ThreadGroup : AllStatic {
+ private:
+  // All offsets computed at startup by compute_offsets().
+  static int _parent_offset;        
+  static int _name_offset;
+  static int _threads_offset;
+  static int _groups_offset;
+  static int _maxPriority_offset;
+  static int _destroyed_offset;
+  static int _daemon_offset;
+  static int _vmAllowSuspension_offset; 
+  static int _nthreads_offset;  
+  static int _ngroups_offset; 
+
+  static void compute_offsets();
+
+ public:  
+  // parent ThreadGroup
+  static oop  parent(oop java_thread_group);
+  // name
+  static typeArrayOop name(oop java_thread_group);
+  // ("name as oop" accessor is not necessary)
+  // Number of threads in group
+  static int nthreads(oop java_thread_group);
+  // threads
+  static objArrayOop threads(oop java_thread_group);
+  // Number of groups in group
+  static int ngroups(oop java_thread_group);
+  // groups
+  static objArrayOop groups(oop java_thread_group);
+  // maxPriority in group
+  static ThreadPriority maxPriority(oop java_thread_group);
+  // Destroyed
+  static bool is_destroyed(oop java_thread_group);
+  // Daemon
+  static bool is_daemon(oop java_thread_group);
+  // vmAllowSuspension
+  static bool is_vmAllowSuspension(oop java_thread_group);
+  // Debugging
+  friend class JavaClasses;
+};
+  
+
+
+// Interface to java.lang.Throwable objects
+
+class java_lang_Throwable: AllStatic {
+  friend class BacktraceBuilder;
+
+ private:
+  // Offsets
+  enum {
+    hc_backtrace_offset     =  0,
+    hc_detailMessage_offset =  1,
+    hc_cause_offset         =  2,  // New since 1.4
+    hc_stackTrace_offset    =  3   // New since 1.4
+  };
+  // Trace constants: layout of each chunk in the chunked backtrace list.
+  enum {
+    trace_methods_offset = 0,
+    trace_bcis_offset    = 1,
+    trace_next_offset    = 2,
+    trace_size           = 3,
+    trace_chunk_size     = 32
+  };
+
+  static int backtrace_offset;
+  static int detailMessage_offset;
+  static int cause_offset;
+  static int stackTrace_offset;
+
+  // Printing
+  static char* print_stack_element_to_buffer(methodOop method, int bci);
+  static void print_to_stream(Handle stream, const char* str);
+  // StackTrace (programmatic access, new since 1.4)
+  static void clear_stacktrace(oop throwable);
+  // No stack trace available
+  static const char* no_stack_trace_message();
+
+ public:
+  // Backtrace
+  static oop backtrace(oop throwable);
+  static void set_backtrace(oop throwable, oop value);
+  // Needed by JVMTI to filter out this internal field. 
+  static int get_backtrace_offset() { return backtrace_offset;}
+  static int get_detailMessage_offset() { return detailMessage_offset;}
+  // Message
+  static oop message(oop throwable);
+  static oop message(Handle throwable);
+  static void set_message(oop throwable, oop value);
+  // Print stack trace stored in exception by call-back to Java
+  // Note: this is no longer used in Merlin, but we still support
+  // it for compatibility.
+  static void print_stack_trace(oop throwable, oop print_stream);
+  static void print_stack_element(Handle stream, methodOop method, int bci);
+  static void print_stack_element(outputStream *st, methodOop method, int bci);
+  static void print_stack_usage(Handle stream);
+
+  // Allocate space for backtrace (created but stack trace not filled in)
+  static void allocate_backtrace(Handle throwable, TRAPS);
+  // Fill in current stack trace for throwable with preallocated backtrace (no GC)
+  static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable);
+
+  // Fill in current stack trace, can cause GC
+  static void fill_in_stack_trace(Handle throwable, TRAPS);
+  static void fill_in_stack_trace(Handle throwable);
+  // Programmatic access to stack trace
+  static oop  get_stack_trace_element(oop throwable, int index, TRAPS);
+  static int  get_stack_trace_depth(oop throwable, TRAPS);
+  // Printing
+  static void print(oop throwable, outputStream* st);
+  static void print(Handle throwable, outputStream* st);
+  static void print_stack_trace(oop throwable, outputStream* st);
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.reflect.AccessibleObject objects
+// Base class for the Method/Constructor/Field interfaces below.
+
+class java_lang_reflect_AccessibleObject: AllStatic {
+ private:
+  // Note that to reduce dependencies on the JDK we compute these
+  // offsets at run-time.
+  static int override_offset; 
+
+  static void compute_offsets();
+
+ public:
+  // Accessors
+  static jboolean override(oop reflect);
+  static void set_override(oop reflect, jboolean value);
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.reflect.Method objects
+
+class java_lang_reflect_Method : public java_lang_reflect_AccessibleObject {
+ private:
+  // Note that to reduce dependencies on the JDK we compute these
+  // offsets at run-time. Fields that only exist in later JDKs (signature,
+  // annotations, ...) have has_*_field() testers below.
+  static int clazz_offset;
+  static int name_offset;
+  static int returnType_offset;
+  static int parameterTypes_offset;
+  static int exceptionTypes_offset;
+  static int slot_offset; 
+  static int modifiers_offset; 
+  static int signature_offset;
+  static int annotations_offset;
+  static int parameter_annotations_offset;
+  static int annotation_default_offset;
+
+  static void compute_offsets();
+
+ public:
+  // Allocation
+  static Handle create(TRAPS);
+
+  // Accessors
+  static oop clazz(oop reflect);
+  static void set_clazz(oop reflect, oop value);
+
+  static oop name(oop method);
+  static void set_name(oop method, oop value);
+
+  static oop return_type(oop method);
+  static void set_return_type(oop method, oop value);
+
+  static oop parameter_types(oop method);
+  static void set_parameter_types(oop method, oop value);
+
+  static oop exception_types(oop method);
+  static void set_exception_types(oop method, oop value);
+
+  static int slot(oop reflect);
+  static void set_slot(oop reflect, int value);
+
+  static int modifiers(oop method);
+  static void set_modifiers(oop method, int value);
+
+  static bool has_signature_field();
+  static oop signature(oop method);
+  static void set_signature(oop method, oop value);
+
+  static bool has_annotations_field();
+  static oop annotations(oop method);
+  static void set_annotations(oop method, oop value);
+
+  static bool has_parameter_annotations_field();
+  static oop parameter_annotations(oop method);
+  static void set_parameter_annotations(oop method, oop value);
+
+  static bool has_annotation_default_field();
+  static oop annotation_default(oop method);
+  static void set_annotation_default(oop method, oop value);
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.reflect.Constructor objects
+
+class java_lang_reflect_Constructor : public java_lang_reflect_AccessibleObject {
+ private:
+  // Note that to reduce dependencies on the JDK we compute these
+  // offsets at run-time. Fields that only exist in later JDKs have
+  // has_*_field() testers below.
+  static int clazz_offset;
+  static int parameterTypes_offset;
+  static int exceptionTypes_offset;
+  static int slot_offset;
+  static int modifiers_offset;
+  static int signature_offset;
+  static int annotations_offset;
+  static int parameter_annotations_offset;
+
+  static void compute_offsets();
+
+ public:
+  // Allocation
+  static Handle create(TRAPS);
+
+  // Accessors
+  static oop clazz(oop reflect);
+  static void set_clazz(oop reflect, oop value);
+
+  static oop parameter_types(oop constructor);
+  static void set_parameter_types(oop constructor, oop value);
+
+  static oop exception_types(oop constructor);
+  static void set_exception_types(oop constructor, oop value);
+
+  static int slot(oop reflect);
+  static void set_slot(oop reflect, int value);
+
+  static int modifiers(oop constructor);
+  static void set_modifiers(oop constructor, int value);
+
+  static bool has_signature_field();
+  static oop signature(oop constructor);
+  static void set_signature(oop constructor, oop value);
+
+  static bool has_annotations_field();
+  static oop annotations(oop constructor);
+  static void set_annotations(oop constructor, oop value);
+
+  static bool has_parameter_annotations_field();
+  static oop parameter_annotations(oop method);
+  static void set_parameter_annotations(oop method, oop value);
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.reflect.Field objects
+
+class java_lang_reflect_Field : public java_lang_reflect_AccessibleObject {
+ private:
+  // Note that to reduce dependencies on the JDK we compute these
+  // offsets at run-time.
+  static int clazz_offset;
+  static int name_offset;
+  static int type_offset;
+  static int slot_offset;
+  static int modifiers_offset;
+  static int signature_offset;
+  static int annotations_offset;
+
+  static void compute_offsets();
+
+ public:
+  // Allocation
+  static Handle create(TRAPS);
+
+  // Accessors
+  static oop clazz(oop reflect);
+  static void set_clazz(oop reflect, oop value);
+
+  static oop name(oop field);
+  static void set_name(oop field, oop value);
+
+  static oop type(oop field);
+  static void set_type(oop field, oop value);
+
+  static int slot(oop reflect);
+  static void set_slot(oop reflect, int value);
+
+  static int modifiers(oop field);
+  static void set_modifiers(oop field, int value);
+
+  static bool has_signature_field();
+  static oop signature(oop field);
+  static void set_signature(oop field, oop value);
+
+  static bool has_annotations_field();
+  static oop annotations(oop field);
+  static void set_annotations(oop field, oop value);
+
+  // NOTE(review): the two groups below have no corresponding *_offset
+  // members in this class (unlike signature/annotations above); they
+  // look carried over from the Method interface — confirm they are
+  // backed by compute_offsets() before relying on them.
+  static bool has_parameter_annotations_field();
+  static oop parameter_annotations(oop field);
+  static void set_parameter_annotations(oop field, oop value);
+
+  static bool has_annotation_default_field();
+  static oop annotation_default(oop field);
+  static void set_annotation_default(oop field, oop value);
+
+  // Debugging
+  friend class JavaClasses;
+}; 
+
+// Interface to sun.reflect.ConstantPool objects
+class sun_reflect_ConstantPool {
+ private:
+  // Note that to reduce dependencies on the JDK we compute these
+  // offsets at run-time.
+  static int _cp_oop_offset; 
+
+  static void compute_offsets();
+
+ public:
+  // Allocation
+  static Handle create(TRAPS);
+
+  // Accessors
+  // cp_oop: the oop stored in the ConstantPool object's wrapped field
+  // (presumably the VM-side constant pool — confirm in javaClasses.cpp).
+  static oop cp_oop(oop reflect);
+  static void set_cp_oop(oop reflect, oop value);
+  static int cp_oop_offset() {
+    return _cp_oop_offset;
+  }
+
+  // Debugging
+  friend class JavaClasses;
+}; 
+
+// Interface to sun.reflect.UnsafeStaticFieldAccessorImpl objects
+class sun_reflect_UnsafeStaticFieldAccessorImpl {
+ private:
+  // Offset of the accessor's "base" field, computed at run-time by
+  // compute_offsets() rather than hard-coded.
+  static int _base_offset; 
+  static void compute_offsets();
+
+ public:
+  static int base_offset() {
+    return _base_offset;
+  }
+
+  // Debugging
+  friend class JavaClasses;
+}; 
+
+// Interface to java.lang primitive type boxing objects:
+//  - java.lang.Boolean
+//  - java.lang.Character
+//  - java.lang.Float
+//  - java.lang.Double
+//  - java.lang.Byte
+//  - java.lang.Short
+//  - java.lang.Integer
+//  - java.lang.Long
+
+// This could be separated out into 8 individual classes.
+
+class java_lang_boxing_object: AllStatic {
+ private:
+  enum {
+   // hc_ prefix marks a hard-coded offset (checked against the real
+   // field layout by JavaClasses::check_offsets — see below).
+   hc_value_offset = 0
+  };
+  static int value_offset; 
+
+  static oop initialize_and_allocate(klassOop klass, TRAPS);
+ public:
+  // Allocation. Returns a boxed value, or NULL for invalid type.
+  static oop create(BasicType type, jvalue* value, TRAPS);
+  // Accessors. Returns the basic type being boxed, or T_ILLEGAL for invalid oop.
+  static BasicType get_value(oop box, jvalue* value);
+  static BasicType set_value(oop box, jvalue* value);
+
+  static int value_offset_in_bytes() { return value_offset; }
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+
+// Interface to java.lang.ref.Reference objects
+
+class java_lang_ref_Reference: AllStatic {
+ public:
+  // Hard-coded word offsets of the instance fields.
+  enum {
+   hc_referent_offset   = 0,
+   hc_queue_offset      = 1,
+   hc_next_offset       = 2,
+   hc_discovered_offset	= 3  // Is not last, see SoftRefs.
+  };
+  // Hard-coded word offsets of the static fields.
+  enum {
+   hc_static_lock_offset    = 0,
+   hc_static_pending_offset = 1
+  };
+
+  static int referent_offset;
+  static int queue_offset;
+  static int next_offset;
+  static int discovered_offset;
+  static int static_lock_offset;
+  static int static_pending_offset;
+  // NOTE(review): presumably the count of VM-injected pseudo oop fields
+  // appended to Reference — confirm in javaClasses.cpp.
+  static int number_of_fake_oop_fields;
+ 
+  // Accessors.  The *_addr variants expose the raw field addresses so
+  // the GC can scan/update them in place.
+  static oop referent(oop ref)        { return *referent_addr(ref); }
+  static void set_referent(oop ref, oop value);
+  static oop* referent_addr(oop ref);
+
+  static oop next(oop ref)            { return *next_addr(ref); }
+  static void set_next(oop ref, oop value);
+  static oop* next_addr(oop ref);
+
+  static oop discovered(oop ref)      { return *discovered_addr(ref); }
+  static void set_discovered(oop ref, oop value);
+  static oop* discovered_addr(oop ref);
+
+  // Accessors for statics
+  static oop  pending_list_lock()     { return *pending_list_lock_addr(); }
+  static oop  pending_list()          { return *pending_list_addr(); }
+
+  static oop* pending_list_lock_addr();
+  static oop* pending_list_addr();
+};
+
+
+// Interface to java.lang.ref.SoftReference objects
+
+class java_lang_ref_SoftReference: public java_lang_ref_Reference {
+ public:
+  enum {
+   // The timestamp is a long field and may need to be adjusted for alignment.
+   hc_timestamp_offset    = align_object_offset_(hc_discovered_offset + 1)
+  };
+  enum {
+   hc_static_clock_offset = 0
+  };
+
+  static int timestamp_offset;
+  static int static_clock_offset;
+
+  // Accessors
+  static jlong timestamp(oop ref);
+
+  // Accessors for the static "clock" field shared by all SoftReferences.
+  static jlong clock();
+  static void set_clock(jlong value);
+};
+
+
+// Interface to java.security.AccessControlContext objects
+
+class java_security_AccessControlContext: AllStatic {
+ private:
+  // Note that for this class the layout changed between JDK1.2 and JDK1.3,
+  // so we compute the offsets at startup rather than hard-wiring them.
+  static int _context_offset;
+  static int _privilegedContext_offset;
+  static int _isPrivileged_offset;
+
+  static void compute_offsets();
+ public:
+  // Allocate and initialize an AccessControlContext from the given
+  // protection-domain array and privilege state.
+  static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);  
+
+  // Debugging/initialization
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.ClassLoader objects
+
+class java_lang_ClassLoader : AllStatic {
+ private:
+  enum {
+   hc_parent_offset = 0
+  };
+
+  static int parent_offset;
+
+ public:
+  // Returns the loader's parent class loader field.
+  static oop parent(oop loader);
+
+  static bool is_trusted_loader(oop loader);
+
+  // Fix for 4474172
+  // NOTE(review): presumably strips reflection delegating loaders to
+  // reach the user-visible loader — confirm in javaClasses.cpp.
+  static oop  non_reflection_class_loader(oop loader);
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.System objects
+
+class java_lang_System : AllStatic {
+ private:
+  // Hard-coded positions of the static in/out/err fields relative to
+  // offset_of_static_fields.
+  enum {
+   hc_static_in_offset  = 0,
+   hc_static_out_offset = 1,
+   hc_static_err_offset = 2
+  };
+
+  static int offset_of_static_fields;
+  static int  static_in_offset;
+  static int static_out_offset;
+  static int static_err_offset;
+
+  static void compute_offsets();
+
+ public:
+  // Byte offsets of System.in / System.out / System.err.
+  static int  in_offset_in_bytes();
+  static int out_offset_in_bytes();
+  static int err_offset_in_bytes();
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.StackTraceElement objects
+
+class java_lang_StackTraceElement: AllStatic {
+ private:
+  enum {
+    hc_declaringClass_offset  = 0,
+    hc_methodName_offset = 1,
+    hc_fileName_offset   = 2,
+    hc_lineNumber_offset = 3
+  };
+
+  static int declaringClass_offset;
+  static int methodName_offset;
+  static int fileName_offset;
+  static int lineNumber_offset;
+
+ public:
+  // Setters (write-only interface: elements are built by the VM when
+  // materializing a backtrace, never read back through this class).
+  static void set_declaringClass(oop element, oop value);
+  static void set_methodName(oop element, oop value);
+  static void set_fileName(oop element, oop value);
+  static void set_lineNumber(oop element, int value);
+
+  // Create an instance of StackTraceElement
+  static oop create(methodHandle m, int bci, TRAPS);
+
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.lang.AssertionStatusDirectives objects
+
+class java_lang_AssertionStatusDirectives: AllStatic {
+ private:
+  // Implicitly-numbered hard-coded field positions (0..4).
+  enum {
+    hc_classes_offset,
+    hc_classEnabled_offset,
+    hc_packages_offset,
+    hc_packageEnabled_offset,
+    hc_deflt_offset
+  };
+
+  static int classes_offset;
+  static int classEnabled_offset;
+  static int packages_offset;
+  static int packageEnabled_offset;
+  static int deflt_offset;
+
+ public:
+  // Setters
+  static void set_classes(oop obj, oop val);
+  static void set_classEnabled(oop obj, oop val);
+  static void set_packages(oop obj, oop val);
+  static void set_packageEnabled(oop obj, oop val);
+  static void set_deflt(oop obj, bool val);
+  // Debugging
+  friend class JavaClasses;
+};
+
+
+// Interface to java.nio.Buffer objects (limit field only).
+class java_nio_Buffer: AllStatic {
+ private:
+  static int _limit_offset;
+
+ public:
+  static int  limit_offset();
+  static void compute_offsets();
+};
+
+// Interface to sun.misc.AtomicLongCSImpl objects (value field only).
+class sun_misc_AtomicLongCSImpl: AllStatic {
+ private:
+  static int _value_offset;
+
+ public:
+  static int  value_offset();
+  static void compute_offsets();
+};
+
+// Interface to java.util.concurrent.locks.AbstractOwnableSynchronizer:
+// exposes the owner thread recorded in the synchronizer.
+class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
+ private:
+  static int  _owner_offset;
+ public:
+  static void initialize(TRAPS);
+  static oop  get_owner_threadObj(oop obj);
+};
+
+// Interface to hard-coded offset checking
+
+class JavaClasses : AllStatic {
+ private:
+  // Verify a hard-coded offset against the actual field layout; these
+  // compile to trivial stubs in product builds (PRODUCT_RETURN0).
+  static bool check_offset(const char *klass_name, int offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
+  static bool check_static_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
+ public:
+  static void compute_hard_coded_offsets();
+  static void compute_offsets();
+  static void check_offsets() PRODUCT_RETURN;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,510 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)loaderConstraints.cpp	1.19 07/05/17 15:50:23 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_loaderConstraints.cpp.incl"
+
+// Construct the table with the requested bucket count; entries are
+// sized for the constraint-specific payload.
+LoaderConstraintTable::LoaderConstraintTable(int nof_buckets)
+  : Hashtable(nof_buckets, sizeof(LoaderConstraintEntry)) {
+}
+
+
+// Allocate a hashtable entry keyed by klass, then fill in the
+// constraint-specific fields.
+LoaderConstraintEntry* LoaderConstraintTable::new_entry(
+                                 unsigned int hash, symbolOop name,
+                                 klassOop klass, int num_loaders,
+                                 int max_loaders) {
+  LoaderConstraintEntry* result =
+    (LoaderConstraintEntry*)Hashtable::new_entry(hash, klass);
+  result->set_name(name);
+  result->set_max_loaders(max_loaders);
+  result->set_num_loaders(num_loaders);
+  return result;
+}
+
+
+// Present every oop stored in the table to the closure: each entry's
+// name symbol, its class (if resolved), and every non-null loader.
+void LoaderConstraintTable::oops_do(OopClosure* f) {
+  for (int i = 0; i < table_size(); i++) {
+    LoaderConstraintEntry* e = bucket(i);
+    while (e != NULL) {
+      f->do_oop((oop*)(e->name_addr()));
+      if (e->klass() != NULL) {
+        f->do_oop((oop*)e->klass_addr());
+      }
+      for (int j = 0; j < e->num_loaders(); j++) {
+        if (e->loader(j) != NULL) {
+          f->do_oop(e->loader_addr(j));
+        }
+      }
+      e = e->next();
+    }
+  }
+}
+
+// We must keep the symbolOop used in the name alive.  We'll use the
+// loaders to decide if a particular entry can be purged. 
+void LoaderConstraintTable::always_strong_classes_do(OopClosure* blk) {
+  // Only the name symbols are presented here; they must never be
+  // collected while the constraint entry exists.
+  for (int i = 0; i < table_size(); i++) {
+    LoaderConstraintEntry* e = bucket(i);
+    while (e != NULL) {
+      assert (e->name() != NULL,  "corrupted loader constraint table");
+      blk->do_oop((oop*)e->name_addr());
+      e = e->next();
+    }
+  }
+}
+
+
+// The loaderConstraintTable must always be accessed with the
+// SystemDictionary lock held. This is true even for readers as
+// entries in the table could be being dynamically resized.
+
+// Returns the address of the bucket-chain link pointing at the entry
+// that matches (name, loader), or the address of the terminating NULL
+// link when there is no match — so callers test *result for NULL and
+// may splice through the returned link to unlink the entry.
+LoaderConstraintEntry** LoaderConstraintTable::find_loader_constraint(
+                                    symbolHandle name, Handle loader) {
+
+  unsigned int hash = compute_hash(name);
+  int index = hash_to_index(hash);
+  LoaderConstraintEntry** pp = bucket_addr(index);
+  while (*pp) {
+    LoaderConstraintEntry* p = *pp;
+    if (p->hash() == hash) {
+      if (p->name() == name()) {
+        // An entry matches only if this loader is one of its
+        // recorded initiating loaders.
+        for (int i = p->num_loaders() - 1; i >= 0; i--) {
+          if (p->loader(i) == loader()) {
+            return pp;
+          }
+        }
+      }
+    }
+    pp = p->next_addr();
+  }
+  return pp;
+}
+
+
+// Drop dead klasses and dead loaders from the table after a GC has
+// determined liveness; entries constraining fewer than two live
+// loaders are removed entirely.
+void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  // Remove unloaded entries from constraint table
+  for (int index = 0; index < table_size(); index++) {
+    LoaderConstraintEntry** p = bucket_addr(index);
+    while(*p) {
+      LoaderConstraintEntry* probe = *p;
+      klassOop klass = probe->klass();
+      // Remove klass that is no longer alive
+      if (klass != NULL && !is_alive->do_object_b(klass)) {
+        probe->set_klass(NULL);
+        if (TraceLoaderConstraints) {
+          ResourceMark rm;
+          tty->print_cr("[Purging class object from constraint for name %s,"
+                        " loader list:", 
+                        probe->name()->as_C_string());
+          for (int i = 0; i < probe->num_loaders(); i++) {
+            tty->print_cr("[   [%d]: %s", i, 
+                          SystemDictionary::loader_name(probe->loader(i)));
+          }
+        }
+      }
+      // Remove entries no longer alive from loader array
+      int n = 0; 
+      while (n < probe->num_loaders()) {
+        if (probe->loader(n) != NULL) {
+          if (!is_alive->do_object_b(probe->loader(n))) {
+            if (TraceLoaderConstraints) {
+              ResourceMark rm;
+              tty->print_cr("[Purging loader %s from constraint for name %s",
+                            SystemDictionary::loader_name(probe->loader(n)),
+                            probe->name()->as_C_string()
+                            );
+            }
+
+            // Compact array: move the last live loader into slot n.
+            int num = probe->num_loaders() - 1;
+            probe->set_num_loaders(num);
+            probe->set_loader(n, probe->loader(num));
+            probe->set_loader(num, NULL);
+
+            if (TraceLoaderConstraints) {
+              ResourceMark rm;
+              tty->print_cr("[New loader list:");
+              for (int i = 0; i < probe->num_loaders(); i++) {
+                tty->print_cr("[   [%d]: %s", i, 
+                              SystemDictionary::loader_name(probe->loader(i)));
+              }
+            }
+
+            continue;  // current element replaced, so restart without
+                       // incrementing n
+          }
+        }
+        n++;
+      }
+      // Check whether entry should be purged: with fewer than two
+      // loaders left the constraint no longer constrains anything.
+      if (probe->num_loaders() < 2) {
+        if (TraceLoaderConstraints) {
+          ResourceMark rm;
+          tty->print("[Purging complete constraint for name %s\n", 
+                     probe->name()->as_C_string());
+        }
+
+        // Purge entry
+        *p = probe->next();
+        FREE_C_HEAP_ARRAY(oop, probe->loaders());
+        free_entry(probe);
+      } else {
+#ifdef ASSERT
+        assert(is_alive->do_object_b(probe->name()), "name should be live");
+        if (probe->klass() != NULL) {
+          assert(is_alive->do_object_b(probe->klass()), "klass should be live");
+        }
+        for (n = 0; n < probe->num_loaders(); n++) {
+          if (probe->loader(n) != NULL) {
+            assert(is_alive->do_object_b(probe->loader(n)), "loader should be live");
+          }
+        }
+#endif
+        // Go to next entry
+        p = probe->next_addr();
+      }
+    }
+  }
+}
+
+// Record (or merge into existing records) the constraint that
+// class_loader1 and class_loader2 must agree on the class named
+// class_name.  klass1/klass2 are the class objects each loader
+// currently presents (either may be NULL if not yet loaded).
+// Returns false if the constraint is violated.
+bool LoaderConstraintTable::add_entry(symbolHandle class_name,
+                                      klassOop klass1, Handle class_loader1,
+                                      klassOop klass2, Handle class_loader2) {
+  int failure_code = 0; // encode different reasons for failing
+
+  if (klass1 != NULL && klass2 != NULL && klass1 != klass2) {
+    failure_code = 1;
+  } else {
+    klassOop klass = klass1 != NULL ? klass1 : klass2;
+
+    LoaderConstraintEntry** pp1 = find_loader_constraint(class_name,
+                                                         class_loader1);
+    if (*pp1 != NULL && (*pp1)->klass() != NULL) {
+      if (klass != NULL) {
+        if (klass != (*pp1)->klass()) {
+          failure_code = 2;
+        }
+      } else {
+        klass = (*pp1)->klass();
+      }
+    }
+
+    LoaderConstraintEntry** pp2 = find_loader_constraint(class_name,
+                                                         class_loader2);
+    if (*pp2 != NULL && (*pp2)->klass() != NULL) {
+      if (klass != NULL) {
+        if (klass != (*pp2)->klass()) {
+          failure_code = 3;
+        }
+      } else {
+        klass = (*pp2)->klass();
+      }
+    }
+
+    if (failure_code == 0) {
+      if (*pp1 == NULL && *pp2 == NULL) {
+        // Neither loader is constrained yet for this name:
+        // create a fresh two-loader entry.
+        unsigned int hash = compute_hash(class_name);
+        int index = hash_to_index(hash);
+        LoaderConstraintEntry* p;
+        p = new_entry(hash, class_name(), klass, 2, 2);
+        p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2));
+        p->set_loader(0, class_loader1());
+        p->set_loader(1, class_loader2());
+        p->set_klass(klass);
+        p->set_next(bucket(index));
+        set_entry(index, p);
+        if (TraceLoaderConstraints) {
+          ResourceMark rm;
+          tty->print("[Adding new constraint for name: %s, loader[0]: %s,"
+                     " loader[1]: %s ]\n",
+                     class_name()->as_C_string(), 
+                     SystemDictionary::loader_name(class_loader1()),
+                     SystemDictionary::loader_name(class_loader2())
+                     );
+        }
+      } else if (*pp1 == *pp2) {
+        /* constraint already imposed */
+        if ((*pp1)->klass() == NULL) {
+          (*pp1)->set_klass(klass);
+          if (TraceLoaderConstraints) {
+            ResourceMark rm;
+            tty->print("[Setting class object in existing constraint for"
+                       " name: %s and loader %s ]\n",
+                       class_name()->as_C_string(),
+                       SystemDictionary::loader_name(class_loader1())
+                       );
+          }
+        } else {
+          assert((*pp1)->klass() == klass, "loader constraints corrupted");
+        }
+      } else if (*pp1 == NULL) {
+        extend_loader_constraint(*pp2, class_loader1, klass);
+      } else if (*pp2 == NULL) {
+        extend_loader_constraint(*pp1, class_loader2, klass);
+      } else {
+        merge_loader_constraints(pp1, pp2, klass);
+      }
+    }
+  }
+
+  if (failure_code != 0 && TraceLoaderConstraints) {
+    ResourceMark rm;
+    const char* reason = "";
+    switch(failure_code) {
+    case 1: reason = "the class objects presented by loader[0] and loader[1]"
+              " are different"; break;
+    case 2: reason = "the class object presented by loader[0] does not match"
+              " the stored class object in the constraint"; break;
+    case 3: reason = "the class object presented by loader[1] does not match"
+              " the stored class object in the constraint"; break;
+    default: reason = "unknown reason code";
+    }
+    tty->print("[Failed to add constraint for name: %s, loader[0]: %s,"
+               " loader[1]: %s, Reason: %s ]\n",
+               class_name()->as_C_string(),
+               SystemDictionary::loader_name(class_loader1()),
+               SystemDictionary::loader_name(class_loader2()),
+               reason
+               );
+  }
+
+  return failure_code == 0;
+}
+
+
+// return true if the constraint was updated, false if the constraint is
+// violated
+bool LoaderConstraintTable::check_or_update(instanceKlassHandle k,
+                                                   Handle loader,
+                                                   symbolHandle name) {
+  LoaderConstraintEntry* p = *(find_loader_constraint(name, loader));
+  if (p && p->klass() != NULL && p->klass() != k()) {
+    // A different class object is already recorded for this
+    // (name, loader) pair: the constraint is violated.
+    if (TraceLoaderConstraints) {
+      ResourceMark rm;
+      tty->print("[Constraint check failed for name %s, loader %s: "
+                 "the presented class object differs from that stored ]\n",
+                 name()->as_C_string(), 
+                 SystemDictionary::loader_name(loader()));
+    }
+    return false;
+  } else {
+    if (p && p->klass() == NULL) {
+      // First class object seen for this constraint: record it.
+      p->set_klass(k());
+      if (TraceLoaderConstraints) {
+        ResourceMark rm;
+        tty->print("[Updating constraint for name %s, loader %s, "
+                   "by setting class object ]\n",
+                   name()->as_C_string(), 
+                   SystemDictionary::loader_name(loader()));
+      }
+    }
+    return true;
+  }
+}
+
+// Look up the class object recorded for (name, loader), if any.
+klassOop LoaderConstraintTable::find_constrained_klass(symbolHandle name,
+                                                       Handle loader) {
+  LoaderConstraintEntry* entry = *(find_loader_constraint(name, loader));
+  if (entry == NULL || entry->klass() == NULL) {
+    // No constraints, or else no klass loaded yet.
+    return NULL;
+  }
+  return entry->klass();
+}
+
+
+// For an array class name under constraint, look up the element class
+// via each constrained loader; returns the first element klass found,
+// or NULL.  Array constraints never store a class object directly.
+klassOop LoaderConstraintTable::find_constrained_elem_klass(symbolHandle name,
+                                                            symbolHandle elem_name,
+                                                            Handle loader,
+                                                            TRAPS) {
+  LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
+  if (p != NULL) {
+    assert(p->klass() == NULL, "Expecting null array klass");
+
+    // The array name has a constraint, but it will not have a class. Check
+    // each loader for an associated elem
+    for (int i = 0; i < p->num_loaders(); i++) {
+      Handle no_protection_domain;
+
+      klassOop k = SystemDictionary::find(elem_name, p->loader(i), no_protection_domain, THREAD);
+      if (k != NULL) {
+        // Return the first elem klass found.
+        return k;
+      }
+    }
+  }
+
+  // No constraints, or else no klass loaded yet.
+  return NULL;
+}
+
+
+// Grow p's loader array, if needed, so that at least nfree additional
+// loader slots are available.
+void LoaderConstraintTable::ensure_loader_constraint_capacity(
+                                                     LoaderConstraintEntry *p,
+                                                     int nfree) {
+  if (p->max_loaders() - p->num_loaders() < nfree) {
+    int n = nfree + p->num_loaders();
+    oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n);
+    memcpy(new_loaders, p->loaders(), sizeof(oop) * p->num_loaders());
+    p->set_max_loaders(n);
+    // Release the old array only after its contents have been copied.
+    FREE_C_HEAP_ARRAY(oop, p->loaders());
+    p->set_loaders(new_loaders);
+  }
+}
+ 
+
+// Append one more loader to an existing constraint entry, recording the
+// class object if the entry does not have one yet.
+void LoaderConstraintTable::extend_loader_constraint(LoaderConstraintEntry* p,
+                                                     Handle loader,
+                                                     klassOop klass) {
+  ensure_loader_constraint_capacity(p, 1);
+  int num = p->num_loaders();
+  p->set_loader(num, loader());
+  p->set_num_loaders(num + 1);
+  if (TraceLoaderConstraints) {
+    ResourceMark rm;
+    tty->print("[Extending constraint for name %s by adding loader[%d]: %s %s",
+               p->name()->as_C_string(),
+               num,
+               SystemDictionary::loader_name(loader()),
+               (p->klass() == NULL ? " and setting class object ]\n" : " ]\n")
+               );
+  }
+  if (p->klass() == NULL) {
+    p->set_klass(klass);
+  } else {
+    assert(klass == NULL || p->klass() == klass, "constraints corrupted");
+  }
+}
+
+
+// Fold the constraint *pp2 into *pp1 (appending pp2's loaders), then
+// unlink and free the absorbed entry.  klass is the class object being
+// recorded, or NULL if none is known yet.
+void LoaderConstraintTable::merge_loader_constraints(
+                                                   LoaderConstraintEntry** pp1,
+                                                   LoaderConstraintEntry** pp2,
+                                                   klassOop klass) {
+  // make sure *pp1 has higher capacity 
+  if ((*pp1)->max_loaders() < (*pp2)->max_loaders()) {
+    LoaderConstraintEntry** tmp = pp2;
+    pp2 = pp1;
+    pp1 = tmp;
+  }
+
+  LoaderConstraintEntry* p1 = *pp1;
+  LoaderConstraintEntry* p2 = *pp2;
+
+  ensure_loader_constraint_capacity(p1, p2->num_loaders());
+
+  for (int i = 0; i < p2->num_loaders(); i++) {
+    int num = p1->num_loaders();
+    p1->set_loader(num, p2->loader(i));
+    p1->set_num_loaders(num + 1);
+  }
+
+  if (TraceLoaderConstraints) {
+    ResourceMark rm;
+    tty->print_cr("[Merged constraints for name %s, new loader list:", 
+                  p1->name()->as_C_string()
+                  );
+
+    for (int i = 0; i < p1->num_loaders(); i++) {
+      tty->print_cr("[   [%d]: %s", i, 
+                    SystemDictionary::loader_name(p1->loader(i)));
+    }
+    if (p1->klass() == NULL) {
+      tty->print_cr("[... and setting class object]");
+    }
+  }
+
+  // p1->klass() will hold NULL if klass, p2->klass(), and old
+  // p1->klass() are all NULL.  In addition, all three must have
+  // matching non-NULL values, otherwise either the constraints would
+  // have been violated, or the constraints had been corrupted (and an
+  // assertion would fail).
+  if (p2->klass() != NULL) {
+    assert(p2->klass() == klass, "constraints corrupted");
+  }
+  if (p1->klass() == NULL) {
+    p1->set_klass(klass);
+  } else {
+    assert(p1->klass() == klass, "constraints corrupted");
+  }
+
+  *pp2 = p2->next();
+  FREE_C_HEAP_ARRAY(oop, p2->loaders());
+  free_entry(p2);
+}
+
+
+// Cross-check the constraint table against the system dictionary: every
+// resolved constraint klass must be findable in the dictionary under
+// the same (name, loader) key.
+// NOTE(review): iterates _loader_constraint_size buckets rather than
+// table_size(); confirm the table is always constructed with
+// _loader_constraint_size buckets, else verification is incomplete.
+void LoaderConstraintTable::verify(Dictionary* dictionary) {
+  Thread *thread = Thread::current();
+  for (int cindex = 0; cindex < _loader_constraint_size; cindex++) {
+    for (LoaderConstraintEntry* probe = bucket(cindex);
+                                probe != NULL;
+                                probe = probe->next()) {
+      guarantee(probe->name()->is_symbol(), "should be symbol");
+      if (probe->klass() != NULL) {
+        instanceKlass* ik = instanceKlass::cast(probe->klass());
+        guarantee(ik->name() == probe->name(), "name should match");
+        symbolHandle name (thread, ik->name());
+        Handle loader(thread, ik->class_loader());
+        unsigned int d_hash = dictionary->compute_hash(name, loader);
+        int d_index = dictionary->hash_to_index(d_hash);
+        klassOop k = dictionary->find_class(d_index, d_hash, name, loader);
+        guarantee(k == probe->klass(), "klass should be in dictionary");
+      }
+      for (int n = 0; n < probe->num_loaders(); n++) {
+        guarantee(probe->loader(n)->is_oop_or_null(), "should be oop");
+      }
+    }
+  }
+}
+
+#ifndef PRODUCT
+
+// Called with the system dictionary lock held
+void LoaderConstraintTable::print() {
+  ResourceMark rm;
+
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  tty->print_cr("Java loader constraints (entries=%d)", _loader_constraint_size);
+  // Dump each entry as "<bucket>: <name> , loaders: <loader>, ..."
+  for (int i = 0; i < _loader_constraint_size; i++) {
+    for (LoaderConstraintEntry* e = bucket(i); e != NULL; e = e->next()) {
+      tty->print("%4d: ", i);
+      e->name()->print();
+      tty->print(" , loaders:");
+      for (int j = 0; j < e->num_loaders(); j++) {
+        e->loader(j)->print_value();
+        tty->print(", ");
+      }
+      tty->cr();
+    }
+  }
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,136 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)loaderConstraints.hpp	1.14 07/05/05 17:05:52 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class LoaderConstraintEntry;
+
+// Hashtable of loader constraints: each entry records a class name, the
+// class object (once resolved), and the set of initiating loaders that
+// must agree on that class.  Must be accessed with the SystemDictionary
+// lock held.
+class LoaderConstraintTable : public Hashtable {
+  friend class VMStructs;
+private:
+
+  enum Constants {
+    _loader_constraint_size = 107,                     // number of entries in constraint table
+    _nof_buckets            = 1009                     // number of buckets in hash table
+  };
+
+  // Returns the address of the matching chain link, or of the
+  // terminating NULL link when no entry matches (name, loader).
+  LoaderConstraintEntry** find_loader_constraint(symbolHandle name,
+                                                 Handle loader);
+
+public:
+
+  LoaderConstraintTable(int nof_buckets);
+
+  LoaderConstraintEntry* new_entry(unsigned int hash, symbolOop name,
+                                   klassOop klass, int num_loaders,
+                                   int max_loaders);
+
+  LoaderConstraintEntry* bucket(int i) {
+    return (LoaderConstraintEntry*)Hashtable::bucket(i);
+  }
+
+  LoaderConstraintEntry** bucket_addr(int i) {
+    return (LoaderConstraintEntry**)Hashtable::bucket_addr(i);
+  }
+
+  // GC support
+  void oops_do(OopClosure* f);
+  void always_strong_classes_do(OopClosure* blk);
+
+  // Check class loader constraints
+  bool add_entry(symbolHandle name, klassOop klass1, Handle loader1,
+                                    klassOop klass2, Handle loader2);
+
+  void check_signature_loaders(symbolHandle signature, Handle loader1,
+                               Handle loader2, bool is_method, TRAPS);
+
+  klassOop find_constrained_klass(symbolHandle name, Handle loader);
+  klassOop find_constrained_elem_klass(symbolHandle name, symbolHandle elem_name,
+                                       Handle loader, TRAPS);
+
+
+  // Class loader constraints
+
+  void ensure_loader_constraint_capacity(LoaderConstraintEntry *p, int nfree);
+  void extend_loader_constraint(LoaderConstraintEntry* p, Handle loader,
+                                klassOop klass);
+  void merge_loader_constraints(LoaderConstraintEntry** pp1,
+                                LoaderConstraintEntry** pp2, klassOop klass);
+
+  bool check_or_update(instanceKlassHandle k, Handle loader,
+                              symbolHandle name);
+
+  
+  void purge_loader_constraints(BoolObjectClosure* is_alive);
+
+  void verify(Dictionary* dictionary);
+#ifndef PRODUCT
+  void print();
+#endif
+};
+
+// One loader constraint: a class name plus the loaders constrained to
+// agree on it.  The resolved class object (if any) is stored in the
+// inherited hashtable literal slot.
+class LoaderConstraintEntry : public HashtableEntry {
+  friend class VMStructs;
+private:
+  symbolOop              _name;                   // class name
+  int                    _num_loaders;            // live slots in _loaders
+  int                    _max_loaders;            // capacity of _loaders
+  oop*                   _loaders;                // initiating loaders
+
+public:
+
+  klassOop klass() { return (klassOop)literal(); }
+  klassOop* klass_addr() { return (klassOop*)literal_addr(); }
+  void set_klass(klassOop k) { set_literal(k); }
+
+  LoaderConstraintEntry* next() {
+    return (LoaderConstraintEntry*)HashtableEntry::next();
+  }
+
+  LoaderConstraintEntry** next_addr() {
+    return (LoaderConstraintEntry**)HashtableEntry::next_addr();
+  }
+  void set_next(LoaderConstraintEntry* next) {
+    HashtableEntry::set_next(next);
+  }
+
+  symbolOop name() { return _name; }
+  symbolOop* name_addr() { return &_name; }
+  void set_name(symbolOop name) { _name = name; }
+
+  int num_loaders() { return _num_loaders; }
+  void set_num_loaders(int i) { _num_loaders = i; }
+
+  int max_loaders() { return _max_loaders; }
+  void set_max_loaders(int i) { _max_loaders = i; }
+
+  // The loader array is C-heap allocated; owners must free it with
+  // FREE_C_HEAP_ARRAY when the entry is purged.
+  oop* loaders() { return _loaders; }
+  void set_loaders(oop* loaders) { _loaders = loaders; }
+
+  oop loader(int i) { return _loaders[i]; }
+  oop* loader_addr(int i) { return &_loaders[i]; }
+  void set_loader(int i, oop p) { _loaders[i] = p; }
+
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/placeholders.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,274 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)placeholders.cpp	1.20 07/05/17 15:50:29 JVM"
+#endif
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_placeholders.cpp.incl"
+
+// Placeholder methods
+
+// Allocate and initialize a PlaceholderEntry for class 'name'.  The name
+// symbol becomes the hashtable literal; all thread queues and the define
+// result start out empty and are filled in later via find_and_add.
+PlaceholderEntry* PlaceholderTable::new_entry(int hash, symbolOop name,
+                                              oop loader, bool havesupername, 
+                                              symbolOop supername) {
+  PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable::new_entry(hash, name);
+  entry->set_loader(loader);
+  entry->set_havesupername(havesupername);
+  entry->set_supername(supername);
+  entry->set_superThreadQ(NULL);
+  entry->set_loadInstanceThreadQ(NULL);
+  entry->set_defineThreadQ(NULL);
+  entry->set_definer(NULL);
+  entry->set_instanceKlass(NULL);
+  return entry;
+}
+
+
+// Placeholder objects represent classes currently being loaded.
+// All threads examining the placeholder table must hold the
+// SystemDictionary_lock, so we don't need special precautions
+// on store ordering here.
+// 'index' and 'hash' are precomputed by the caller from class_name/loader.
+void PlaceholderTable::add_entry(int index, unsigned int hash,
+                                 symbolHandle class_name, Handle class_loader,
+                                 bool havesupername, symbolHandle supername){
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert(!class_name.is_null(), "adding NULL obj");
+
+  // Both readers and writers are locked so it's safe to just
+  // create the placeholder and insert it in the list without a membar.
+  PlaceholderEntry* entry = new_entry(hash, class_name(), class_loader(), havesupername, supername());
+  add_entry(index, entry);
+}
+
+
+// Remove a placeholder object. 
+// Unlinks and frees the first entry in the bucket matching
+// (hash, class_name, class_loader); silently a no-op if no entry matches.
+void PlaceholderTable::remove_entry(int index, unsigned int hash,
+                                    symbolHandle class_name, 
+                                    Handle class_loader) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  // Walk via a pointer-to-link so the predecessor's next field can be
+  // spliced over the victim without a separate 'prev' variable.
+  PlaceholderEntry** p = bucket_addr(index);
+  while (*p) {
+    PlaceholderEntry *probe = *p;
+    if (probe->hash() == hash && probe->equals(class_name(), class_loader())) {
+      // Delete entry
+      *p = probe->next();
+      free_entry(probe);
+      return;
+    }
+    p = probe->next_addr();
+  }
+}
+
+// Look up the placeholder for (class_name, class_loader) in the given
+// bucket; returns NULL when no placeholder exists for the pair.
+PlaceholderEntry* PlaceholderTable::get_entry(int index, unsigned int hash,
+                                       symbolHandle class_name,
+                                       Handle class_loader) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+
+  // Unwrap the handles once, outside the scan loop.
+  symbolOop name = class_name();
+  oop loader = class_loader();
+
+  PlaceholderEntry* probe = bucket(index);
+  while (probe != NULL) {
+    if (probe->hash() == hash && probe->equals(name, loader)) {
+      return probe;
+    }
+    probe = probe->next();
+  }
+  return NULL;
+}
+
+// This returns a symbolOop (the class-name literal) to match the type
+// expected by SystemDictionary; NULL when no placeholder exists.
+symbolOop PlaceholderTable::find_entry(int index, unsigned int hash,
+                                       symbolHandle class_name,
+                                       Handle class_loader) {
+  PlaceholderEntry* probe = get_entry(index, hash, class_name, class_loader);
+  if (probe == NULL) {
+    return symbolOop(NULL);
+  }
+  return probe->klass();
+}
+
+  // find_and_add returns probe pointer - old or new
+  // If no entry exists, add a placeholder entry 
+  // If entry exists, reuse entry 
+  // For both, push SeenThread for classloadAction
+  // if havesupername: this is used for circularity for instanceklass loading
+PlaceholderEntry* PlaceholderTable::find_and_add(int index, unsigned int hash,
+                                                 symbolHandle name, Handle loader,
+                                                 classloadAction action,
+                                                 symbolHandle supername,
+                                                 Thread* thread) {
+  PlaceholderEntry* probe = get_entry(index, hash, name, loader);
+  if (probe == NULL) {
+    // Nothing found: create the placeholder, then re-probe for it.
+    add_entry(index, hash, name, loader, (action == LOAD_SUPER), supername);
+    probe = get_entry(index, hash, name, loader);
+  } else if (action == LOAD_SUPER) {
+    // Existing entry reused for circularity detection: record supername.
+    probe->set_havesupername(true);
+    probe->set_supername(supername());
+  }
+  // Record this thread on the queue matching the requested action.
+  if (probe != NULL) {
+    probe->add_seen_thread(thread, action);
+  }
+  return probe;
+}
+
+
+// placeholder used to track class loading internal states
+// placeholder existence now for loading superclass/superinterface
+// superthreadQ tracks class circularity, while loading superclass/superinterface
+// loadInstanceThreadQ tracks load_instance_class calls
+// definer() tracks the single thread that owns define token
+// defineThreadQ tracks waiters on defining thread's results
+// 1st claimant creates placeholder
+// find_and_add adds SeenThread entry for appropriate queue
+// All claimants remove SeenThread after completing action
+// On removal: if definer and all queues empty, remove entry
+// Note: you can be in both placeholders and systemDictionary
+// see parse_stream for redefine classes
+// Therefore - must always check SD first
+// Ignores the case where entry is not found
+void PlaceholderTable::find_and_remove(int index, unsigned int hash,
+                       symbolHandle name, Handle loader, Thread* thread) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  PlaceholderEntry* probe = get_entry(index, hash, name, loader);
+  if (probe == NULL) {
+    return;  // entry not found: ignored by design (see comment above)
+  }
+  // Only reclaim the placeholder once no thread is using it: all three
+  // action queues empty and no thread holding the define token.
+  bool unused = (probe->superThreadQ() == NULL) &&
+                (probe->loadInstanceThreadQ() == NULL) &&
+                (probe->defineThreadQ() == NULL) &&
+                (probe->definer() == NULL);
+  if (unused) {
+    remove_entry(index, hash, name, loader);
+  }
+}
+
+// Construct an empty placeholder table with 'table_size' buckets; entry
+// allocation size is fixed at sizeof(PlaceholderEntry).
+PlaceholderTable::PlaceholderTable(int table_size)
+    : TwoOopHashtable(table_size, sizeof(PlaceholderEntry)) {
+}
+
+
+// GC support: apply the closure to the root oops of every entry by
+// walking all buckets and delegating to PlaceholderEntry::oops_do.
+void PlaceholderTable::oops_do(OopClosure* f) {
+  for (int i = 0; i < table_size(); i++) {
+    PlaceholderEntry* probe = bucket(i);
+    while (probe != NULL) {
+      probe->oops_do(f);
+      probe = probe->next();
+    }
+  }
+}
+
+
+// GC support: visit this entry's oop fields.  The class-name literal is
+// always present; loader, supername and the define result are only
+// visited when non-NULL.
+void PlaceholderEntry::oops_do(OopClosure* blk) {
+  assert(klass() != NULL, "should have a non-null klass");
+  blk->do_oop((oop*)klass_addr());
+  if (_loader != NULL) {
+    blk->do_oop(loader_addr());
+  }
+  if (_supername != NULL) {
+    blk->do_oop((oop*)supername_addr());
+  }
+  if (_instanceKlass != NULL) {
+    blk->do_oop((oop*)instanceKlass_addr());
+  }
+}
+
+// do all entries in the placeholder table
+// (JVMTI support) invokes f with each entry's class-name symbol and its
+// initiating loader.
+void PlaceholderTable::entries_do(void f(symbolOop, oop)) {
+  for (int index = 0; index < table_size(); index++) {
+    for (PlaceholderEntry* probe = bucket(index); 
+                           probe != NULL; 
+                           probe = probe->next()) {
+      f(probe->klass(), probe->loader());             
+    }
+  }
+}
+
+
+#ifndef PRODUCT
+// Note, doesn't append a cr
+// Debug-only dump of this entry: optional fields are printed only when
+// set, followed by the three per-action thread queues.
+void PlaceholderEntry::print() const {
+  klass()->print_value();
+  if (loader() != NULL) {
+    tty->print(", loader ");
+    loader()->print_value();
+  }
+  if (supername() != NULL) {
+    tty->print(", supername ");
+    supername()->print_value();
+  }
+  if (definer() != NULL) {
+    tty->print(", definer ");
+    definer()->print_value();
+  }
+  if (instanceKlass() != NULL) {
+    tty->print(", instanceKlass ");
+    instanceKlass()->print_value();
+  }
+  tty->print("\n");
+  tty->print("loadInstanceThreadQ threads:");
+  loadInstanceThreadQ()->printActionQ();
+  tty->print("\n");
+  tty->print("superThreadQ threads:");
+  superThreadQ()->printActionQ();
+  tty->print("\n");
+  tty->print("defineThreadQ threads:");
+  defineThreadQ()->printActionQ();
+  tty->print("\n");
+}
+#endif
+
+// Sanity-check the oop fields: _loader (when set) must be an instance,
+// and a successful define result must be an instanceKlass.
+void PlaceholderEntry::verify() const {
+  guarantee(loader() == NULL || loader()->is_instance(), 
+            "checking type of _loader");
+  guarantee(instanceKlass() == NULL 
+            || Klass::cast(instanceKlass())->oop_is_instance(),
+            "checking type of instanceKlass result");
+  klass()->verify();
+}
+
+// Verify every entry and cross-check the walked count against the
+// table's own bookkeeping.
+void PlaceholderTable::verify() {
+  int element_count = 0;
+  for (int pindex = 0; pindex < table_size(); pindex++) {
+    PlaceholderEntry* probe = bucket(pindex);
+    while (probe != NULL) {
+      probe->verify();
+      element_count++;  // both klasses and place holders count
+      probe = probe->next();
+    }
+  }
+  guarantee(number_of_entries() == element_count,
+            "Verify of system dictionary failed");
+}
+
+
+#ifndef PRODUCT
+// Debug-only dump of the whole table; bucket indices are shown only
+// when Verbose is set.
+void PlaceholderTable::print() {
+  for (int pindex = 0; pindex < table_size(); pindex++) {    
+    for (PlaceholderEntry* probe = bucket(pindex);
+                           probe != NULL; 
+                           probe = probe->next()) {
+      if (Verbose) tty->print("%4d: ", pindex);
+      tty->print(" place holder ");
+
+      probe->print();
+      tty->cr();
+    }
+  }
+}
+#endif
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/placeholders.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,336 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)placeholders.hpp	1.21 07/05/05 17:05:54 JVM"
+#endif
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class PlaceholderEntry;
+
+// Placeholder objects. These represent classes currently
+// being loaded, as well as arrays of primitives.
+//
+
+// Hashtable keyed by (class name symbol, initiating loader); entries are
+// PlaceholderEntry objects tracking in-progress load/define actions.
+class PlaceholderTable : public TwoOopHashtable {
+  friend class VMStructs;
+
+public:
+  PlaceholderTable(int table_size);
+
+  PlaceholderEntry* new_entry(int hash, symbolOop name, oop loader, bool havesupername, symbolOop supername);
+
+  // Typed covers over the generic Hashtable bucket accessors.
+  PlaceholderEntry* bucket(int i) {
+    return (PlaceholderEntry*)Hashtable::bucket(i);
+  }
+
+  PlaceholderEntry** bucket_addr(int i) {
+    return (PlaceholderEntry**)Hashtable::bucket_addr(i);
+  }
+
+  void add_entry(int index, PlaceholderEntry* new_entry) {
+    Hashtable::add_entry(index, (HashtableEntry*)new_entry);
+  }
+
+  void add_entry(int index, unsigned int hash, symbolHandle name, 
+                Handle loader, bool havesupername, symbolHandle supername);
+
+// This returns a symbolOop to match type for SystemDictionary
+  symbolOop find_entry(int index, unsigned int hash,
+                       symbolHandle name, Handle loader);
+
+  PlaceholderEntry* get_entry(int index, unsigned int hash,
+                       symbolHandle name, Handle loader);
+
+// caller to create a placeholder entry must enumerate an action
+// caller claims ownership of that action
+// For parallel classloading:
+// multiple LOAD_INSTANCE threads can proceed in parallel
+// multiple LOAD_SUPER threads can proceed in parallel
+// LOAD_SUPER needed to check for class circularity
+// DEFINE_CLASS: ultimately define class must be single threaded
+// on a class/classloader basis
+// so the head of that queue owns the token  
+// and the rest of the threads return the result the first thread gets
+ enum classloadAction {
+    LOAD_INSTANCE = 1,             // calling load_instance_class
+    LOAD_SUPER = 2,                // loading superclass for this class
+    DEFINE_CLASS = 3               // find_or_define class
+ };
+
+  // find_and_add returns probe pointer - old or new
+  // If no entry exists, add a placeholder entry and push SeenThread
+  // If entry exists, reuse entry and push SeenThread for classloadAction
+  PlaceholderEntry* find_and_add(int index, unsigned int hash, 
+                                 symbolHandle name, Handle loader, 
+                                 classloadAction action, symbolHandle supername, 
+                                 Thread* thread);
+
+  void remove_entry(int index, unsigned int hash,
+                    symbolHandle name, Handle loader);
+
+// Remove placeholder information
+  // (no-op if the entry is absent or still in use by other threads)
+  void find_and_remove(int index, unsigned int hash, 
+                       symbolHandle name, Handle loader, Thread* thread); 
+
+  // GC support.
+  void oops_do(OopClosure* f);
+
+  // JVMTI support
+  void entries_do(void f(symbolOop, oop));
+
+#ifndef PRODUCT
+  void print();
+#endif
+  void verify();
+};
+
+// SeenThread objects represent list of threads that are
+// currently performing a load action on a class.
+// For class circularity, set before loading a superclass.
+// For bootclasssearchpath, set before calling load_instance_class.
+// Defining must be single threaded on a class/classloader basis
+// For DEFINE_CLASS, the head of the queue owns the
+// define token and the rest of the threads wait to return the
+// result the first thread gets.
+// Node in a doubly-linked queue of threads performing one load action.
+// Allocated on the C heap; owned and freed by PlaceholderEntry's
+// add_seen_thread/remove_seen_thread under the SystemDictionary_lock.
+class SeenThread: public CHeapObj {
+private:
+   Thread *_thread;
+   SeenThread* _stnext;
+   SeenThread* _stprev;   // back link maintained for unlinking; no prev() accessor
+public:
+   SeenThread(Thread *thread) {
+       _thread = thread;
+       _stnext = NULL;
+       _stprev = NULL;
+   }
+   Thread* thread()                const { return _thread;}
+   void set_thread(Thread *thread) { _thread = thread; }
+
+   SeenThread* next()              const { return _stnext;}
+   void set_next(SeenThread *seen) { _stnext = seen; }
+   void set_prev(SeenThread *seen) { _stprev = seen; }
+   
+#ifndef PRODUCT
+  // Debug-only: print the threads on this queue starting at 'this'.
+  void printActionQ() {
+    SeenThread* seen = this;
+    while (seen != NULL) {
+      seen->thread()->print_value();
+      tty->print(", ");
+      seen = seen->next();
+    }
+  }
+#endif // PRODUCT
+};
+
+// Placeholder objects represent classes currently being loaded.
+// All threads examining the placeholder table must hold the
+// SystemDictionary_lock, so we don't need special precautions
+// on store ordering here.
+// The system dictionary is the only user of this class.
+
+class PlaceholderEntry : public HashtableEntry {
+  friend class VMStructs;
+
+
+ private:
+  oop               _loader;        // initiating loader
+  bool              _havesupername; // distinguish between null supername, and unknown
+  symbolOop         _supername;
+  Thread*           _definer;       // owner of define token
+  klassOop          _instanceKlass; // instanceKlass from successful define
+  SeenThread*       _superThreadQ;  // doubly-linked queue of Threads loading a superclass for this class
+  SeenThread*       _loadInstanceThreadQ;  // loadInstance thread
+                                    // can be multiple threads if classloader object lock broken by application
+                                    // or if classloader supports parallel classloading
+
+  SeenThread*       _defineThreadQ; // queue of Threads trying to define this class
+                                    // including _definer
+                                    // _definer owns token
+                                    // queue waits for and returns results from _definer
+
+ public:
+  // Simple accessors, used only by SystemDictionary
+  // Note: klass() is the class *name* symbol (the hashtable literal);
+  // the resolved class, if any, is held in _instanceKlass.
+  symbolOop          klass()               const { return (symbolOop)literal(); }
+  symbolOop*         klass_addr()          { return (symbolOop*)literal_addr(); }
+
+  oop                loader()              const { return _loader; }
+  void               set_loader(oop loader) { _loader = loader; }
+  oop*               loader_addr()         { return &_loader; }
+
+  bool               havesupername()       const { return _havesupername; }
+  void               set_havesupername(bool havesupername) { _havesupername = havesupername; }
+
+  symbolOop          supername()           const { return _supername; }
+  void               set_supername(symbolOop supername) { _supername = supername; }
+  symbolOop*         supername_addr()      { return &_supername; }
+
+  Thread*            definer()             const {return _definer; }
+  void               set_definer(Thread* definer) { _definer = definer; }
+
+  klassOop           instanceKlass()     const {return _instanceKlass; }
+  void               set_instanceKlass(klassOop instanceKlass) { _instanceKlass = instanceKlass; }
+  klassOop*          instanceKlass_addr()   { return &_instanceKlass; }
+
+  // Fixed: setter parameters were named 'SeenThread', shadowing the type
+  // name inside each setter body.
+  SeenThread*        superThreadQ()        const { return _superThreadQ; }
+  void               set_superThreadQ(SeenThread* seenthread) { _superThreadQ = seenthread; }
+
+  SeenThread*        loadInstanceThreadQ() const { return _loadInstanceThreadQ; }
+  void               set_loadInstanceThreadQ(SeenThread* seenthread) { _loadInstanceThreadQ = seenthread; }
+
+  SeenThread*        defineThreadQ()        const { return _defineThreadQ; }
+  void               set_defineThreadQ(SeenThread* seenthread) { _defineThreadQ = seenthread; }
+
+  PlaceholderEntry* next() const {
+    return (PlaceholderEntry*)HashtableEntry::next();
+  }
+
+  PlaceholderEntry** next_addr() {
+    return (PlaceholderEntry**)HashtableEntry::next_addr();
+  }
+
+  // Test for equality
+  // Entries are unique for class/classloader name pair
+  bool equals(symbolOop class_name, oop class_loader) const {
+    return (klass() == class_name && loader() == class_loader);
+  }
+
+  // Map a classloadAction to the head of the corresponding thread queue.
+  SeenThread* actionToQueue(PlaceholderTable::classloadAction action) {
+    // NULL-initialized so every path returns a defined value even if the
+    // default branch is (erroneously) reached.
+    SeenThread* queuehead = NULL;
+    switch (action) {
+      case PlaceholderTable::LOAD_INSTANCE:
+         queuehead = _loadInstanceThreadQ;
+         break;
+      case PlaceholderTable::LOAD_SUPER:
+         queuehead = _superThreadQ;
+         break;
+      case PlaceholderTable::DEFINE_CLASS:
+         queuehead = _defineThreadQ;
+         break;
+      default: Unimplemented();
+    }
+    return queuehead;
+  }
+
+  // Store a new queue head for the given action.
+  void set_threadQ(SeenThread* seenthread, PlaceholderTable::classloadAction action) {
+    switch (action) {
+      case PlaceholderTable::LOAD_INSTANCE:
+         _loadInstanceThreadQ = seenthread;
+         break;
+      case PlaceholderTable::LOAD_SUPER:
+         _superThreadQ = seenthread;
+         break;
+      case PlaceholderTable::DEFINE_CLASS:
+         _defineThreadQ = seenthread;
+         break;
+      default: Unimplemented();
+    }
+    return;
+  }
+
+  bool super_load_in_progress() {
+     return (_superThreadQ != NULL);
+  }
+
+  bool instance_load_in_progress() {
+    return (_loadInstanceThreadQ != NULL);
+  }
+
+  bool define_class_in_progress() {
+    return (_defineThreadQ != NULL);
+  }
+
+// Doubly-linked list of Threads per action for class/classloader pair
+// Class circularity support: links in thread before loading superclass
+// bootstrapsearchpath support: links in a thread before load_instance_class
+// definers: use as queue of define requestors, including owner of
+// define token. Appends for debugging of requestor order
+  void add_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
+    assert_lock_strong(SystemDictionary_lock);
+    SeenThread* threadEntry = new SeenThread(thread);
+    SeenThread* seen = actionToQueue(action);
+
+    if (seen == NULL) {
+      set_threadQ(threadEntry, action);
+      return;
+    }
+    // Append at the tail to preserve requestor order.
+    SeenThread* next;
+    while ((next = seen->next()) != NULL) {
+      seen = next;
+    }
+    seen->set_next(threadEntry);
+    threadEntry->set_prev(seen);
+    return;
+  }
+
+  // Returns true iff 'thread' is currently on the queue for 'action'.
+  bool check_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
+    assert_lock_strong(SystemDictionary_lock);
+    SeenThread* threadQ = actionToQueue(action);
+    SeenThread* seen = threadQ;
+    while (seen) {
+      if (thread == seen->thread()) {
+        return true;
+      }
+      seen = seen->next();
+    }
+    return false;
+  }
+
+  // returns true if seenthreadQ is now empty
+  // Note, caller must ensure probe still exists while holding
+  // SystemDictionary_lock
+  // ignores if cleanup has already been done
+  // if found, deletes SeenThread
+  bool remove_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
+    assert_lock_strong(SystemDictionary_lock);
+    SeenThread* threadQ = actionToQueue(action);
+    SeenThread* seen = threadQ;
+    SeenThread* prev = NULL;
+    while (seen) {
+      if (thread == seen->thread()) {
+        // Unlink 'seen' from the doubly-linked queue, fixing up both
+        // the predecessor's next link (or the queue head) and the
+        // successor's prev link.
+        if (prev) {
+          prev->set_next(seen->next());
+        } else {
+          set_threadQ(seen->next(), action);
+        }
+        if (seen->next()) {
+          seen->next()->set_prev(prev);
+        }
+        delete seen;
+        break;
+      }
+      prev = seen;
+      seen = seen->next();
+    }
+    return (actionToQueue(action) == NULL);
+  }
+
+  // GC support
+  // Applies "f->do_oop" to all root oops in the placeholder table.
+  void oops_do(OopClosure* blk);
+
+  // Print method doesn't append a cr
+  void print() const  PRODUCT_RETURN;
+  void verify() const;
+};
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,124 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)resolutionErrors.cpp	1.6 07/05/05 17:05:54 JVM"
+#endif
+/*
+ * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_resolutionErrors.cpp.incl"
+
+// add new entry to the table
+// Records that resolving constant-pool slot 'cp_index' of 'pool' failed
+// with error symbol 'error'; 'index'/'hash' are precomputed by the caller.
+void ResolutionErrorTable::add_entry(int index, unsigned int hash, 
+				     constantPoolHandle pool, int cp_index, symbolHandle error)
+{
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert(!pool.is_null() && !error.is_null(), "adding NULL obj");
+
+  ResolutionErrorEntry* entry = new_entry(hash, pool(), cp_index, error());
+  add_entry(index, entry);
+}
+
+// find entry in the table
+// Returns the recorded resolution error for (pool, cp_index), or NULL.
+// Since the hash is computed from the pool's identity hash plus cp_index
+// (see compute_hash), matching both hash and pool uniquely identifies the
+// (pool, cp_index) pair, so cp_index needs no separate comparison.
+ResolutionErrorEntry* ResolutionErrorTable::find_entry(int index, unsigned int hash, 
+						       constantPoolHandle pool, int cp_index)
+{
+  assert_locked_or_safepoint(SystemDictionary_lock);
+
+  for (ResolutionErrorEntry *error_probe = bucket(index);
+                         error_probe != NULL;
+                         error_probe = error_probe->next()) {
+    if (error_probe->hash() == hash && error_probe->pool() == pool()) {
+      return error_probe;  // fixed: stray ";;" removed, "if" re-indented
+    }
+  }
+  return NULL;
+}
+
+// create new error entry
+// The constant pool becomes the hashtable literal; cp_index and the
+// error symbol are stored in the entry's own fields.
+ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, constantPoolOop pool, 
+						      int cp_index, symbolOop error)
+{   
+  ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable::new_entry(hash, pool);
+  entry->set_cp_index(cp_index);
+  entry->set_error(error);
+  
+  return entry;
+}
+
+// create resolution error table
+// Empty table with 'table_size' buckets; entries are sized for
+// ResolutionErrorEntry.
+ResolutionErrorTable::ResolutionErrorTable(int table_size)
+    : Hashtable(table_size, sizeof(ResolutionErrorEntry)) {
+}
+
+// GC support
+// Visit the oop fields of every entry; both the pool and error symbol
+// must be non-NULL in a well-formed table.
+void ResolutionErrorTable::oops_do(OopClosure* f) {
+  for (int i = 0; i < table_size(); i++) {
+    for (ResolutionErrorEntry* probe = bucket(i); 
+                           probe != NULL; 
+                           probe = probe->next()) {
+      assert(probe->pool() != (constantPoolOop)NULL, "resolution error table is corrupt");
+      assert(probe->error() != (symbolOop)NULL, "resolution error table is corrupt");
+      probe->oops_do(f);
+    }
+  }
+}
+
+// GC support
+// Visit this entry's two oop fields: the constant pool (the hashtable
+// literal) and the error symbol.
+void ResolutionErrorEntry::oops_do(OopClosure* blk) {
+  blk->do_oop((oop*)pool_addr());
+  blk->do_oop((oop*)error_addr());
+}
+
+// We must keep the symbolOop used in the error alive. The constantPoolOop will
+// decide when the entry can be purged.
+void ResolutionErrorTable::always_strong_classes_do(OopClosure* blk) {
+  for (int i = 0; i < table_size(); i++) {
+    ResolutionErrorEntry* probe = bucket(i);
+    while (probe != NULL) {
+      assert(probe->error() != (symbolOop)NULL, "resolution error table is corrupt");
+      blk->do_oop((oop*)probe->error_addr());
+      probe = probe->next();
+    }
+  }
+}
+
+// Remove unloaded entries from the table
+// Runs at a safepoint: entries whose constant pool is no longer alive
+// (per 'is_alive') are unlinked and freed; live entries are kept.
+void ResolutionErrorTable::purge_resolution_errors(BoolObjectClosure* is_alive) {
+  // Fixed: the assert statement was missing its terminating semicolon.
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  for (int i = 0; i < table_size(); i++) {
+    // Walk via a pointer-to-link so dead entries can be spliced out in place.
+    for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
+      ResolutionErrorEntry* entry = *p;
+      assert(entry->pool() != (constantPoolOop)NULL, "resolution error table is corrupt");
+      constantPoolOop pool = entry->pool();
+      if (is_alive->do_object_b(pool)) {
+        p = entry->next_addr();   // pool still live: keep entry, advance
+      } else {
+        *p = entry->next();       // pool dead: unlink and reclaim entry
+        free_entry(entry);
+      }
+    }
+  }
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,103 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)resolutionErrors.hpp	1.6 07/05/05 17:05:54 JVM"
+#endif
+/*
+ * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class ResolutionErrorEntry;
+
+// ResolutionError objects are used to record errors encountered during
+// constant pool resolution (JVMS 5.4.3).
+
+// Hashtable keyed by (constant pool, cp_index) recording resolution
+// errors so that repeated resolution attempts rethrow the same error.
+class ResolutionErrorTable : public Hashtable {
+
+public:
+  ResolutionErrorTable(int table_size);
+
+  ResolutionErrorEntry* new_entry(int hash, constantPoolOop pool, int cp_index, symbolOop error);
+
+  // Typed covers over the generic Hashtable bucket accessors.
+  ResolutionErrorEntry* bucket(int i) {
+    return (ResolutionErrorEntry*)Hashtable::bucket(i);
+  }
+
+  ResolutionErrorEntry** bucket_addr(int i) {
+    return (ResolutionErrorEntry**)Hashtable::bucket_addr(i);
+  }
+
+  void add_entry(int index, ResolutionErrorEntry* new_entry) {
+    Hashtable::add_entry(index, (HashtableEntry*)new_entry);
+  }
+  
+  void add_entry(int index, unsigned int hash,
+		 constantPoolHandle pool, int which, symbolHandle error);
+		 
+
+  // find error given the constant pool and constant pool index
+  ResolutionErrorEntry* find_entry(int index, unsigned int hash, 
+				   constantPoolHandle pool, int cp_index);
+
+
+  // Hash combines the pool's identity hash with the slot index, so a
+  // (pool, cp_index) pair maps to a stable value.
+  unsigned int compute_hash(constantPoolHandle pool, int cp_index) {
+    return (unsigned int) pool->identity_hash() + cp_index;
+  }
+
+  // purges unloaded entries from the table
+  void purge_resolution_errors(BoolObjectClosure* is_alive);	
+ 
+  // this table keeps symbolOops alive 
+  void always_strong_classes_do(OopClosure* blk);
+
+  // GC support.
+  void oops_do(OopClosure* f);
+};
+
+
+// One recorded resolution error: the constant pool is the hashtable
+// literal; the slot index and error symbol are stored alongside.
+class ResolutionErrorEntry : public HashtableEntry {
+ private:
+  int		    _cp_index;
+  symbolOop	    _error;
+
+ public:
+  constantPoolOop    pool() const 		{ return (constantPoolOop)literal(); }
+  constantPoolOop*   pool_addr()  		{ return (constantPoolOop*)literal_addr(); }
+
+  int		     cp_index() const		{ return _cp_index; }
+  void		     set_cp_index(int cp_index) { _cp_index = cp_index; }
+
+  symbolOop          error() const 		{ return _error; }
+  void		     set_error(symbolOop e)	{ _error = e; }
+  symbolOop*         error_addr()		{ return &_error; }
+
+  // Typed covers over the generic HashtableEntry chain links.
+  ResolutionErrorEntry* next() const {
+    return (ResolutionErrorEntry*)HashtableEntry::next();
+  }
+
+  ResolutionErrorEntry** next_addr() {
+    return (ResolutionErrorEntry**)HashtableEntry::next_addr();
+  }
+
+  // GC support
+  void oops_do(OopClosure* blk);
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/stackMapFrame.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,307 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)stackMapFrame.cpp	1.24 07/05/05 17:06:57 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_stackMapFrame.cpp.incl"
+
+// Type-checker constructor: allocates the locals/stack type arrays at their
+// maximum sizes in the verifying thread's resource area, and marks every
+// slot bogus (no valid locals or stack entries yet; _locals_size and
+// _stack_size start at 0).
+StackMapFrame::StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* v) : 
+                      _offset(0), _locals_size(0), _stack_size(0), _flags(0), 
+                      _max_locals(max_locals), _max_stack(max_stack),
+                      _verifier(v) {
+  Thread* thr = v->thread();
+  _locals = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_locals);
+  _stack = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_stack);
+  int32_t i;
+  for(i = 0; i < max_locals; i++) {
+    _locals[i] = VerificationType::bogus_type();
+  }
+  for(i = 0; i < max_stack; i++) {
+    _stack[i] = VerificationType::bogus_type();
+  }  
+}
+
+// Build a frame at the same offset that shares this frame's locals array but
+// has an empty operand stack; the stack array is allocated with a single
+// (unused) element.
+StackMapFrame* StackMapFrame::frame_in_exception_handler(u1 flags) {
+  Thread* current = _verifier->thread();
+  VerificationType* one_slot_stack =
+    NEW_RESOURCE_ARRAY_IN_THREAD(current, VerificationType, 1);
+  return new StackMapFrame(_offset, flags, _locals_size, 0, _max_locals,
+                           _max_stack, _locals, one_slot_stack, _verifier);
+}
+
+// Scan the locals array (all _max_locals slots) and the live portion of the
+// operand stack; report whether any slot holds an uninitialized-object type.
+bool StackMapFrame::has_new_object() const {
+  for (int32_t idx = 0; idx < _max_locals; idx++) {
+    if (_locals[idx].is_uninitialized()) return true;
+  }
+  for (int32_t idx = 0; idx < _stack_size; idx++) {
+    if (_stack[idx].is_uninitialized()) return true;
+  }
+  return false;
+}
+
+// Replace every occurrence of old_object, in both the locals and the live
+// stack slots, with new_object.  When old_object is uninitialized-this the
+// frame flags are cleared as well.
+void StackMapFrame::initialize_object(
+    VerificationType old_object, VerificationType new_object) {
+  for (int32_t idx = 0; idx < _max_locals; idx++) {
+    if (_locals[idx].equals(old_object)) {
+      _locals[idx] = new_object;
+    }
+  }
+  for (int32_t idx = 0; idx < _stack_size; idx++) {
+    if (_stack[idx].equals(old_object)) {
+      _stack[idx] = new_object;
+    }
+  }
+  if (old_object == VerificationType::uninitialized_this_type()) {
+    // "this" has been initialized - reset flags
+    _flags = 0;
+  }
+}
+
+// Initialize the locals array from method m's signature (receiver first for
+// instance methods, then each declared parameter) and set _locals_size.
+// Returns the method's return type as a VerificationType (bogus for void or
+// on error).  For a constructor of any class other than java.lang.Object the
+// receiver slot is uninitialized-this and FLAG_THIS_UNINIT is set.
+VerificationType StackMapFrame::set_locals_from_arg(
+    const methodHandle m, VerificationType thisKlass, TRAPS) {
+  symbolHandle signature(THREAD, m->signature());
+  SignatureStream ss(signature);
+  int init_local_num = 0;
+  if (!m->is_static()) {
+    init_local_num++;
+    // add one extra argument for instance method
+    if (m->name() == vmSymbols::object_initializer_name() &&
+       thisKlass.name() != vmSymbols::java_lang_Object()) {
+      _locals[0] = VerificationType::uninitialized_this_type();
+      _flags |= FLAG_THIS_UNINIT;
+    } else {
+      _locals[0] = thisKlass;
+    }
+  } 
+  
+  // local num may be greater than size of parameters because long/double occupies two slots
+  while(!ss.at_return_type()) {
+    init_local_num += _verifier->change_sig_to_verificationType(
+      &ss, &_locals[init_local_num], 
+      CHECK_VERIFY_(verifier(), VerificationType::bogus_type()));
+    ss.next();
+  }
+  _locals_size = init_local_num;
+
+  // Map the signature's return type to its verification type.
+  switch (ss.type()) {
+    case T_OBJECT:
+    case T_ARRAY:
+    {
+      symbolOop sig = ss.as_symbol(CHECK_(VerificationType::bogus_type()));
+      return VerificationType::reference_type(symbolHandle(THREAD, sig));
+    }
+    case T_INT:     return VerificationType::integer_type();
+    case T_BYTE:    return VerificationType::byte_type();
+    case T_CHAR:    return VerificationType::char_type();
+    case T_SHORT:   return VerificationType::short_type();
+    case T_BOOLEAN: return VerificationType::boolean_type();
+    case T_FLOAT:   return VerificationType::float_type();
+    case T_DOUBLE:  return VerificationType::double_type();
+    case T_LONG:    return VerificationType::long_type();
+    case T_VOID:    return VerificationType::bogus_type();
+    default:
+      ShouldNotReachHere();
+  }
+  return VerificationType::bogus_type();
+}
+
+// Copy locals from src into this frame's locals array, bounded by the
+// smaller of the two locals sizes.
+void StackMapFrame::copy_locals(const StackMapFrame* src) {
+  int32_t limit = _locals_size;
+  if (src->locals_size() < limit) {
+    limit = src->locals_size();
+  }
+  for (int32_t i = 0; i < limit; i++) {
+    _locals[i] = src->locals()[i];
+  }
+}
+
+// Copy stack slots from src into this frame's stack array, bounded by the
+// smaller of the two stack sizes.
+void StackMapFrame::copy_stack(const StackMapFrame* src) {
+  int32_t limit = _stack_size;
+  if (src->stack_size() < limit) {
+    limit = src->stack_size();
+  }
+  for (int32_t i = 0; i < limit; i++) {
+    _stack[i] = src->stack()[i];
+  }
+}
+
+
+// Element-wise assignability check: true iff from[i] is assignable to to[i]
+// for every i in [0, len).
+bool StackMapFrame::is_assignable_to(
+    VerificationType* from, VerificationType* to, int32_t len, TRAPS) const {
+  for (int32_t i = 0; i < len; i++) {
+    if (!to[i].is_assignable_from(from[i], verifier()->current_class(),
+                                  THREAD)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// True iff this frame is assignable to target: same max_locals/stack_size,
+// element-wise assignable locals and stack, and this frame's flags are a
+// subset of target's (checked via the OR-equality below).
+bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const {
+  if (_max_locals != target->max_locals() || _stack_size != target->stack_size()) {
+    return false;
+  }
+  // Only need to compare type elements up to target->locals() or target->stack().
+  // The remaining type elements in this state can be ignored because they are
+  // assignable to bogus type.
+  bool match_locals = is_assignable_to(
+    _locals, target->locals(), target->locals_size(), CHECK_false);
+  bool match_stack = is_assignable_to(
+    _stack, target->stack(), _stack_size, CHECK_false);
+  // Flags subset test: no flag may be set here that target lacks.
+  bool match_flags = (_flags | target->flags()) == target->flags();
+  return (match_locals && match_stack && match_flags);
+}
+
+// Slow-path pop: reports a verification error (underflow or a top-of-stack
+// value not assignable to 'type') and returns bogus in that case; otherwise
+// pops and returns the top slot, scrubbing it to bogus in debug builds.
+VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
+  if (_stack_size <= 0) {
+    verifier()->verify_error(_offset, "Operand stack underflow");
+    return VerificationType::bogus_type();
+  }
+  VerificationType top = _stack[--_stack_size];
+  bool subtype = type.is_assignable_from(
+    top, verifier()->current_class(), CHECK_(VerificationType::bogus_type()));
+  if (!subtype) {
+    verifier()->verify_error(_offset, "Bad type on operand stack");
+    return VerificationType::bogus_type();
+  }
+  // Scrub the vacated slot in debug builds so stale types are visible.
+  NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
+  return top;
+}
+
+// Return the type in local slot 'index' after checking it is assignable to
+// 'type'; reports a verification error and returns bogus on a bad index or
+// type mismatch.  On success, grows _locals_size to cover the slot read.
+VerificationType StackMapFrame::get_local(
+    int32_t index, VerificationType type, TRAPS) {
+  if (index >= _max_locals) {
+    verifier()->verify_error(_offset, "Local variable table overflow");
+    return VerificationType::bogus_type(); 
+  }
+  bool subtype = type.is_assignable_from(_locals[index], 
+    verifier()->current_class(), CHECK_(VerificationType::bogus_type()));
+  if (!subtype) {
+    verifier()->verify_error(_offset, "Bad local variable type");
+    return VerificationType::bogus_type();
+  }
+  if(index >= _locals_size) { _locals_size = index + 1; }
+  return _locals[index];
+}
+
+// Two-slot (long/double) variant of get_local: checks that slots index and
+// index+1 hold the first and second halves of the category-2 type.
+// NOTE(review): the bound here is _locals_size (only already-initialized
+// slots are readable) whereas get_local bounds against _max_locals and then
+// grows _locals_size -- confirm this asymmetry is intentional.
+void StackMapFrame::get_local_2(
+    int32_t index, VerificationType type1, VerificationType type2, TRAPS) {
+  assert(type1.is_long() || type1.is_double(), "must be long/double");
+  assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
+  if (index >= _locals_size - 1) {
+    verifier()->verify_error(_offset, "get long/double overflows locals");
+    return;
+  }
+  bool subtype1 = type1.is_assignable_from(
+    _locals[index], verifier()->current_class(), CHECK);
+  bool subtype2 = type2.is_assignable_from(
+    _locals[index+1], verifier()->current_class(), CHECK);
+  if (!subtype1 || !subtype2) {
+    verifier()->verify_error(_offset, "Bad local variable type");
+    return;
+  }
+}
+
+// Record that local slot 'index' now holds 'type'.  Invalidates the other
+// half of any long/double pair this store overlaps, then grows _locals_size
+// when the store extends past it (asserting intervening holes are bogus).
+void StackMapFrame::set_local(int32_t index, VerificationType type, TRAPS) {
+  assert(!type.is_check(), "Must be a real type");
+  if (index >= _max_locals) {
+    // Offset goes first, matching every other verify_error call site in this
+    // file; the old (message, offset) order made the message act as the
+    // format string and silently dropped the offset argument.
+    verifier()->verify_error(_offset, "Local variable table overflow");
+    return;
+  }
+  // If type at index is double or long, set the next location to be unusable
+  if (_locals[index].is_double() || _locals[index].is_long()) {
+    assert((index + 1) < _locals_size, "Local variable table overflow");
+    _locals[index + 1] = VerificationType::bogus_type();
+  }
+  // If type at index is double_2 or long_2, set the previous location to be unusable
+  if (_locals[index].is_double2() || _locals[index].is_long2()) {
+    assert(index >= 1, "Local variable table underflow");
+    _locals[index - 1] = VerificationType::bogus_type();
+  }
+  _locals[index] = type;
+  if (index >= _locals_size) {
+#ifdef ASSERT
+    for (int i=_locals_size; i<index; i++) {
+      assert(_locals[i] == VerificationType::bogus_type(), 
+             "holes must be bogus type");
+    }
+#endif
+    _locals_size = index + 1;
+  }
+}
+
+// Two-slot (long/double) variant of set_local: stores the category-2 pair
+// into slots index and index+1, invalidating any pair halves the store
+// overlaps, then grows _locals_size when the store extends past it.
+void StackMapFrame::set_local_2(
+    int32_t index, VerificationType type1, VerificationType type2, TRAPS) {
+  assert(type1.is_long() || type1.is_double(), "must be long/double");
+  assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
+  if (index >= _max_locals - 1) {
+    // Offset goes first, matching every other verify_error call site in this
+    // file; the old (message, offset) order made the message act as the
+    // format string and silently dropped the offset argument.
+    verifier()->verify_error(_offset, "Local variable table overflow");
+    return;
+  }
+  // If type at index+1 is double or long, set the next location to be unusable
+  if (_locals[index+1].is_double() || _locals[index+1].is_long()) {
+    assert((index + 2) < _locals_size, "Local variable table overflow");
+    _locals[index + 2] = VerificationType::bogus_type();
+  }
+  // If type at index is double_2 or long_2, set the previous location to be unusable
+  if (_locals[index].is_double2() || _locals[index].is_long2()) {
+    assert(index >= 1, "Local variable table underflow");
+    _locals[index - 1] = VerificationType::bogus_type();
+  }
+  _locals[index] = type1;
+  _locals[index+1] = type2;
+  if (index >= _locals_size - 1) {
+#ifdef ASSERT
+    for (int i=_locals_size; i<index; i++) {
+      assert(_locals[i] == VerificationType::bogus_type(), 
+             "holes must be bogus type");
+    }
+#endif
+    _locals_size = index + 2;
+  }
+}
+
+#ifndef PRODUCT
+
+// Debug dump of this frame (offset, flags, live locals and stack types).
+void StackMapFrame::print() const {
+  tty->print_cr("stackmap_frame[%d]:", _offset);
+  tty->print_cr("flags = 0x%x", _flags);
+  tty->print("locals[%d] = { ", _locals_size);
+  for (int32_t k = 0; k < _locals_size; k++) {
+    _locals[k].print_on(tty);
+  }
+  tty->print_cr(" }");
+  tty->print("stack[%d] = { ", _stack_size);
+  for (int32_t k = 0; k < _stack_size; k++) {
+    _stack[k].print_on(tty);
+  }
+  tty->print_cr(" }");
+}
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/stackMapFrame.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,230 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)stackMapFrame.hpp	1.20 07/05/05 17:06:57 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// A StackMapFrame represents one frame in the stack map attribute.
+
+// Stack map frame flags (see JVMS StackMapTable attribute).
+enum {
+  FLAG_THIS_UNINIT = 0x01  // "this" of a constructor has not been initialized yet
+};
+
+// One verification frame: the inferred types of the locals and operand stack
+// at a given bytecode offset, plus frame flags, owned by a ClassVerifier.
+class StackMapFrame : public ResourceObj {
+ private:
+  int32_t _offset;
+
+  // See comment in StackMapTable about _frame_count about why these 
+  // fields are int32_t instead of u2.
+  int32_t _locals_size;  // number of valid type elements in _locals 
+  int32_t _stack_size;   // number of valid type elements in _stack
+
+  int32_t _max_locals;
+  int32_t _max_stack;
+
+  u1 _flags;
+  VerificationType* _locals; // local variable type array
+  VerificationType* _stack;  // operand stack type array
+
+  ClassVerifier* _verifier;  // the verifier verifying this method
+
+ public:
+  // constructors
+
+  // This constructor is used by the type checker to allocate frames 
+  // in type state, which have _max_locals and _max_stack array elements
+  // in _locals and _stack.
+  StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* verifier);
+
+  // This constructor is used to initialize stackmap frames in stackmap table,
+  // which have _locals_size and _stack_size array elements in _locals and _stack.
+  StackMapFrame(int32_t offset,
+                u1 flags,
+                u2 locals_size,
+                u2 stack_size, 
+                u2 max_locals,
+                u2 max_stack,
+                VerificationType* locals,
+                VerificationType* stack,
+                ClassVerifier* v) : _offset(offset), _flags(flags),
+                                    _locals_size(locals_size),
+                                    _stack_size(stack_size),
+                                    _max_locals(max_locals),
+                                    _max_stack(max_stack),
+                                    _locals(locals), _stack(stack),
+                                    _verifier(v) { }
+
+  inline void set_offset(int32_t offset)      { _offset = offset; }
+  inline void set_verifier(ClassVerifier* v)  { _verifier = v; }
+  inline void set_flags(u1 flags)             { _flags = flags; }
+  inline void set_locals_size(u2 locals_size) { _locals_size = locals_size; }
+  inline void set_stack_size(u2 stack_size)   { _stack_size = stack_size; }
+  inline void clear_stack()                   { _stack_size = 0; }
+  inline int32_t offset()   const             { return _offset; }
+  inline ClassVerifier* verifier() const      { return _verifier; }
+  inline u1 flags() const                     { return _flags; }
+  inline int32_t locals_size() const          { return _locals_size; }
+  inline VerificationType* locals() const     { return _locals; }
+  inline int32_t stack_size() const           { return _stack_size; }
+  inline VerificationType* stack() const      { return _stack; }
+  inline int32_t max_locals() const           { return _max_locals; }
+  inline int32_t max_stack() const            { return _max_stack; }
+  inline bool flag_this_uninit() const        { return _flags & FLAG_THIS_UNINIT; }
+
+  // Set locals and stack types to bogus
+  inline void reset() {
+    int32_t i;
+    for (i = 0; i < _max_locals; i++) {
+      _locals[i] = VerificationType::bogus_type();
+    }
+    for (i = 0; i < _max_stack; i++) {
+      _stack[i] = VerificationType::bogus_type();
+    }
+  }
+
+  // Return a StackMapFrame with the same local variable array and empty stack.
+  // The stack array is allocated with one unused element.
+  StackMapFrame* frame_in_exception_handler(u1 flags);
+
+  // Set local variable type array based on m's signature.
+  VerificationType set_locals_from_arg(
+    const methodHandle m, VerificationType thisKlass, TRAPS);
+
+  // Search local variable type array and stack type array.
+  // Return true if an uninitialized object is found.
+  bool has_new_object() const;
+
+  // Search local variable type array and stack type array.
+  // Set every element with type of old_object to new_object.
+  void initialize_object(
+    VerificationType old_object, VerificationType new_object);
+
+  // Copy local variable type array in src into this local variable type array.
+  void copy_locals(const StackMapFrame* src);
+
+  // Copy stack type array in src into this stack type array.
+  void copy_stack(const StackMapFrame* src);
+
+  // Return true if this stack map frame is assignable to target.
+  bool is_assignable_to(const StackMapFrame* target, TRAPS) const;
+
+  // Push type into stack type array.
+  inline void push_stack(VerificationType type, TRAPS) {
+    assert(!type.is_check(), "Must be a real type");
+    if (_stack_size >= _max_stack) {
+      verifier()->verify_error(_offset, "Operand stack overflow");
+      return;
+    }
+    _stack[_stack_size++] = type;
+  }
+
+  // Push a category-2 pair (first half, then second half).
+  inline void push_stack_2(
+      VerificationType type1, VerificationType type2, TRAPS) {
+    assert(type1.is_long() || type1.is_double(), "must be long/double");
+    assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
+    if (_stack_size >= _max_stack - 1) {
+      verifier()->verify_error(_offset, "Operand stack overflow");
+      return;
+    }
+    _stack[_stack_size++] = type1;
+    _stack[_stack_size++] = type2;
+  }
+
+  // Pop and return the top type on stack without verifying.
+  inline VerificationType pop_stack(TRAPS) {
+    if (_stack_size <= 0) {
+      verifier()->verify_error(_offset, "Operand stack underflow");
+      return VerificationType::bogus_type();
+    }
+    // Put bogus type to indicate it's no longer valid.
+    // Added to make it consistent with the other pop_stack method.
+    VerificationType top = _stack[--_stack_size];
+    NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
+    return top;
+  }
+
+  // Pop and return the top type on stack type array after verifying it
+  // is assignable to type.
+  inline VerificationType pop_stack(VerificationType type, TRAPS) {
+    if (_stack_size != 0) {
+      VerificationType top = _stack[_stack_size - 1];
+      bool subtype = type.is_assignable_from(
+        top, verifier()->current_class(), 
+        CHECK_(VerificationType::bogus_type()));
+      if (subtype) {
+        _stack_size --;
+        NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
+        return top;
+      }
+    }
+    // Slow path reports the verification error.
+    return pop_stack_ex(type, THREAD);
+  }
+
+  // Pop a category-2 pair; type1 is the second half (top of stack), type2
+  // the first half beneath it.
+  inline void pop_stack_2(
+      VerificationType type1, VerificationType type2, TRAPS) {
+    // Assertion messages were swapped relative to the roles asserted here
+    // (compare push_stack_2/get_local_2, where is_long pairs with
+    // "must be long/double" and is_long2 with "must be long/double_2").
+    assert(type1.is_long2() || type1.is_double2(), "must be long/double_2");
+    assert(type2.is_long() || type2.is_double(), "must be long/double");
+    if (_stack_size >= 2) {
+      VerificationType top1 = _stack[_stack_size - 1];
+      bool subtype1 = type1.is_assignable_from(
+        top1, verifier()->current_class(), CHECK);
+      VerificationType top2 = _stack[_stack_size - 2];
+      bool subtype2 = type2.is_assignable_from(
+        top2, verifier()->current_class(), CHECK);
+      if (subtype1 && subtype2) {
+        _stack_size -= 2;
+        NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
+        NOT_PRODUCT( _stack[_stack_size+1] = VerificationType::bogus_type(); )
+        return;
+      }
+    }
+    pop_stack_ex(type1, THREAD);
+    pop_stack_ex(type2, THREAD);
+  }
+
+  // Uncommon case that throws exceptions.
+  VerificationType pop_stack_ex(VerificationType type, TRAPS);
+
+  // Return the type at index in local variable array after verifying
+  // it is assignable to type.
+  VerificationType get_local(int32_t index, VerificationType type, TRAPS);
+  // For long/double.
+  void get_local_2(
+    int32_t index, VerificationType type1, VerificationType type2, TRAPS);
+
+  // Set element at index in local variable array to type.
+  void set_local(int32_t index, VerificationType type, TRAPS);
+  // For long/double.
+  void set_local_2(
+    int32_t index, VerificationType type1, VerificationType type2, TRAPS);
+
+  // Private auxiliary method used only in is_assignable_to(StackMapFrame).
+  // Returns true if src is assignable to target.
+  bool is_assignable_to(
+    VerificationType* src, VerificationType* target, int32_t len, TRAPS) const;
+
+  // Debugging
+  void print() const PRODUCT_RETURN;
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/stackMapTable.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,431 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)stackMapTable.cpp	1.28 07/05/05 17:06:53 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_stackMapTable.cpp.incl"
+
+// Build the table by reading _frame_count frames from the class file via
+// 'reader', chaining each off the previous one (init_frame first).  Each
+// frame's offset must land on a real instruction start (code_data[offset]
+// nonzero) inside the method's bytecode.
+StackMapTable::StackMapTable(StackMapReader* reader, StackMapFrame* init_frame,
+                             u2 max_locals, u2 max_stack,
+                             char* code_data, int code_len, TRAPS) {
+  _code_length = code_len;
+  _frame_count = reader->get_frame_count();
+  if (_frame_count > 0) {
+    _frame_array = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD,
+                                                StackMapFrame*, _frame_count);
+    StackMapFrame* pre_frame = init_frame;
+    for (int32_t i = 0; i < _frame_count; i++) {
+      StackMapFrame* frame = reader->next(
+        pre_frame, i == 0, max_locals, max_stack, 
+        CHECK_VERIFY(pre_frame->verifier()));
+      _frame_array[i] = frame;
+      int offset = frame->offset();
+      // Offsets must point at instruction boundaries within the code.
+      if (offset >= code_len || code_data[offset] == 0) {
+        frame->verifier()->verify_error("StackMapTable error: bad offset");
+        return;
+      }
+      pre_frame = frame;
+    }
+  }
+  // Verify the attribute has no trailing bytes.
+  reader->check_end(CHECK);
+}
+
+// This method is only called by method in StackMapTable.
+// Linear search for the frame recorded at 'offset'.  Returns _frame_count
+// (an out-of-range index) when no frame exists at that offset; callers
+// detect this via their own range check.
+int StackMapTable::get_index_from_offset(int32_t offset) const {
+  int idx = 0;
+  while (idx < _frame_count && _frame_array[idx]->offset() != offset) {
+    idx++;
+  }
+  return idx;
+}
+
+// Convenience overload: look up the frame index for 'target' and delegate to
+// the index-taking match_stackmap (which reports an error for a missing
+// frame, since a not-found lookup yields an out-of-range index).
+bool StackMapTable::match_stackmap(
+    StackMapFrame* frame, int32_t target, 
+    bool match, bool update, TRAPS) const {
+  int index = get_index_from_offset(target);
+
+  return match_stackmap(
+    frame, target, index, match, 
+    update, CHECK_VERIFY_(frame->verifier(), false));
+}
+
+// Match and/or update current_frame to the frame in stackmap table with
+// specified offset and frame index. Return true if the two frames match.
+//
+// The values of match and update are:                  _match__update_
+//
+// checking a branch target/exception handler:           true   false
+// linear bytecode verification following an 
+// unconditional branch:                                 false  true
+// linear bytecode verification not following an 
+// unconditional branch:                                 true   true
+// See the match/update truth table in the comment above.  Reports an error
+// (and returns false) when no stackmap frame exists at the target.
+bool StackMapTable::match_stackmap(
+    StackMapFrame* frame, int32_t target, int32_t frame_index,
+    bool match, bool update, TRAPS) const {
+  if (frame_index < 0 || frame_index >= _frame_count) {
+    frame->verifier()->verify_error(frame->offset(),
+      "Expecting a stackmap frame at branch target %d", target);
+    return false;
+  }
+
+  bool result = true;
+  StackMapFrame *stackmap_frame = _frame_array[frame_index];
+  if (match) {
+    // Has direct control flow from last instruction, need to match the two
+    // frames.
+    result = frame->is_assignable_to(
+      stackmap_frame, CHECK_VERIFY_(frame->verifier(), false));
+  }
+  if (update) { 
+    // Use the frame in stackmap table as current frame
+    int lsize = stackmap_frame->locals_size();
+    int ssize = stackmap_frame->stack_size();
+    if (frame->locals_size() > lsize || frame->stack_size() > ssize) {
+      // Make sure unused type array items are all _bogus_type.
+      frame->reset();
+    }
+    frame->set_locals_size(lsize);
+    frame->copy_locals(stackmap_frame);
+    frame->set_stack_size(ssize);
+    frame->copy_stack(stackmap_frame);
+    frame->set_flags(stackmap_frame->flags());
+  }
+  return result;
+}
+
+// Verify a branch target: the current frame must match the recorded frame at
+// 'target', the target must be inside the method's code, and no uninitialized
+// object may survive a backward branch.
+void StackMapTable::check_jump_target(
+    StackMapFrame* frame, int32_t target, TRAPS) const {
+  bool match = match_stackmap(
+    frame, target, true, false, CHECK_VERIFY(frame->verifier()));
+  if (!match || (target < 0 || target >= _code_length)) {
+    frame->verifier()->verify_error(frame->offset(),
+      "Inconsistent stackmap frames at branch target %d", target); 
+    return;
+  }
+  // check if uninitialized objects exist on backward branches
+  check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
+}
+
+// Reject a backward branch (target before the current offset) taken while an
+// uninitialized object is still live in the frame.
+void StackMapTable::check_new_object(
+    const StackMapFrame* frame, int32_t target, TRAPS) const {
+  bool is_backward_branch = frame->offset() > target;
+  if (is_backward_branch && frame->has_new_object()) {
+    frame->verifier()->verify_error(frame->offset(),
+      "Uninitialized object exists on backward branch %d", target);
+    return;
+  }
+}
+
+#ifndef PRODUCT
+
+// Debug dump: print every frame in the table.
+void StackMapTable::print() const {
+  tty->print_cr("StackMapTable: frame_count = %d", _frame_count);
+  tty->print_cr("table = { ");
+  for (int32_t idx = 0; idx < _frame_count; idx++) {
+    _frame_array[idx]->print();
+  }
+  tty->print_cr(" }");
+}
+
+#endif
+
+// Remove 'chops' logical entries from the end of a locals array holding
+// 'length' valid slots; a category-2 second half is chopped together with
+// its first half (two slots per logical entry).  Returns the new length, or
+// -1 when the array runs out before all chops are done.
+int32_t StackMapReader::chop(
+    VerificationType* locals, int32_t length, int32_t chops) {
+  int32_t pos = length - 1;
+  for (int32_t i=0; i<chops; i++) {
+    if (locals[pos].is_category2_2nd()) {
+      pos -= 2;
+    } else {
+      pos --;
+    }
+    // NOTE(review): if the final chop drives pos below -1 (category-2 half at
+    // slot 0), this falls through and returns a negative length; presumably
+    // the caller's check_verification_type_array_size rejects it -- confirm.
+    if (pos<0 && i<(chops-1)) return -1;
+  }
+  return pos+1;
+}
+
+// Decode one verification_type_info item from the attribute stream.
+// Primitive tags map directly; ITEM_Object validates its constant pool class
+// index; ITEM_UninitializedThis sets FLAG_THIS_UNINIT in *flags (when given);
+// ITEM_Uninitialized validates that its offset points at a recorded 'new'
+// instruction.  Returns bogus on any format error.
+VerificationType StackMapReader::parse_verification_type(u1* flags, TRAPS) {
+  u1 tag = _stream->get_u1(THREAD);
+  if (tag < (u1)ITEM_UninitializedThis) {
+    return VerificationType::from_tag(tag);
+  }
+  if (tag == ITEM_Object) {
+    u2 class_index = _stream->get_u2(THREAD);
+    int nconstants = _cp->length();
+    // Index must be in range and refer to a (possibly unresolved) class.
+    if ((class_index <= 0 || class_index >= nconstants) ||
+        (!_cp->tag_at(class_index).is_klass() &&
+         !_cp->tag_at(class_index).is_unresolved_klass())) {
+      _stream->stackmap_format_error("bad class index", THREAD);
+      return VerificationType::bogus_type();
+    }
+    return VerificationType::reference_type(
+      symbolHandle(THREAD, _cp->klass_name_at(class_index)));
+  }
+  if (tag == ITEM_UninitializedThis) {
+    if (flags != NULL) {
+      *flags |= FLAG_THIS_UNINIT;
+    }
+    return VerificationType::uninitialized_this_type();
+  }
+  if (tag == ITEM_Uninitialized) {
+    u2 offset = _stream->get_u2(THREAD);
+    // The offset must name a 'new' instruction recorded in _code_data.
+    if (offset >= _code_length ||
+        _code_data[offset] != ClassVerifier::NEW_OFFSET) {
+      ResourceMark rm(THREAD);
+      _verifier->class_format_error(
+        "StackMapTable format error: bad offset for Uninitialized");
+      return VerificationType::bogus_type();
+    }
+    return VerificationType::uninitialized_type(offset);
+  }
+  _stream->stackmap_format_error("bad verification type", THREAD);
+  return VerificationType::bogus_type();
+}
+
+StackMapFrame* StackMapReader::next(
+    StackMapFrame* pre_frame, bool first, u2 max_locals, u2 max_stack, TRAPS) {
+  StackMapFrame* frame;
+  int offset;
+  VerificationType* locals = NULL;
+  u1 frame_type = _stream->get_u1(THREAD);
+  if (frame_type < 64) {
+    // same_frame
+    if (first) {
+      offset = frame_type;
+      // Can't share the locals array since that is updated by the verifier.
+      if (pre_frame->locals_size() > 0) {
+        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
+          THREAD, VerificationType, pre_frame->locals_size());
+      }
+    } else {
+      offset = pre_frame->offset() + frame_type + 1;
+      locals = pre_frame->locals();
+    }
+    frame = new StackMapFrame(
+      offset, pre_frame->flags(), pre_frame->locals_size(), 0,
+      max_locals, max_stack, locals, NULL, _verifier);
+    if (first && locals != NULL) {
+      frame->copy_locals(pre_frame);
+    }
+    return frame;
+  } 
+  if (frame_type < 128) {
+    // same_locals_1_stack_item_frame
+    if (first) {
+      offset = frame_type - 64;
+      // Can't share the locals array since that is updated by the verifier.
+      if (pre_frame->locals_size() > 0) {
+        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
+          THREAD, VerificationType, pre_frame->locals_size());
+      }
+    } else {
+      offset = pre_frame->offset() + frame_type - 63;
+      locals = pre_frame->locals();
+    }
+    VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(
+      THREAD, VerificationType, 2);
+    u2 stack_size = 1; 
+    stack[0] = parse_verification_type(NULL, CHECK_VERIFY_(_verifier, NULL));
+    if (stack[0].is_category2()) {
+      stack[1] = stack[0].to_category2_2nd();
+      stack_size = 2;
+    }
+    check_verification_type_array_size(
+      stack_size, max_stack, CHECK_VERIFY_(_verifier, NULL));
+    frame = new StackMapFrame(
+      offset, pre_frame->flags(), pre_frame->locals_size(), stack_size,
+      max_locals, max_stack, locals, stack, _verifier);
+    if (first && locals != NULL) {
+      frame->copy_locals(pre_frame);
+    }
+    return frame;
+  }
+
+  u2 offset_delta = _stream->get_u2(THREAD);
+
+  if (frame_type < SAME_LOCALS_1_STACK_ITEM_EXTENDED) {
+    // reserved frame types
+    _stream->stackmap_format_error(
+      "reserved frame type", CHECK_VERIFY_(_verifier, NULL));
+  }
+
+  if (frame_type == SAME_LOCALS_1_STACK_ITEM_EXTENDED) {
+    // same_locals_1_stack_item_frame_extended
+    if (first) {
+      offset = offset_delta;
+      // Can't share the locals array since that is updated by the verifier.
+      if (pre_frame->locals_size() > 0) {
+        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
+          THREAD, VerificationType, pre_frame->locals_size());
+      }
+    } else {
+      offset = pre_frame->offset() + offset_delta + 1;
+      locals = pre_frame->locals();
+    }
+    VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(
+      THREAD, VerificationType, 2);
+    u2 stack_size = 1; 
+    stack[0] = parse_verification_type(NULL, CHECK_VERIFY_(_verifier, NULL));
+    if (stack[0].is_category2()) {
+      stack[1] = stack[0].to_category2_2nd();
+      stack_size = 2;
+    }
+    check_verification_type_array_size(
+      stack_size, max_stack, CHECK_VERIFY_(_verifier, NULL));
+    frame = new StackMapFrame(
+      offset, pre_frame->flags(), pre_frame->locals_size(), stack_size,
+      max_locals, max_stack, locals, stack, _verifier);
+    if (first && locals != NULL) {
+      frame->copy_locals(pre_frame);
+    }
+    return frame;
+  }
+
+  if (frame_type <= SAME_EXTENDED) {
+    // chop_frame or same_frame_extended
+    locals = pre_frame->locals();
+    int length = pre_frame->locals_size();
+    int chops = SAME_EXTENDED - frame_type;
+    int new_length = length;
+    u1 flags = pre_frame->flags();
+    if (chops != 0) {
+      new_length = chop(locals, length, chops);
+      check_verification_type_array_size(
+        new_length, max_locals, CHECK_VERIFY_(_verifier, NULL));
+      // Recompute flags since uninitializedThis could have been chopped.
+      flags = 0;
+      for (int i=0; i<new_length; i++) {
+        if (locals[i].is_uninitialized_this()) {
+          flags |= FLAG_THIS_UNINIT;
+          break;
+        }
+      }
+    }
+    if (first) {
+      offset = offset_delta;
+      // Can't share the locals array since that is updated by the verifier.
+      if (new_length > 0) {
+        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
+          THREAD, VerificationType, new_length);
+      } else {
+        locals = NULL;
+      }
+    } else {
+      offset = pre_frame->offset() + offset_delta + 1;
+    }
+    frame = new StackMapFrame(
+      offset, flags, new_length, 0, max_locals, max_stack, 
+      locals, NULL, _verifier);
+    if (first && locals != NULL) {
+      frame->copy_locals(pre_frame);
+    }
+    return frame;
+  } else if (frame_type < SAME_EXTENDED + 4) {
+    // append_frame
+    int appends = frame_type - SAME_EXTENDED;
+    int real_length = pre_frame->locals_size();
+    int new_length = real_length + appends*2;
+    locals = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, VerificationType, new_length);
+    VerificationType* pre_locals = pre_frame->locals();
+    int i;
+    for (i=0; i<pre_frame->locals_size(); i++) {
+      locals[i] = pre_locals[i];
+    }
+    u1 flags = pre_frame->flags();
+    for (i=0; i<appends; i++) {
+      locals[real_length] = parse_verification_type(&flags, THREAD);
+      if (locals[real_length].is_category2()) {
+        locals[real_length + 1] = locals[real_length].to_category2_2nd();
+        ++real_length;
+      }
+      ++real_length;
+    }
+    check_verification_type_array_size(
+      real_length, max_locals, CHECK_VERIFY_(_verifier, NULL));
+    if (first) {
+      offset = offset_delta;
+    } else {
+      offset = pre_frame->offset() + offset_delta + 1;
+    }
+    frame = new StackMapFrame(
+      offset, flags, real_length, 0, max_locals, 
+      max_stack, locals, NULL, _verifier);
+    return frame;
+  }
+  if (frame_type == FULL) {
+    // full_frame
+    u1 flags = 0;
+    u2 locals_size = _stream->get_u2(THREAD);
+    int real_locals_size = 0;
+    if (locals_size > 0) {
+      locals = NEW_RESOURCE_ARRAY_IN_THREAD(
+        THREAD, VerificationType, locals_size*2);
+    }
+    int i;
+    for (i=0; i<locals_size; i++) {
+      locals[real_locals_size] = parse_verification_type(&flags, THREAD);
+      if (locals[real_locals_size].is_category2()) {
+        locals[real_locals_size + 1] = 
+          locals[real_locals_size].to_category2_2nd();
+        ++real_locals_size;
+      }
+      ++real_locals_size;
+    }
+    check_verification_type_array_size(
+      real_locals_size, max_locals, CHECK_VERIFY_(_verifier, NULL));
+    u2 stack_size = _stream->get_u2(THREAD);
+    int real_stack_size = 0;
+    VerificationType* stack = NULL;
+    if (stack_size > 0) {
+      stack = NEW_RESOURCE_ARRAY_IN_THREAD(
+        THREAD, VerificationType, stack_size*2);
+    }
+    for (i=0; i<stack_size; i++) {
+      stack[real_stack_size] = parse_verification_type(NULL, THREAD);
+      if (stack[real_stack_size].is_category2()) {
+        stack[real_stack_size + 1] = stack[real_stack_size].to_category2_2nd();
+        ++real_stack_size;
+      }
+      ++real_stack_size;
+    }
+    check_verification_type_array_size(
+      real_stack_size, max_stack, CHECK_VERIFY_(_verifier, NULL));
+    if (first) {
+      offset = offset_delta;
+    } else {
+      offset = pre_frame->offset() + offset_delta + 1;
+    }
+    frame = new StackMapFrame(
+      offset, flags, real_locals_size, real_stack_size,
+      max_locals, max_stack, locals, stack, _verifier);
+    return frame;
+  }
+
+  _stream->stackmap_format_error(
+    "reserved frame type", CHECK_VERIFY_(pre_frame->verifier(), NULL));
+  return NULL;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/stackMapTable.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,164 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)stackMapTable.hpp	1.21 07/05/05 17:06:57 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class StackMapReader;
+
+// StackMapTable class is the StackMap table used by type checker
+class StackMapTable : public StackObj {
+ private:
+  // Logically, the _frame_count (as well as many fields in the StackFrame)
+  // should be a u2, but if we defined the variable as that type it will
+  // be difficult to detect/recover from overflow or underflow conditions.
+  // Widening the type and making it signed will help detect these.
+  int32_t              _code_length;
+  int32_t              _frame_count;     // Stackmap frame count 
+  StackMapFrame**       _frame_array;
+
+ public:
+  StackMapTable(StackMapReader* reader, StackMapFrame* init_frame,
+                u2 max_locals, u2 max_stack,
+                char* code_data, int code_len, TRAPS);
+
+  inline int32_t get_frame_count() const { return _frame_count; }
+  inline int get_offset(int index) const { 
+    return _frame_array[index]->offset(); 
+  }
+
+  // Match and/or update current_frame to the frame in stackmap table with
+  // specified offset. Return true if the two frames match. 
+  bool match_stackmap(
+    StackMapFrame* current_frame, int32_t offset, 
+    bool match, bool update, TRAPS) const;
+  // Match and/or update current_frame to the frame in stackmap table with
+  // specified offset and frame index. Return true if the two frames match. 
+  bool match_stackmap(
+    StackMapFrame* current_frame, int32_t offset, int32_t frame_index,
+    bool match, bool update, TRAPS) const;
+
+  // Check jump instructions. Make sure there are no uninitialized 
+  // instances on backward branch.
+  void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
+
+  // The following methods are only used inside this class.
+
+  // Returns the frame array index where the frame with offset is stored. 
+  int get_index_from_offset(int32_t offset) const;
+
+  // Make sure that no uninitialized objects exist on backward branches.
+  void check_new_object(
+    const StackMapFrame* frame, int32_t target, TRAPS) const;
+
+  // Debugging
+  void print() const PRODUCT_RETURN;
+};
+
+class StackMapStream : StackObj {
+ private:
+  typeArrayHandle _data;
+  int _index;
+ public:
+  StackMapStream(typeArrayHandle ah) 
+    : _data(ah), _index(0) {
+  }
+  u1 get_u1(TRAPS) {
+    if (_data == NULL || _index >= _data->length()) {
+      stackmap_format_error("access beyond the end of attribute", CHECK_0);
+    }
+    return _data->byte_at(_index++);
+  }
+  u2 get_u2(TRAPS) {
+    if (_data == NULL || _index >= _data->length() - 1) {
+      stackmap_format_error("access beyond the end of attribute", CHECK_0);
+    }
+    u2 res = Bytes::get_Java_u2((u1*)_data->byte_at_addr(_index));
+    _index += 2;
+    return res;
+  }
+  bool at_end() {
+    return (_data == NULL) || (_index == _data->length());
+  }
+  static void stackmap_format_error(const char* msg, TRAPS);
+};
+
+class StackMapReader : StackObj {
+ private:
+  // information about the class and method 
+  constantPoolHandle  _cp;
+  ClassVerifier* _verifier;
+  StackMapStream* _stream;
+  char* _code_data;
+  int32_t _code_length;
+
+  // information get from the attribute
+  int32_t  _frame_count;       // frame count 
+
+  int32_t chop(VerificationType* locals, int32_t length, int32_t chops);
+  VerificationType parse_verification_type(u1* flags, TRAPS);
+  void check_verification_type_array_size(
+      int32_t size, int32_t max_size, TRAPS) {
+    if (size < 0 || size > max_size) {
+      // Since this error could be caused by someone rewriting the method
+      // but not knowing to update the stackmap data, we call the
+      // verifier's error method, which may not throw an exception and 
+      // failover to the old verifier instead.
+      _verifier->class_format_error(
+        "StackMapTable format error: bad type array size");
+    }
+  }
+
+  enum {
+    SAME_LOCALS_1_STACK_ITEM_EXTENDED = 247,
+    SAME_EXTENDED = 251,
+    FULL = 255
+  };
+
+ public:
+  // Constructor
+  StackMapReader(ClassVerifier* v, StackMapStream* stream, char* code_data,
+                 int32_t code_len, TRAPS) :
+                 _verifier(v), _stream(stream),
+                 _code_data(code_data), _code_length(code_len) {
+    methodHandle m = v->method();
+    if (m->has_stackmap_table()) {
+      _cp = constantPoolHandle(THREAD, m->constants());
+      _frame_count = _stream->get_u2(CHECK);
+    } else {
+      // There's no stackmap table present. Frame count and size are 0.
+      _frame_count = 0;
+    }
+  }
+
+  inline int32_t get_frame_count() const		{ return _frame_count; }
+  StackMapFrame* next(StackMapFrame* pre_frame, bool first,
+                      u2 max_locals, u2 max_stack, TRAPS);
+
+  void check_end(TRAPS) {
+    if (!_stream->at_end()) {
+      StackMapStream::stackmap_format_error("wrong attribute size", CHECK);
+    }
+  }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,487 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)symbolTable.cpp	1.69 07/05/05 17:05:55 JVM"
+#endif
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_symbolTable.cpp.incl"
+
+// --------------------------------------------------------------------------
+
+SymbolTable* SymbolTable::_the_table = NULL;
+
+// Lookup a symbol in a bucket.
+
+symbolOop SymbolTable::lookup(int index, const char* name,
+                              int len, unsigned int hash) {
+  for (HashtableEntry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->hash() == hash) {
+      symbolOop sym = symbolOop(e->literal());
+      if (sym->equals(name, len)) {
+        return sym;
+      }
+    }
+  }
+  return NULL;
+}
+
+
+// We take care not to be blocking while holding the
+// SymbolTable_lock. Otherwise, the system might deadlock, since the
+// symboltable is used during compilation (VM_thread) The lock free
+// synchronization is simplified by the fact that we do not delete
+// entries in the symbol table during normal execution (only during
+// safepoints).
+
+symbolOop SymbolTable::lookup(const char* name, int len, TRAPS) {  
+  unsigned int hashValue = hash_symbol(name, len);
+  int index = the_table()->hash_to_index(hashValue);
+
+  symbolOop s = the_table()->lookup(index, name, len, hashValue);
+
+  // Found
+  if (s != NULL) return s;
+  
+  // Otherwise, add the symbol to the table
+  return the_table()->basic_add(index, (u1*)name, len, hashValue, CHECK_NULL);
+}
+
+symbolOop SymbolTable::lookup(symbolHandle sym, int begin, int end, TRAPS) {
+  char* buffer;
+  int index, len;
+  unsigned int hashValue;
+  char* name;
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+
+    name = (char*)sym->base() + begin;
+    len = end - begin;
+    hashValue = hash_symbol(name, len);
+    index = the_table()->hash_to_index(hashValue);
+    symbolOop s = the_table()->lookup(index, name, len, hashValue);
+  
+    // Found
+    if (s != NULL) return s;
+  }
+   
+  // Otherwise, add the symbol to the table. Copy it to a C string first.
+  char stack_buf[128];
+  ResourceMark rm(THREAD);
+  if (len <= 128) {
+    buffer = stack_buf;
+  } else {
+    buffer = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, len);
+  }
+  for (int i=0; i<len; i++) {
+    buffer[i] = name[i];
+  }
+  // Make sure there is no safepoint in the code above since name can't move.
+  // We can't include the code in No_Safepoint_Verifier because of the
+  // ResourceMark.
+
+  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, CHECK_NULL);
+}
+
+symbolOop SymbolTable::lookup_only(const char* name, int len,
+                                   unsigned int& hash) {  
+  hash = hash_symbol(name, len);
+  int index = the_table()->hash_to_index(hash);
+
+  return the_table()->lookup(index, name, len, hash);
+}
+
+void SymbolTable::add(constantPoolHandle cp, int names_count,
+                      const char** names, int* lengths, int* cp_indices,
+                      unsigned int* hashValues, TRAPS) {
+  SymbolTable* table = the_table();
+  bool added = table->basic_add(cp, names_count, names, lengths,
+                                cp_indices, hashValues, CHECK);
+  if (!added) {
+    // do it the hard way
+    for (int i=0; i<names_count; i++) {
+      int index = table->hash_to_index(hashValues[i]);
+      symbolOop sym = table->basic_add(index, (u1*)names[i], lengths[i],
+                                       hashValues[i], CHECK);
+      cp->symbol_at_put(cp_indices[i], sym);
+    }
+  }
+}
+
+// Needed for preloading classes in signatures when compiling.
+
+symbolOop SymbolTable::probe(const char* name, int len) {
+  unsigned int hashValue = hash_symbol(name, len);
+  int index = the_table()->hash_to_index(hashValue);
+  return the_table()->lookup(index, name, len, hashValue);
+}
+
+
+symbolOop SymbolTable::basic_add(int index, u1 *name, int len,
+                                 unsigned int hashValue, TRAPS) {  
+  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+         "proposed name of symbol must be stable");
+
+  // We assume that lookup() has been called already, that it failed,
+  // and symbol was not found.  We create the symbol here.
+  symbolKlass* sk  = (symbolKlass*) Universe::symbolKlassObj()->klass_part();
+  symbolOop s_oop = sk->allocate_symbol(name, len, CHECK_NULL);
+  symbolHandle sym (THREAD, s_oop);
+
+  // Allocation must be done before grabbing the SymbolTable_lock
+  MutexLocker ml(SymbolTable_lock, THREAD);
+
+  assert(sym->equals((char*)name, len), "symbol must be properly initialized");
+
+  // Since look-up was done lock-free, we need to check if another
+  // thread beat us in the race to insert the symbol.
+
+  symbolOop test = lookup(index, (char*)name, len, hashValue);
+  if (test != NULL) {
+    // A race occurred and another thread introduced the symbol; this one
+    // will be dropped and collected.
+    return test;
+  }  
+
+  HashtableEntry* entry = new_entry(hashValue, sym());
+  add_entry(index, entry);
+  return sym();
+}
+
+bool SymbolTable::basic_add(constantPoolHandle cp, int names_count,
+                            const char** names, int* lengths,
+                            int* cp_indices, unsigned int* hashValues,
+                            TRAPS) {
+  symbolKlass* sk  = (symbolKlass*) Universe::symbolKlassObj()->klass_part();
+  symbolOop sym_oops[symbol_alloc_batch_size];
+  bool allocated = sk->allocate_symbols(names_count, names, lengths,
+                                        sym_oops, CHECK_false);
+  if (!allocated) {
+    return false;
+  }
+  symbolHandle syms[symbol_alloc_batch_size];
+  int i;
+  for (i=0; i<names_count; i++) {
+    syms[i] = symbolHandle(THREAD, sym_oops[i]);
+  }
+
+  // Allocation must be done before grabbing the SymbolTable_lock lock
+  MutexLocker ml(SymbolTable_lock, THREAD);
+
+  for (i=0; i<names_count; i++) {
+    assert(syms[i]->equals(names[i], lengths[i]), "symbol must be properly initialized");
+    // Since look-up was done lock-free, we need to check if another
+    // thread beat us in the race to insert the symbol.
+    int index = hash_to_index(hashValues[i]);
+    symbolOop test = lookup(index, names[i], lengths[i], hashValues[i]);
+    if (test != NULL) {
+      // A race occurred and another thread introduced the symbol; this one
+      // will be dropped and collected. Use test instead.
+      cp->symbol_at_put(cp_indices[i], test);
+    } else {
+      symbolOop sym = syms[i]();
+      HashtableEntry* entry = new_entry(hashValues[i], sym);
+      add_entry(index, entry);
+      cp->symbol_at_put(cp_indices[i], sym);
+    }
+  }
+
+  return true;
+}
+
+
+void SymbolTable::verify() {
+  for (int i = 0; i < the_table()->table_size(); ++i) {
+    HashtableEntry* p = the_table()->bucket(i);
+    for ( ; p != NULL; p = p->next()) {
+      symbolOop s = symbolOop(p->literal());
+      guarantee(s != NULL, "symbol is NULL");
+      s->verify();
+      guarantee(s->is_perm(), "symbol not in permspace");
+      unsigned int h = hash_symbol((char*)s->bytes(), s->utf8_length());
+      guarantee(p->hash() == h, "broken hash in symbol table entry");
+      guarantee(the_table()->hash_to_index(h) == i,
+                "wrong index in symbol table");
+    }
+  }
+}
+
+
+//---------------------------------------------------------------------------
+// Non-product code
+
+#ifndef PRODUCT
+
+void SymbolTable::print_histogram() {
+  MutexLocker ml(SymbolTable_lock);
+  const int results_length = 100;
+  int results[results_length];
+  int i,j;
+  
+  // initialize results to zero
+  for (j = 0; j < results_length; j++) {
+    results[j] = 0;
+  }
+
+  int total = 0;
+  int max_symbols = 0;
+  int out_of_range = 0;
+  for (i = 0; i < the_table()->table_size(); i++) {
+    HashtableEntry* p = the_table()->bucket(i);
+    for ( ; p != NULL; p = p->next()) {
+      int counter = symbolOop(p->literal())->utf8_length();
+      total += counter;
+      if (counter < results_length) {
+        results[counter]++;
+      } else {
+        out_of_range++;
+      }
+      max_symbols = MAX2(max_symbols, counter);
+    }
+  }
+  tty->print_cr("Symbol Table:");
+  tty->print_cr("%8s %5d", "Total  ", total);
+  tty->print_cr("%8s %5d", "Maximum", max_symbols);
+  tty->print_cr("%8s %3.2f", "Average",
+	  ((float) total / (float) the_table()->table_size()));
+  tty->print_cr("%s", "Histogram:");
+  tty->print_cr(" %s %29s", "Length", "Number chains that length");
+  for (i = 0; i < results_length; i++) {
+    if (results[i] > 0) {
+      tty->print_cr("%6d %10d", i, results[i]);
+    }
+  }
+  int line_length = 70;    
+  tty->print_cr("%s %30s", " Length", "Number chains that length");
+  for (i = 0; i < results_length; i++) {
+    if (results[i] > 0) {
+      tty->print("%4d", i);
+      for (j = 0; (j < results[i]) && (j < line_length);  j++) {
+        tty->print("%1s", "*");
+      }
+      if (j == line_length) {
+        tty->print("%1s", "+");
+      }
+      tty->cr();
+    }
+  }  
+  tty->print_cr(" %s %d: %d\n", "Number chains longer than",
+	            results_length, out_of_range);
+}
+
+#endif // PRODUCT
+
+// --------------------------------------------------------------------------
+
+#ifdef ASSERT
+class StableMemoryChecker : public StackObj {
+  enum { _bufsize = wordSize*4 };
+
+  address _region;
+  jint    _size;
+  u1      _save_buf[_bufsize];
+
+  int sample(u1* save_buf) {
+    if (_size <= _bufsize) {
+      memcpy(save_buf, _region, _size);
+      return _size;
+    } else {
+      // copy head and tail
+      memcpy(&save_buf[0],          _region,                      _bufsize/2);
+      memcpy(&save_buf[_bufsize/2], _region + _size - _bufsize/2, _bufsize/2);
+      return (_bufsize/2)*2;
+    }
+  }
+
+ public:
+  StableMemoryChecker(const void* region, jint size) {
+    _region = (address) region;
+    _size   = size;
+    sample(_save_buf);
+  }
+
+  bool verify() {
+    u1 check_buf[sizeof(_save_buf)];
+    int check_size = sample(check_buf);
+    return (0 == memcmp(_save_buf, check_buf, check_size));
+  }
+
+  void set_region(const void* region) { _region = (address) region; }
+};
+#endif
+
+
+// --------------------------------------------------------------------------
+
+
+// Compute the hash value for a java.lang.String object which would
+// contain the characters passed in. This hash value is used for at
+// least two purposes.
+//
+// (a) As the hash value used by the StringTable for bucket selection
+//     and comparison (stored in the HashtableEntry structures).  This
+//     is used in the String.intern() method.
+//
+// (b) As the hash value used by the String object itself, in
+//     String.hashCode().  This value is normally calculated in Java code
+//     in the String.hashCode() method, but is precomputed for String
+//     objects in the shared archive file.
+//
+//     For this reason, THIS ALGORITHM MUST MATCH String.hashCode().
+
+int StringTable::hash_string(jchar* s, int len) {
+  unsigned h = 0;
+  while (len-- > 0) {
+    h = 31*h + (unsigned) *s;
+    s++;
+  }
+  return h;
+}
+
+
+StringTable* StringTable::_the_table = NULL;
+
+oop StringTable::lookup(int index, jchar* name,
+                        int len, unsigned int hash) {
+  for (HashtableEntry* l = bucket(index); l != NULL; l = l->next()) {
+    if (l->hash() == hash) {
+      if (java_lang_String::equals(l->literal(), name, len)) {
+        return l->literal();
+      }
+    }
+  }
+  return NULL;
+}
+
+
+oop StringTable::basic_add(int index, Handle string_or_null, jchar* name,
+                           int len, unsigned int hashValue, TRAPS) {  
+  debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
+  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+         "proposed name of symbol must be stable");
+
+  Handle string;
+  // try to reuse the string if possible
+  if (!string_or_null.is_null() && string_or_null()->is_perm()) {
+    string = string_or_null;
+  } else {
+    string = java_lang_String::create_tenured_from_unicode(name, len, CHECK_NULL);
+  }
+
+  // Allocation must be done before grabbing the StringTable_lock
+  MutexLocker ml(StringTable_lock, THREAD);
+
+  assert(java_lang_String::equals(string(), name, len),
+         "string must be properly initialized");
+
+  // Since look-up was done lock-free, we need to check if another
+  // thread beat us in the race to insert the symbol.
+
+  oop test = lookup(index, name, len, hashValue); // calls lookup(u1*, int)
+  if (test != NULL) {
+    // Entry already added
+    return test;
+  }  
+
+  HashtableEntry* entry = new_entry(hashValue, string());
+  add_entry(index, entry);
+  return string();
+}
+
+
+oop StringTable::lookup(symbolOop symbol) {
+  ResourceMark rm;
+  int length;
+  jchar* chars = symbol->as_unicode(length);
+  unsigned int hashValue = hash_string(chars, length);
+  int index = the_table()->hash_to_index(hashValue);
+  return the_table()->lookup(index, chars, length, hashValue);
+}
+
+
+oop StringTable::intern(Handle string_or_null, jchar* name,
+                        int len, TRAPS) {
+  unsigned int hashValue = hash_string(name, len);
+  int index = the_table()->hash_to_index(hashValue);
+  oop string = the_table()->lookup(index, name, len, hashValue);
+
+  // Found
+  if (string != NULL) return string;
+  
+  // Otherwise, add the string to the table
+  return the_table()->basic_add(index, string_or_null, name, len,
+                                hashValue, CHECK_NULL);  
+}
+
+oop StringTable::intern(symbolOop symbol, TRAPS) {
+  if (symbol == NULL) return NULL;
+  ResourceMark rm(THREAD);
+  int length;
+  jchar* chars = symbol->as_unicode(length);
+  Handle string;
+  oop result = intern(string, chars, length, CHECK_NULL);
+  return result;
+}
+
+
+oop StringTable::intern(oop string, TRAPS)
+{
+  if (string == NULL) return NULL;
+  ResourceMark rm(THREAD);
+  int length;
+  Handle h_string (THREAD, string);
+  jchar* chars = java_lang_String::as_unicode_string(string, length);
+  oop result = intern(h_string, chars, length, CHECK_NULL);
+  return result;
+}
+
+
+oop StringTable::intern(const char* utf8_string, TRAPS) {
+  if (utf8_string == NULL) return NULL;
+  ResourceMark rm(THREAD);
+  int length = UTF8::unicode_length(utf8_string);
+  jchar* chars = NEW_RESOURCE_ARRAY(jchar, length);
+  UTF8::convert_to_unicode(utf8_string, chars, length);
+  Handle string;
+  oop result = intern(string, chars, length, CHECK_NULL);
+  return result;
+}
+
+void StringTable::verify() {
+  for (int i = 0; i < the_table()->table_size(); ++i) {
+    HashtableEntry* p = the_table()->bucket(i);
+    for ( ; p != NULL; p = p->next()) {
+      oop s = p->literal();
+      guarantee(s != NULL, "interned string is NULL");
+      guarantee(s->is_perm(), "interned string not in permspace");
+
+      int length;
+      jchar* chars = java_lang_String::as_unicode_string(s, length);
+      unsigned int h = hash_string(chars, length);
+      guarantee(p->hash() == h, "broken hash in string table entry");
+      guarantee(the_table()->hash_to_index(h) == i,
+                "wrong index in string table");
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,216 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)symbolTable.hpp	1.48 07/05/05 17:05:56 JVM"
+#endif
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// The symbol table holds all symbolOops and corresponding interned strings.
+// symbolOops and literal strings should be canonicalized.
+//
+// The interned strings are created lazily.
+//
+// It is implemented as an open hash table with a fixed number of buckets.
+//
+// %note:
+//  - symbolTableEntries are allocated in blocks to reduce the space overhead.
+
+class BoolObjectClosure;
+
+
+class SymbolTable : public Hashtable {
+  friend class VMStructs;
+
+private:
+  // The symbol table
+  static SymbolTable* _the_table;
+
+  // Adding elements    
+  symbolOop basic_add(int index, u1* name, int len,
+                      unsigned int hashValue, TRAPS);
+  bool basic_add(constantPoolHandle cp, int names_count,
+                 const char** names, int* lengths, int* cp_indices,
+                 unsigned int* hashValues, TRAPS);
+
+  // Table size
+  enum {
+    symbol_table_size = 20011
+  };
+
+  symbolOop lookup(int index, const char* name, int len, unsigned int hash);
+
+  SymbolTable()
+    : Hashtable(symbol_table_size, sizeof (HashtableEntry)) {}
+
+  SymbolTable(HashtableBucket* t, int number_of_entries)
+    : Hashtable(symbol_table_size, sizeof (HashtableEntry), t,
+                number_of_entries) {}
+
+
+public:
+  enum {
+    symbol_alloc_batch_size = 8
+  };
+
+  // The symbol table
+  static SymbolTable* the_table() { return _the_table; }
+
+  static void create_table() {
+    assert(_the_table == NULL, "One symbol table allowed.");
+    _the_table = new SymbolTable();
+  }
+
+  static void create_table(HashtableBucket* t, int length,
+                           int number_of_entries) {
+    assert(_the_table == NULL, "One symbol table allowed.");
+    assert(length == symbol_table_size * sizeof(HashtableBucket),
+           "bad shared symbol size.");
+    _the_table = new SymbolTable(t, number_of_entries);
+  }
+
+  static symbolOop lookup(const char* name, int len, TRAPS);
+  // lookup only, won't add. Also calculate hash.
+  static symbolOop lookup_only(const char* name, int len, unsigned int& hash);
+  // Only copy to C string to be added if lookup failed.
+  static symbolOop lookup(symbolHandle sym, int begin, int end, TRAPS);
+
+  static void add(constantPoolHandle cp, int names_count,
+                  const char** names, int* lengths, int* cp_indices,
+                  unsigned int* hashValues, TRAPS);
+
+  // GC support
+  //   Delete pointers to otherwise-unreachable objects.
+  static void unlink(BoolObjectClosure* cl) {
+    the_table()->Hashtable::unlink(cl);
+  }
+
+  // Invoke "f->do_oop" on the locations of all oops in the table.
+  static void oops_do(OopClosure* f) {
+    the_table()->Hashtable::oops_do(f);
+  }
+
+  // Symbol lookup
+  static symbolOop lookup(int index, const char* name, int len, TRAPS);
+
+  // Needed for preloading classes in signatures when compiling.
+  // Returns the symbol if it is already present in the symbol table, otherwise
+  // NULL.  NO ALLOCATION IS GUARANTEED!
+  static symbolOop probe(const char* name, int len);
+
+  // Histogram
+  static void print_histogram()     PRODUCT_RETURN;
+
+  // Debugging
+  static void verify();
+
+  // Sharing
+  static void copy_buckets(char** top, char*end) {
+    the_table()->Hashtable::copy_buckets(top, end);
+  }
+  static void copy_table(char** top, char*end) {
+    the_table()->Hashtable::copy_table(top, end);
+  }
+  static void reverse(void* boundary = NULL) {
+    ((Hashtable*)the_table())->reverse(boundary);
+  }
+};
+
+
+class StringTable : public Hashtable {
+  friend class VMStructs;
+
+private:
+  // The string table
+  static StringTable* _the_table;
+
+  static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
+  oop basic_add(int index, Handle string_or_null, jchar* name, int len,
+                unsigned int hashValue, TRAPS);
+
+  // Table size
+  enum {
+    string_table_size = 1009
+  };
+
+  oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
+
+  StringTable() : Hashtable(string_table_size, sizeof (HashtableEntry)) {}
+
+  StringTable(HashtableBucket* t, int number_of_entries)
+    : Hashtable(string_table_size, sizeof (HashtableEntry), t,
+                number_of_entries) {}
+
+public:
+  // The string table
+  static StringTable* the_table() { return _the_table; }
+
+  static void create_table() {
+    assert(_the_table == NULL, "One string table allowed.");
+    _the_table = new StringTable();
+  }
+
+  static void create_table(HashtableBucket* t, int length,
+                           int number_of_entries) {
+    assert(_the_table == NULL, "One string table allowed.");
+    assert(length == string_table_size * sizeof(HashtableBucket),
+           "bad shared string size.");
+    _the_table = new StringTable(t, number_of_entries);
+  }
+
+
+  static int hash_string(jchar* s, int len);
+
+
+  // GC support
+  //   Delete pointers to otherwise-unreachable objects.
+  static void unlink(BoolObjectClosure* cl) {
+    the_table()->Hashtable::unlink(cl);
+  }
+
+  // Invoke "f->do_oop" on the locations of all oops in the table.
+  static void oops_do(OopClosure* f) {
+    the_table()->Hashtable::oops_do(f);
+  }
+
+  // Probing
+  static oop lookup(symbolOop symbol);
+
+  // Interning
+  static oop intern(symbolOop symbol, TRAPS);
+  static oop intern(oop string, TRAPS);
+  static oop intern(const char *utf8_string, TRAPS);
+
+  // Debugging
+  static void verify();
+
+  // Sharing
+  static void copy_buckets(char** top, char*end) {
+    the_table()->Hashtable::copy_buckets(top, end);
+  }
+  static void copy_table(char** top, char*end) {
+    the_table()->Hashtable::copy_table(top, end);
+  }
+  static void reverse() {
+    ((BasicHashtable*)the_table())->reverse();
+  }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,2449 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)systemDictionary.cpp	1.357 07/05/17 15:50:33 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_systemDictionary.cpp.incl"
+
+
+Dictionary*       SystemDictionary::_dictionary = NULL;
+PlaceholderTable* SystemDictionary::_placeholders = NULL;
+Dictionary*       SystemDictionary::_shared_dictionary = NULL;
+LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL;
+ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
+
+
+int         SystemDictionary::_number_of_modifications = 0;
+
+oop         SystemDictionary::_system_loader_lock_obj     =  NULL;
+
+klassOop    SystemDictionary::_object_klass               =  NULL;
+klassOop    SystemDictionary::_string_klass               =  NULL;
+klassOop    SystemDictionary::_class_klass                =  NULL;
+klassOop    SystemDictionary::_cloneable_klass            =  NULL;
+klassOop    SystemDictionary::_classloader_klass          =  NULL;
+klassOop    SystemDictionary::_serializable_klass         =  NULL;
+klassOop    SystemDictionary::_system_klass               =  NULL;
+
+klassOop    SystemDictionary::_throwable_klass            =  NULL;
+klassOop    SystemDictionary::_error_klass                =  NULL;
+klassOop    SystemDictionary::_threaddeath_klass          =  NULL;
+klassOop    SystemDictionary::_exception_klass            =  NULL;
+klassOop    SystemDictionary::_runtime_exception_klass    =  NULL;
+klassOop    SystemDictionary::_classNotFoundException_klass = NULL;
+klassOop    SystemDictionary::_noClassDefFoundError_klass = NULL;
+klassOop    SystemDictionary::_linkageError_klass         = NULL;
+klassOop    SystemDictionary::_classCastException_klass   =  NULL;
+klassOop    SystemDictionary::_arrayStoreException_klass  =  NULL;
+klassOop    SystemDictionary::_virtualMachineError_klass  =  NULL;
+klassOop    SystemDictionary::_outOfMemoryError_klass     =  NULL;
+klassOop    SystemDictionary::_StackOverflowError_klass   =  NULL;
+klassOop    SystemDictionary::_illegalMonitorStateException_klass   =  NULL;
+klassOop    SystemDictionary::_protectionDomain_klass     =  NULL;
+klassOop    SystemDictionary::_AccessControlContext_klass = NULL;
+
+klassOop    SystemDictionary::_reference_klass            =  NULL;
+klassOop    SystemDictionary::_soft_reference_klass       =  NULL;
+klassOop    SystemDictionary::_weak_reference_klass       =  NULL;
+klassOop    SystemDictionary::_final_reference_klass      =  NULL;
+klassOop    SystemDictionary::_phantom_reference_klass    =  NULL;
+klassOop    SystemDictionary::_finalizer_klass            =  NULL;
+
+klassOop    SystemDictionary::_thread_klass               =  NULL;
+klassOop    SystemDictionary::_threadGroup_klass          =  NULL;
+klassOop    SystemDictionary::_properties_klass           =  NULL;
+klassOop    SystemDictionary::_reflect_accessible_object_klass =  NULL;
+klassOop    SystemDictionary::_reflect_field_klass        =  NULL;
+klassOop    SystemDictionary::_reflect_method_klass       =  NULL;
+klassOop    SystemDictionary::_reflect_constructor_klass  =  NULL;
+klassOop    SystemDictionary::_reflect_magic_klass        =  NULL;
+klassOop    SystemDictionary::_reflect_method_accessor_klass = NULL;
+klassOop    SystemDictionary::_reflect_constructor_accessor_klass = NULL;
+klassOop    SystemDictionary::_reflect_delegating_classloader_klass = NULL;
+klassOop    SystemDictionary::_reflect_constant_pool_klass =  NULL;
+klassOop    SystemDictionary::_reflect_unsafe_static_field_accessor_impl_klass = NULL;
+
+klassOop    SystemDictionary::_vector_klass               =  NULL;
+klassOop    SystemDictionary::_hashtable_klass            =  NULL;
+klassOop    SystemDictionary::_stringBuffer_klass         =  NULL;
+
+klassOop    SystemDictionary::_stackTraceElement_klass    =  NULL;
+
+klassOop    SystemDictionary::_java_nio_Buffer_klass      =  NULL;
+
+klassOop    SystemDictionary::_sun_misc_AtomicLongCSImpl_klass = NULL;
+
+klassOop    SystemDictionary::_boolean_klass              =  NULL;
+klassOop    SystemDictionary::_char_klass                 =  NULL;
+klassOop    SystemDictionary::_float_klass                =  NULL;
+klassOop    SystemDictionary::_double_klass               =  NULL;
+klassOop    SystemDictionary::_byte_klass                 =  NULL;
+klassOop    SystemDictionary::_short_klass                =  NULL;
+klassOop    SystemDictionary::_int_klass                  =  NULL;
+klassOop    SystemDictionary::_long_klass                 =  NULL;
+klassOop    SystemDictionary::_box_klasses[T_VOID+1]      =  { NULL /*, NULL...*/ };
+
+oop         SystemDictionary::_int_mirror                 =  NULL;
+oop         SystemDictionary::_float_mirror               =  NULL;
+oop         SystemDictionary::_double_mirror              =  NULL;
+oop         SystemDictionary::_byte_mirror                =  NULL;
+oop         SystemDictionary::_bool_mirror                =  NULL;
+oop         SystemDictionary::_char_mirror                =  NULL;
+oop         SystemDictionary::_long_mirror                =  NULL;
+oop         SystemDictionary::_short_mirror               =  NULL;
+oop         SystemDictionary::_void_mirror                =  NULL;
+oop         SystemDictionary::_mirrors[T_VOID+1]          =  { NULL /*, NULL...*/ };
+
+oop         SystemDictionary::_java_system_loader         =  NULL;
+
+bool        SystemDictionary::_has_loadClassInternal      =  false;
+bool        SystemDictionary::_has_checkPackageAccess     =  false;
+
+// lazily initialized klass variables
+volatile klassOop    SystemDictionary::_abstract_ownable_synchronizer_klass = NULL;
+
+
+// ----------------------------------------------------------------------------
+// Java-level SystemLoader
+
+oop SystemDictionary::java_system_loader() {
+  return _java_system_loader;
+}
+
+void SystemDictionary::compute_java_system_loader(TRAPS) {
+  KlassHandle system_klass(THREAD, _classloader_klass);    
+  JavaValue result(T_OBJECT);
+  JavaCalls::call_static(&result, 
+                         KlassHandle(THREAD, _classloader_klass),
+                         vmSymbolHandles::getSystemClassLoader_name(),
+                         vmSymbolHandles::void_classloader_signature(),
+                         CHECK);
+    
+  _java_system_loader = (oop)result.get_jobject();    
+}
+
+
+// ----------------------------------------------------------------------------
+// debugging
+
+#ifdef ASSERT
+
+// return true if class_name contains no '.' (internal format is '/')
+bool SystemDictionary::is_internal_format(symbolHandle class_name) {
+  if (class_name.not_null()) {
+    ResourceMark rm;
+    char* name = class_name->as_C_string();
+    return strchr(name, '.') == NULL;
+  } else {
+    return true;
+  }
+}
+
+#endif
+
+// ----------------------------------------------------------------------------
+// Resolving of classes
+
+// Forwards to resolve_or_null
+
+klassOop SystemDictionary::resolve_or_fail(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS) {  
+  klassOop klass = resolve_or_null(class_name, class_loader, protection_domain, THREAD);
+  if (HAS_PENDING_EXCEPTION || klass == NULL) {
+    KlassHandle k_h(THREAD, klass);
+    // can return a null klass
+    klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD);
+  }
+  return klass;
+}
+
+klassOop SystemDictionary::handle_resolution_exception(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS) {
+  if (HAS_PENDING_EXCEPTION) {
+    // If we have a pending exception we forward it to the caller, unless throw_error is true,
+    // in which case we have to check whether the pending exception is a ClassNotFoundException,
+    // and if so convert it to a NoClassDefFoundError
+    // And chain the original ClassNotFoundException
+    if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) {
+      ResourceMark rm(THREAD);
+      assert(klass_h() == NULL, "Should not have result with exception pending");
+      Handle e(THREAD, PENDING_EXCEPTION);
+      CLEAR_PENDING_EXCEPTION;
+      THROW_MSG_CAUSE_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string(), e);
+    } else {
+      return NULL; 
+    }
+  }
+  // Class not found, throw appropriate error or exception depending on value of throw_error
+  if (klass_h() == NULL) {
+    ResourceMark rm(THREAD);
+    if (throw_error) {
+      THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string());
+    } else {      
+      THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());      
+    }
+  }
+  return (klassOop)klass_h(); 
+}
+
+
+klassOop SystemDictionary::resolve_or_fail(symbolHandle class_name,
+                                           bool throw_error, TRAPS)
+{
+  return resolve_or_fail(class_name, Handle(), Handle(), throw_error, THREAD);
+}
+
+
+// Forwards to resolve_instance_class_or_null
+
+klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {  
+  assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread");
+  if (FieldType::is_array(class_name())) {
+    return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+  } else {
+    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+  }
+}
+
+klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, TRAPS) {  
+  return resolve_or_null(class_name, Handle(), Handle(), THREAD);
+}
+
+// Forwards to resolve_instance_class_or_null
+
+klassOop SystemDictionary::resolve_array_class_or_null(symbolHandle class_name,
+                                                       Handle class_loader, 
+                                                       Handle protection_domain,
+                                                       TRAPS) {  
+  assert(FieldType::is_array(class_name()), "must be array");
+  jint dimension;
+  symbolOop object_key;
+  klassOop k = NULL;  
+  // dimension and object_key are assigned as a side-effect of this call
+  BasicType t = FieldType::get_array_info(class_name(), 
+                                          &dimension, 
+                                          &object_key, 
+                                          CHECK_NULL);
+
+  if (t == T_OBJECT) {
+    symbolHandle h_key(THREAD, object_key);
+    // naked oop "k" is OK here -- we assign back into it
+    k = SystemDictionary::resolve_instance_class_or_null(h_key, 
+                                                         class_loader, 
+                                                         protection_domain, 
+                                                         CHECK_NULL);
+    if (k != NULL) {
+      k = Klass::cast(k)->array_klass(dimension, CHECK_NULL);
+    }
+  } else {
+    k = Universe::typeArrayKlassObj(t);
+    k = typeArrayKlass::cast(k)->array_klass(dimension, CHECK_NULL);
+  }
+  return k;
+}
+
+
+// Must be called for any super-class or super-interface resolution
+// during class definition to allow class circularity checking
+// super-interface callers: 
+//    parse_interfaces - for defineClass & jvmtiRedefineClasses
+// super-class callers:
+//   ClassFileParser - for defineClass & jvmtiRedefineClasses
+//   load_shared_class - while loading a class from shared archive
+//   resolve_instance_class_or_fail:
+//      when resolving a class that has an existing placeholder with
+//      a saved superclass [i.e. a defineClass is currently in progress]
+//      if another thread is trying to resolve the class, it must do
+//      super-class checks on its own thread to catch class circularity
+// This last call is critical in class circularity checking for cases
+// where classloading is delegated to different threads and the
+// classloader lock is released.
+// Take the case: Base->Super->Base
+//   1. If thread T1 tries to do a defineClass of class Base
+//    resolve_super_or_fail creates placeholder: T1, Base (super Super)
+//   2. resolve_instance_class_or_null does not find SD or placeholder for Super
+//    so it tries to load Super
+//   3. If we load the class internally, or user classloader uses same thread
+//      loadClassFromxxx or defineClass via parseClassFile Super ...
+//      3.1 resolve_super_or_fail creates placeholder: T1, Super (super Base) 
+//      3.3 resolve_instance_class_or_null Base, finds placeholder for Base
+//      3.4 calls resolve_super_or_fail Base
+//      3.5 finds T1,Base -> throws class circularity
+//OR 4. If T2 tries to resolve Super via defineClass Super ...
+//      4.1 resolve_super_or_fail creates placeholder: T2, Super (super Base) 
+//      4.2 resolve_instance_class_or_null Base, finds placeholder for Base (super Super)
+//      4.3 calls resolve_super_or_fail Super in parallel on own thread T2
+//      4.4 finds T2, Super -> throws class circularity
+// Must be called, even if superclass is null, since this is
+// where the placeholder entry is created which claims this
+// thread is loading this class/classloader.
+klassOop SystemDictionary::resolve_super_or_fail(symbolHandle child_name,
+                                                 symbolHandle class_name,
+                                                 Handle class_loader,
+                                                 Handle protection_domain,
+                                                 bool is_superclass,
+                                                 TRAPS) {
+
+  // Double-check, if child class is already loaded, just return super-class,interface
+  // Don't add a placeholder if already loaded, i.e. already in system dictionary
+  // Make sure there's a placeholder for the *child* before resolving.
+  // Used as a claim that this thread is currently loading superclass/classloader
+  // Used here for ClassCircularity checks and also for heap verification
+  // (every instanceKlass in the heap needs to be in the system dictionary
+  // or have a placeholder).
+  // Must check ClassCircularity before checking if super class is already loaded
+  //
+  // We might not already have a placeholder if this child_name was
+  // first seen via resolve_from_stream (jni_DefineClass or JVM_DefineClass);
+  // the name of the class might not be known until the stream is actually
+  // parsed.
+  // Bugs 4643874, 4715493
+  // compute_hash can have a safepoint
+
+  unsigned int d_hash = dictionary()->compute_hash(child_name, class_loader);
+  int d_index = dictionary()->hash_to_index(d_hash);
+  unsigned int p_hash = placeholders()->compute_hash(child_name, class_loader);
+  int p_index = placeholders()->hash_to_index(p_hash);
+  // can't throw error holding a lock
+  bool child_already_loaded = false;
+  bool throw_circularity_error = false;
+  {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    klassOop childk = find_class(d_index, d_hash, child_name, class_loader);
+    klassOop quicksuperk;
+    // to support parallel loading: if child done loading, just return superclass
+    // if class_name, & class_loader don't match:
+    // if initial define, SD update will give LinkageError
+    // if redefine: compare_class_versions will give HIERARCHY_CHANGED
+    // so we don't throw an exception here.
+    // see: nsk redefclass014 & java.lang.instrument Instrument032
+    if ((childk != NULL ) && (is_superclass) &&
+       ((quicksuperk = instanceKlass::cast(childk)->super()) != NULL) &&
+      
+         ((Klass::cast(quicksuperk)->name() == class_name()) && 
+            (Klass::cast(quicksuperk)->class_loader()  == class_loader()))) {
+           return quicksuperk;
+    } else {
+      PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, class_loader);
+      if (probe && probe->check_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER)) {
+          throw_circularity_error = true;
+      } 
+
+      // add placeholder entry even if error - callers will remove on error
+      PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, class_loader, PlaceholderTable::LOAD_SUPER, class_name, THREAD); 
+      if (throw_circularity_error) {
+         newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
+      }
+    }
+  }
+  if (throw_circularity_error) {
+      ResourceMark rm(THREAD);
+      THROW_MSG_0(vmSymbols::java_lang_ClassCircularityError(), child_name->as_C_string());
+  }
+
+// java.lang.Object should have been found above
+  assert(class_name() != NULL, "null super class for resolving");
+  // Resolve the super class or interface, check results on return
+  klassOop superk = NULL;
+  superk = SystemDictionary::resolve_or_null(class_name,
+                                                 class_loader,
+                                                 protection_domain,
+                                                 THREAD);
+  
+  KlassHandle superk_h(THREAD, superk);
+  
+  // Note: clean up of placeholders currently in callers of
+  // resolve_super_or_fail - either at update_dictionary time
+  // or on error 
+  {
+  MutexLocker mu(SystemDictionary_lock, THREAD);
+   PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, class_loader);
+   if (probe != NULL) {
+      probe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
+   }
+  }
+  if (HAS_PENDING_EXCEPTION || superk_h() == NULL) {
+    // can null superk
+    superk_h = KlassHandle(THREAD, handle_resolution_exception(class_name, class_loader, protection_domain, true, superk_h, THREAD));
+  }
+
+  return superk_h();
+}
+
+
+void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
+                                                  Handle class_loader,
+                                                  Handle protection_domain,
+                                                  TRAPS) {
+  if(!has_checkPackageAccess()) return;
+
+  // Now we have to call back to java to check if the initiating class has access
+  JavaValue result(T_VOID);
+  if (TraceProtectionDomainVerification) {
+    // Print out trace information
+    tty->print_cr("Checking package access");
+    tty->print(" - class loader:      "); class_loader()->print_value_on(tty);      tty->cr();
+    tty->print(" - protection domain: "); protection_domain()->print_value_on(tty); tty->cr();
+    tty->print(" - loading:           "); klass()->print_value_on(tty);             tty->cr();
+  }
+  
+  assert(class_loader() != NULL, "should not have non-null protection domain for null classloader");
+
+  KlassHandle system_loader(THREAD, SystemDictionary::classloader_klass());
+  JavaCalls::call_special(&result,
+                         class_loader,
+                         system_loader,
+                         vmSymbolHandles::checkPackageAccess_name(),
+                         vmSymbolHandles::class_protectiondomain_signature(), 
+                         Handle(THREAD, klass->java_mirror()),
+                         protection_domain,
+                         THREAD);
+
+  if (TraceProtectionDomainVerification) {
+    if (HAS_PENDING_EXCEPTION) {
+      tty->print_cr(" -> DENIED !!!!!!!!!!!!!!!!!!!!!");
+    } else {
+     tty->print_cr(" -> granted");
+    }
+    tty->cr();
+  }
+
+  if (HAS_PENDING_EXCEPTION) return; 
+    
+  // If no exception has been thrown, we have validated the protection domain
+  // Insert the protection domain of the initiating class into the set.
+  {
+    // We recalculate the entry here -- we've called out to java since
+    // the last time it was calculated.
+    symbolHandle kn(THREAD, klass->name());
+    unsigned int d_hash = dictionary()->compute_hash(kn, class_loader);
+    int d_index = dictionary()->hash_to_index(d_hash);
+
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    { 
+      // Note that we have an entry, and entries can be deleted only during GC,
+      // so we cannot allow GC to occur while we're holding this entry.
+
+      // We're using a No_Safepoint_Verifier to catch any place where we
+      // might potentially do a GC at all.
+      // SystemDictionary::do_unloading() asserts that classes are only
+      // unloaded at a safepoint.
+      No_Safepoint_Verifier nosafepoint;
+      dictionary()->add_protection_domain(d_index, d_hash, klass, class_loader,
+                                          protection_domain, THREAD);
+    }
+  }
+}
+
+// We only get here if this thread finds that another thread
+// has already claimed the placeholder token for the current operation,
+// but that other thread either never owned or gave up the
+// object lock
+// Waits on SystemDictionary_lock to indicate placeholder table updated
+// On return, caller must recheck placeholder table state
+//
+// We only get here if 
+//  1) custom classLoader, i.e. not bootstrap classloader
+//  2) UnsyncloadClass not set
+//  3) custom classLoader has broken the class loader objectLock
+//     so another thread got here in parallel
+//
+// lockObject must be held. 
+// Complicated dance due to lock ordering:
+// Must first release the classloader object lock to
+// allow initial definer to complete the class definition
+// and to avoid deadlock
+// Reclaim classloader lock object with same original recursion count
+// Must release SystemDictionary_lock after notify, since
+// class loader lock must be claimed before SystemDictionary_lock
+// to prevent deadlocks
+//
+// The notify allows applications that did an untimed wait() on
+// the classloader object lock to not hang.
+void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
+  assert_lock_strong(SystemDictionary_lock);
+
+  bool calledholdinglock 
+      = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
+  assert(calledholdinglock,"must hold lock for notify");
+  assert(!UnsyncloadClass, "unexpected double_lock_wait");
+  ObjectSynchronizer::notifyall(lockObject, THREAD);
+  intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
+  SystemDictionary_lock->wait();
+  SystemDictionary_lock->unlock();
+  ObjectSynchronizer::reenter(lockObject, recursions, THREAD);
+  SystemDictionary_lock->lock();
+}
+
+// If the class is in the placeholder table, class loading is in progress
+// For cases where the application changes threads to load classes, it
+// is critical to ClassCircularity detection that we try loading
+// the superclass on the same thread internally, so we do parallel
+// super class loading here.
+// This also is critical in cases where the original thread gets stalled
+// even in non-circularity situations.
+// Note: only one thread can define the class, but multiple can resolve
+// Note: must call resolve_super_or_fail even if null super -
+// to force placeholder entry creation for this class
+// Caller must check for pending exception
+// Returns non-null klassOop if other thread has completed load
+// and we are done, 
+// If return null klassOop and no pending exception, the caller must load the class
+// At this point, handle_parallel_super_load should never be called
+// with the bootstrapclass loader
+instanceKlassHandle SystemDictionary::handle_parallel_super_load(
+    symbolHandle name, symbolHandle superclassname, Handle class_loader, 
+    Handle protection_domain, Handle lockObject, TRAPS) {
+
+  instanceKlassHandle nh = instanceKlassHandle(); // null Handle
+  unsigned int d_hash = dictionary()->compute_hash(name, class_loader);
+  int d_index = dictionary()->hash_to_index(d_hash);
+  unsigned int p_hash = placeholders()->compute_hash(name, class_loader);
+  int p_index = placeholders()->hash_to_index(p_hash);
+
+  // superk is not used, resolve_super called for circularity check only
+  // This code is reached in two situations. One if this thread
+  // is loading the same class twice (e.g. ClassCircularity, or 
+  // java.lang.instrument).
+  // The second is if another thread started the resolve_super first
+  // and has not yet finished. 
+  // In both cases the original caller will clean up the placeholder
+  // entry on error.
+  klassOop superk = SystemDictionary::resolve_super_or_fail(name,
+                                                          superclassname,
+                                                          class_loader,
+                                                          protection_domain,
+                                                          true,
+                                                          CHECK_(nh));
+  // We don't redefine the class, so we just need to clean up if there
+  // was not an error (don't want to modify any system dictionary
+  // data structures).
+  {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
+    SystemDictionary_lock->notify_all();
+  }
+
+  // UnsyncloadClass does NOT wait for parallel superclass loads to complete
+  // Bootstrap classloader does wait for parallel superclass loads
+ if (UnsyncloadClass) {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    // Check if classloading completed while we were loading superclass or waiting
+    klassOop check = find_class(d_index, d_hash, name, class_loader);
+    if (check != NULL) {
+      // Klass is already loaded, so just return it
+      return(instanceKlassHandle(THREAD, check));
+    } else {
+      return nh;
+    }
+  } 
+
+  // must loop to both handle other placeholder updates
+  // and spurious notifications
+  bool super_load_in_progress = true;
+  PlaceholderEntry* placeholder;
+  while (super_load_in_progress) {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    // Check if classloading completed while we were loading superclass or waiting
+    klassOop check = find_class(d_index, d_hash, name, class_loader);
+    if (check != NULL) {
+      // Klass is already loaded, so just return it
+      return(instanceKlassHandle(THREAD, check));
+    } else {
+      placeholder = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+      if (placeholder && placeholder->super_load_in_progress() ){
+        // Before UnsyncloadClass:
+        // We only get here if the application has released the
+        // classloader lock when another thread was in the middle of loading a
+        // superclass/superinterface for this class, and now
+        // this thread is also trying to load this class.
+        // To minimize surprises, the first thread that started to
+        // load a class should be the one to complete the loading
+        // with the classfile it initially expected.
+        // This logic has the current thread wait once it has done
+        // all the superclass/superinterface loading it can, until
+        // the original thread completes the class loading or fails
+        // If it completes we will use the resulting instanceKlass
+        // which we will find below in the systemDictionary.
+        // We also get here for parallel bootstrap classloader
+        if (class_loader.is_null()) {
+          SystemDictionary_lock->wait();
+        } else {
+          double_lock_wait(lockObject, THREAD);
+        }
+      } else {
+        // If not in SD and not in PH, other thread's load must have failed
+        super_load_in_progress = false;
+      }
+    }
+  }
+  return (nh);
+}
+
+
+klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {
+  assert(class_name.not_null() && !FieldType::is_array(class_name()), "invalid class name");
+  // First check to see if we should remove wrapping L and ;
+  symbolHandle name;    
+  if (FieldType::is_obj(class_name())) {
+    ResourceMark rm(THREAD);
+    // Ignore wrapping L and ;.
+    name = oopFactory::new_symbol_handle(class_name()->as_C_string() + 1, class_name()->utf8_length() - 2, CHECK_NULL);    
+  } else {
+    name = class_name;
+  }
+
+  // UseNewReflection
+  // Fix for 4474172; see evaluation for more details
+  class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
+
+  // Do lookup to see if class already exist and the protection domain
+  // has the right access
+  unsigned int d_hash = dictionary()->compute_hash(name, class_loader);
+  int d_index = dictionary()->hash_to_index(d_hash);
+  klassOop probe = dictionary()->find(d_index, d_hash, name, class_loader,
+                                      protection_domain, THREAD);
+  if (probe != NULL) return probe;
+
+
+  // Non-bootstrap class loaders will call out to class loader and
+  // define via jvm/jni_DefineClass which will acquire the
+  // class loader object lock to protect against multiple threads
+  // defining the class in parallel by accident.
+  // This lock must be acquired here so the waiter will find
+  // any successful result in the SystemDictionary and not attempt
+  // the define
+  // Classloaders that support parallelism, e.g. bootstrap classloader,
+  // or all classloaders with UnsyncloadClass do not acquire lock here
+  bool DoObjectLock = true;
+  if (UnsyncloadClass || (class_loader.is_null())) {
+    DoObjectLock = false;
+  }
+
+  unsigned int p_hash = placeholders()->compute_hash(name, class_loader);
+  int p_index = placeholders()->hash_to_index(p_hash);
+
+  // Class is not in SystemDictionary so we have to do loading.
+  // Make sure we are synchronized on the class loader before we proceed
+  Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
+  check_loader_lock_contention(lockObject, THREAD);
+  ObjectLocker ol(lockObject, THREAD, DoObjectLock);
+
+  // Check again (after locking) if class already exist in SystemDictionary
+  bool class_has_been_loaded   = false;
+  bool super_load_in_progress  = false;
+  bool havesupername = false;
+  instanceKlassHandle k;
+  PlaceholderEntry* placeholder;
+  symbolHandle superclassname;
+
+  {           
+    MutexLocker mu(SystemDictionary_lock, THREAD);  
+    klassOop check = find_class(d_index, d_hash, name, class_loader);
+    if (check != NULL) {
+      // Klass is already loaded, so just return it
+      class_has_been_loaded = true;
+      k = instanceKlassHandle(THREAD, check);
+    } else {
+      placeholder = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+      if (placeholder && placeholder->super_load_in_progress()) {
+         super_load_in_progress = true;
+         if (placeholder->havesupername() == true) {
+           superclassname = symbolHandle(THREAD, placeholder->supername());
+           havesupername = true;
+         }
+      } 
+    }
+  }
+
+  // If the class is in the placeholder table, class loading is in progress
+  if (super_load_in_progress && havesupername==true) {
+    k = SystemDictionary::handle_parallel_super_load(name, superclassname, 
+        class_loader, protection_domain, lockObject, THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      return NULL;
+    }
+    if (!k.is_null()) {
+      class_has_been_loaded = true;
+    }
+  }
+
+  if (!class_has_been_loaded) {
+  
+    // add placeholder entry to record loading instance class
+    // to prevent parallel instance class loading if classloader object lock
+    // broken
+    // Also needed to prevent modifying bootclasssearchpath
+    // in parallel with a classload of same classname
+    // Classloaders that support parallelism, such as the bootstrap classloader
+    // or all classloaders with UnsyncloadClass flag
+    // allow parallel loading of same class/classloader pair
+    symbolHandle nullsymbolHandle;
+    bool throw_circularity_error = false;
+    // If not a classloader that supports parallelism and
+    // if NOT UnsyncloadClass, and we find an existing LOAD_INSTANCE for this
+    // class/classloader pair,  we know that the
+    // custom classloader explicitly did a wait to release the lock
+    // since we called out to loadClass with the objectlock already held
+    // In that case we should already own the ObjectLocker
+    // and want to send a notify on it
+    // For parallel bootstrap classloader we won't own the ObjectLocker
+    {
+      MutexLocker mu(SystemDictionary_lock, THREAD);
+      if (!UnsyncloadClass) {
+        PlaceholderEntry* oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+        if (oldprobe) {
+          // only need check_seen_thread once, not on each loop
+          // 6341374 java/lang/Instrument with -Xcomp
+          if (oldprobe->check_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE)) {
+            throw_circularity_error = true;
+          } else {
+            while (!class_has_been_loaded && oldprobe && oldprobe->instance_load_in_progress()) {
+     
+              // For classloaders that allow parallelism, including bootstrap classloader
+              // we want to wait on the first requestor for a specific
+              // class/classloader pair
+              if (class_loader.is_null()) {
+                SystemDictionary_lock->wait();
+              } else {
+              // if another thread is already loading this instance, then we
+              // know the user has broken the classloader lock
+              // we need to ensure that the first requestor completes the request
+              // and other requestors wait for that completion
+              // The notify allows applications that did an untimed wait() on
+              // the classloader object lock to not hang.
+              // see test b4699981 
+                double_lock_wait(lockObject, THREAD);
+              }
+              // Check if classloading completed while we were waiting
+              klassOop check = find_class(d_index, d_hash, name, class_loader);
+              if (check != NULL) {
+                // Klass is already loaded, so just return it
+                k = instanceKlassHandle(THREAD, check);
+                class_has_been_loaded = true;
+              }
+              // check if other thread failed to load and cleaned up
+              oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+            } 
+          } 
+        }
+      }
+      // add LOAD_INSTANCE regardless of flag
+      // classloaders that support parallelism, such as bootstrap classloader
+      // or all loaders with  UnsyncloadClass 
+      // allow competing threads to try LOAD_INSTANCE in parallel
+      // add placeholder entry even if error - callers will remove on error
+      if (!class_has_been_loaded) {
+        PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, class_loader, PlaceholderTable::LOAD_INSTANCE, nullsymbolHandle, THREAD); 
+        if (throw_circularity_error) {
+          newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
+        }
+      }
+    }
+    // must throw error outside of owning lock
+    if (throw_circularity_error) {
+      ResourceMark rm(THREAD);
+      THROW_MSG_0(vmSymbols::java_lang_ClassCircularityError(), name->as_C_string());
+    }
+
+    if (!class_has_been_loaded) {
+
+      // Do actual loading
+      k = load_instance_class(name, class_loader, THREAD);
+
+      // In custom class loaders, the usual findClass calls
+      // findLoadedClass, which directly searches  the SystemDictionary, then
+      // defineClass. If these are not atomic with respect to other threads,
+      // the findLoadedClass can fail, but the defineClass can get a 
+      // LinkageError:: duplicate class definition.
+      // If they got a linkageError, check if a parallel class load succeeded.
+      // If it did, then for bytecode resolution the specification requires
+      // that we return the same result we did for the other thread, i.e. the
+      // successfully loaded instanceKlass
+      // Note: Class can not be unloaded as long as any classloader refs exist
+      // Should not get here for classloaders that support parallelism
+      // with the new cleaner mechanism, e.g. bootstrap classloader
+      if (UnsyncloadClass || (class_loader.is_null())) {
+        if (k.is_null() && HAS_PENDING_EXCEPTION 
+          && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+          MutexLocker mu(SystemDictionary_lock, THREAD);
+          klassOop check = find_class(d_index, d_hash, name, class_loader);
+          if (check != NULL) {
+            // Klass is already loaded, so just use it
+            k = instanceKlassHandle(THREAD, check);
+            CLEAR_PENDING_EXCEPTION;
+            guarantee((!class_loader.is_null()), "dup definition for bootstrap loader?");
+          }
+        }
+      }
+
+      // clean up placeholder entries for success or error
+      // This cleans up LOAD_INSTANCE entries
+      // It also cleans up LOAD_SUPER entries on errors from 
+      // calling load_instance_class
+      { 
+        MutexLocker mu(SystemDictionary_lock, THREAD);
+        PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
+        if (probe != NULL) {
+          probe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
+          placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
+          SystemDictionary_lock->notify_all();
+        }
+      }
+
+      // If everything was OK (no exceptions, no null return value), and
+      // class_loader is NOT the defining loader, do a little more bookkeeping.
+      if (!HAS_PENDING_EXCEPTION && !k.is_null() && 
+        k->class_loader() != class_loader()) {
+
+        check_constraints(d_index, d_hash, k, class_loader, false, THREAD);
+
+        // Need to check for a PENDING_EXCEPTION again; check_constraints
+        // can throw and doesn't use the CHECK macro.
+        if (!HAS_PENDING_EXCEPTION) {
+          { // Grabbing the Compile_lock prevents systemDictionary updates
+            // during compilations. 
+            MutexLocker mu(Compile_lock, THREAD);      
+            update_dictionary(d_index, d_hash, p_index, p_hash,
+                            k, class_loader, THREAD);
+          }
+          if (JvmtiExport::should_post_class_load()) {
+            Thread *thread = THREAD;
+            assert(thread->is_Java_thread(), "thread->is_Java_thread()");
+            JvmtiExport::post_class_load((JavaThread *) thread, k());
+          }
+        }
+      }
+      if (HAS_PENDING_EXCEPTION || k.is_null()) {
+        // On error, clean up placeholders
+        {
+          MutexLocker mu(SystemDictionary_lock, THREAD);
+          placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
+          SystemDictionary_lock->notify_all();
+        }
+        return NULL;
+      }
+    }
+  }
+
+#ifdef ASSERT
+  {
+    Handle loader (THREAD, k->class_loader());
+    MutexLocker mu(SystemDictionary_lock, THREAD);  
+    oop kk = find_class_or_placeholder(name, loader);
+    assert(kk == k(), "should be present in dictionary");
+  }
+#endif
+
+  // return if the protection domain in NULL
+  if (protection_domain() == NULL) return k();
+
+  // Check the protection domain has the right access 
+  {
+    MutexLocker mu(SystemDictionary_lock, THREAD);  
+    // Note that we have an entry, and entries can be deleted only during GC,
+    // so we cannot allow GC to occur while we're holding this entry.
+    // We're using a No_Safepoint_Verifier to catch any place where we
+    // might potentially do a GC at all.
+    // SystemDictionary::do_unloading() asserts that classes are only
+    // unloaded at a safepoint.
+    No_Safepoint_Verifier nosafepoint;
+    if (dictionary()->is_valid_protection_domain(d_index, d_hash, name,
+                                                 class_loader,
+                                                 protection_domain)) {
+      return k();
+    }
+  }
+
+  // Verify protection domain. If it fails an exception is thrown
+  validate_protection_domain(k, class_loader, protection_domain, CHECK_(klassOop(NULL)));
+
+  return k();
+}
+
+
+// This routine does not lock the system dictionary.
+//
+// Since readers don't hold a lock, we must make sure that system
+// dictionary entries are only removed at a safepoint (when only one
+// thread is running), and are added to in a safe way (all links must
+// be updated in an MT-safe manner).
+//
+// Callers should be aware that an entry could be added just after
+// _dictionary->bucket(index) is read here, so the caller will not see
+// the new entry.
+
+klassOop SystemDictionary::find(symbolHandle class_name,
+                                Handle class_loader, 
+                                Handle protection_domain,
+                                TRAPS) {
+
+  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
+  int d_index = dictionary()->hash_to_index(d_hash);
+
+  {
+    // Note that we have an entry, and entries can be deleted only during GC,
+    // so we cannot allow GC to occur while we're holding this entry.
+    // We're using a No_Safepoint_Verifier to catch any place where we
+    // might potentially do a GC at all.
+    // SystemDictionary::do_unloading() asserts that classes are only
+    // unloaded at a safepoint.
+    No_Safepoint_Verifier nosafepoint;
+    return dictionary()->find(d_index, d_hash, class_name, class_loader,
+                              protection_domain, THREAD);
+  }
+}
+
+
+// Look for a loaded instance or array klass by name.  Do not do any loading.
+// return NULL in case of error.
+klassOop SystemDictionary::find_instance_or_array_klass(symbolHandle class_name,
+                                                        Handle class_loader,
+							Handle protection_domain,
+                                                        TRAPS) {
+  klassOop k = NULL;
+  assert(class_name() != NULL, "class name must be non NULL");
+  if (FieldType::is_array(class_name())) {
+    // The name refers to an array.  Parse the name.
+    jint dimension;
+    symbolOop object_key;
+
+    // dimension and object_key are assigned as a side-effect of this call
+    BasicType t = FieldType::get_array_info(class_name(), &dimension,
+					    &object_key, CHECK_(NULL));
+    if (t != T_OBJECT) {
+      k = Universe::typeArrayKlassObj(t);
+    } else {
+      symbolHandle h_key(THREAD, object_key);
+      k = SystemDictionary::find(h_key, class_loader, protection_domain, THREAD);
+    }
+    if (k != NULL) {
+      k = Klass::cast(k)->array_klass_or_null(dimension);
+    }
+  } else {
+    k = find(class_name, class_loader, protection_domain, THREAD);
+  }
+  return k;
+}
+
+// Note: this method is much like resolve_from_stream, but
+// updates no supplemental data structures.
+// TODO consolidate the two methods with a helper routine?
+klassOop SystemDictionary::parse_stream(symbolHandle class_name,
+                                        Handle class_loader,
+                                        Handle protection_domain,
+                                        ClassFileStream* st,
+                                        TRAPS) {
+  symbolHandle parsed_name;
+
+  // Parse the stream. Note that we do this even though this klass might
+  // already be present in the SystemDictionary, otherwise we would not
+  // throw potential ClassFormatErrors.
+  //
+  // Note: "name" is updated.
+  // Further note:  a placeholder will be added for this class when
+  //   super classes are loaded (resolve_super_or_fail). We expect this
+  //   to be called for all classes but java.lang.Object; and we preload
+  //   java.lang.Object through resolve_or_fail, not this path.
+
+  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
+                                                             class_loader,
+                                                             protection_domain,
+                                                             parsed_name,
+                                                             THREAD);
+
+
+  // We don't redefine the class, so we just need to clean up whether there
+  // was an error or not (don't want to modify any system dictionary
+  // data structures).
+  // Parsed name could be null if we threw an error before we got far
+  // enough along to parse it -- in that case, there is nothing to clean up.
+  if (!parsed_name.is_null()) {
+    unsigned int p_hash = placeholders()->compute_hash(parsed_name, 
+                                                       class_loader);
+    int p_index = placeholders()->hash_to_index(p_hash);
+    {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    placeholders()->find_and_remove(p_index, p_hash, parsed_name, class_loader, THREAD);
+    SystemDictionary_lock->notify_all();
+    }
+  }
+
+  return k();
+}
+
+// Add a klass to the system from a stream (called by jni_DefineClass and
+// JVM_DefineClass).
+// Note: class_name can be NULL. In that case we do not know the name of 
+// the class until we have parsed the stream.
+
+klassOop SystemDictionary::resolve_from_stream(symbolHandle class_name, 
+                                               Handle class_loader, 
+                                               Handle protection_domain, 
+                                               ClassFileStream* st, 
+                                               TRAPS) {
+
+  // Make sure we are synchronized on the class loader before we initiate 
+  // loading.
+  Handle lockObject = compute_loader_lock_object(class_loader, THREAD); 
+  check_loader_lock_contention(lockObject, THREAD);
+  ObjectLocker ol(lockObject, THREAD);
+
+  symbolHandle parsed_name;
+
+  // Parse the stream. Note that we do this even though this klass might 
+  // already be present in the SystemDictionary, otherwise we would not 
+  // throw potential ClassFormatErrors.
+  //
+  // Note: "name" is updated.
+  // Further note:  a placeholder will be added for this class when
+  //   super classes are loaded (resolve_super_or_fail). We expect this
+  //   to be called for all classes but java.lang.Object; and we preload
+  //   java.lang.Object through resolve_or_fail, not this path.
+
+  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, 
+                                                             class_loader, 
+                                                             protection_domain,
+                                                             parsed_name,
+                                                             THREAD);
+
+  const char* pkg = "java/";
+  if (!HAS_PENDING_EXCEPTION && 
+      !class_loader.is_null() && 
+      !parsed_name.is_null() && 
+      !strncmp((const char*)parsed_name->bytes(), pkg, strlen(pkg))) {
+    // It is illegal to define classes in the "java." package from
+    // JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
+    ResourceMark rm(THREAD);
+    char* name = parsed_name->as_C_string();
+    char* index = strrchr(name, '/');
+    *index = '\0'; // chop to just the package name
+    while ((index = strchr(name, '/')) != NULL) {
+      *index = '.'; // replace '/' with '.' in package name
+    }
+    const char* fmt = "Prohibited package name: %s";
+    size_t len = strlen(fmt) + strlen(name);
+    char* message = NEW_RESOURCE_ARRAY(char, len);
+    jio_snprintf(message, len, fmt, name);
+    Exceptions::_throw_msg(THREAD_AND_LOCATION, 
+      vmSymbols::java_lang_SecurityException(), message);
+  }
+
+  if (!HAS_PENDING_EXCEPTION) {
+    assert(!parsed_name.is_null(), "Sanity");
+    assert(class_name.is_null() || class_name() == parsed_name(), 
+           "name mismatch");
+    // Verification prevents us from creating names with dots in them, this
+    // asserts that that's the case.
+    assert(is_internal_format(parsed_name),
+           "external class name format used internally");
+
+    // Add class just loaded
+    define_instance_class(k, THREAD);
+  }
+
+  // If parsing the class file or define_instance_class failed, we
+  // need to remove the placeholder added on our behalf. But we
+  // must make sure parsed_name is valid first (it won't be if we had
+  // a format error before the class was parsed far enough to
+  // find the name).
+  if (HAS_PENDING_EXCEPTION && !parsed_name.is_null()) {
+    unsigned int p_hash = placeholders()->compute_hash(parsed_name, 
+                                                       class_loader);
+    int p_index = placeholders()->hash_to_index(p_hash);
+    {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    placeholders()->find_and_remove(p_index, p_hash, parsed_name, class_loader, THREAD);
+    SystemDictionary_lock->notify_all();
+    }
+    return NULL;
+  }
+
+  // Make sure that we didn't leave a place holder in the
+  // SystemDictionary; this is only done on success
+  debug_only( {
+    if (!HAS_PENDING_EXCEPTION) {
+      assert(!parsed_name.is_null(), "parsed_name is still null?");
+      symbolHandle h_name   (THREAD, k->name());
+      Handle h_loader (THREAD, k->class_loader());
+
+      MutexLocker mu(SystemDictionary_lock, THREAD);
+
+      oop check = find_class_or_placeholder(parsed_name, class_loader);
+      assert(check == k(), "should be present in the dictionary");
+
+      oop check2 = find_class_or_placeholder(h_name, h_loader);
+      assert(check == check2, "name inconsistancy in SystemDictionary");
+    }
+  } );
+
+  return k();
+}
+
+
+void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length,
+                                             int number_of_entries) {
+  assert(length == _nof_buckets * sizeof(HashtableBucket),
+         "bad shared dictionary size.");
+  _shared_dictionary = new Dictionary(_nof_buckets, t, number_of_entries);
+}
+
+
+// If there is a shared dictionary, then find the entry for the
+// given shared system class, if any.
+
+klassOop SystemDictionary::find_shared_class(symbolHandle class_name) {
+  if (shared_dictionary() != NULL) {
+    unsigned int d_hash = dictionary()->compute_hash(class_name, Handle());
+    int d_index = dictionary()->hash_to_index(d_hash);
+    return shared_dictionary()->find_shared_class(d_index, d_hash, class_name);
+  } else {
+    return NULL;
+  }
+}
+
+
+// Load a class from the shared spaces (found through the shared system
+// dictionary).  Force the superclass and all interfaces to be loaded.
+// Update the class definition to include sibling classes and no
+// subclasses (yet).  [Classes in the shared space are not part of the
+// object hierarchy until loaded.]
+
+instanceKlassHandle SystemDictionary::load_shared_class(
+                 symbolHandle class_name, Handle class_loader, TRAPS) {
+  instanceKlassHandle ik (THREAD, find_shared_class(class_name));
+  return load_shared_class(ik, class_loader, THREAD);
+}
+
+// Note well!  Changes to this method may affect oop access order
+// in the shared archive.  Please take care to not make changes that
+// adversely affect cold start time by changing the oop access order
+// that is specified in dump.cpp MarkAndMoveOrderedReadOnly and
+// MarkAndMoveOrderedReadWrite closures.
+instanceKlassHandle SystemDictionary::load_shared_class(
+                 instanceKlassHandle ik, Handle class_loader, TRAPS) {
+  assert(class_loader.is_null(), "non-null classloader for shared class?");
+  if (ik.not_null()) {
+    instanceKlassHandle nh = instanceKlassHandle(); // null Handle
+    symbolHandle class_name(THREAD, ik->name());
+
+    // Found the class, now load the superclass and interfaces.  If they
+    // are shared, add them to the main system dictionary and reset
+    // their hierarchy references (supers, subs, and interfaces).
+
+    if (ik->super() != NULL) {
+      symbolHandle cn(THREAD, ik->super()->klass_part()->name());
+      resolve_super_or_fail(class_name, cn,
+                            class_loader, Handle(), true, CHECK_(nh));
+    }
+
+    objArrayHandle interfaces (THREAD, ik->local_interfaces());
+    int num_interfaces = interfaces->length();
+    for (int index = 0; index < num_interfaces; index++) {
+      klassOop k = klassOop(interfaces->obj_at(index));
+
+      // Note: can not use instanceKlass::cast here because
+      // interfaces' instanceKlass's C++ vtbls haven't been
+      // reinitialized yet (they will be once the interface classes
+      // are loaded)
+      symbolHandle name (THREAD, k->klass_part()->name());
+      resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
+    }
+
+    // Adjust methods to recover missing data.  They need addresses for
+    // interpreter entry points and their default native method address
+    // must be reset.
+
+    // Updating methods must be done under a lock so multiple
+    // threads don't update these in parallel
+    // Shared classes are all currently loaded by the bootstrap
+    // classloader, so this will never cause a deadlock on
+    // a custom class loader lock.
+
+    {
+      Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
+      check_loader_lock_contention(lockObject, THREAD);
+      ObjectLocker ol(lockObject, THREAD, true);
+
+      objArrayHandle methods (THREAD, ik->methods());
+      int num_methods = methods->length();
+      for (int index2 = 0; index2 < num_methods; ++index2) {
+        methodHandle m(THREAD, methodOop(methods->obj_at(index2)));
+        m()->link_method(m, CHECK_(nh));
+      }
+    }
+
+    if (TraceClassLoading) {
+      ResourceMark rm;
+      tty->print("[Loaded %s", ik->external_name());
+      tty->print(" from shared objects file");
+      tty->print_cr("]");
+    }
+    // notify a class loaded from shared object
+    ClassLoadingService::notify_class_loaded(instanceKlass::cast(ik()), 
+                                             true /* shared class */);
+  }
+  return ik;
+}
+
+
+
+instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS) {
+  instanceKlassHandle nh = instanceKlassHandle(); // null Handle
+  if (class_loader.is_null()) {
+    // Search the shared system dictionary for classes preloaded into the
+    // shared spaces.
+    instanceKlassHandle k;
+    k = load_shared_class(class_name, class_loader, THREAD);
+
+    if (k.is_null()) {
+      // Use VM class loader
+      k = ClassLoader::load_classfile(class_name, CHECK_(nh));
+    }
+
+    // find_or_define_instance_class may return a different k
+    if (!k.is_null()) {
+      k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
+    }
+    return k;
+  } else {
+    // Use user specified class loader to load class. Call loadClass operation on class_loader.
+    ResourceMark rm(THREAD);
+      
+    Handle s = java_lang_String::create_from_symbol(class_name, CHECK_(nh));
+    // Translate to external class name format, i.e., convert '/' chars to '.'
+    Handle string = java_lang_String::externalize_classname(s, CHECK_(nh));
+
+    JavaValue result(T_OBJECT);
+
+    KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());
+
+    // UnsyncloadClass option means don't synchronize loadClass() calls.
+    // loadClassInternal() is synchronized and public loadClass(String) is not.
+    // This flag is for diagnostic purposes only. It is risky to call
+    // custom class loaders without synchronization.
+    // WARNING If a custom class loader does NOT synchronizer findClass, or callers of
+    // findClass, this flag risks unexpected timing bugs in the field.
+    // Do NOT assume this will be supported in future releases.
+    if (!UnsyncloadClass && has_loadClassInternal()) {
+      JavaCalls::call_special(&result, 
+                              class_loader, 
+                              spec_klass,
+                              vmSymbolHandles::loadClassInternal_name(),
+                              vmSymbolHandles::string_class_signature(), 
+                              string,
+                              CHECK_(nh));
+    } else {
+      JavaCalls::call_virtual(&result, 
+                              class_loader, 
+                              spec_klass,
+                              vmSymbolHandles::loadClass_name(),
+                              vmSymbolHandles::string_class_signature(), 
+                              string,
+                              CHECK_(nh));
+    }
+
+    assert(result.get_type() == T_OBJECT, "just checking");
+    oop obj = (oop) result.get_jobject();
+
+    // Primitive classes return null since forName() can not be
+    // used to obtain any of the Class objects representing primitives or void
+    if ((obj != NULL) && !(java_lang_Class::is_primitive(obj))) {      
+      instanceKlassHandle k = 
+                instanceKlassHandle(THREAD, java_lang_Class::as_klassOop(obj));
+      // For user defined Java class loaders, check that the name returned is
+      // the same as that requested.  This check is done for the bootstrap
+      // loader when parsing the class file.
+      if (class_name() == k->name()) {
+        return k;
+      }
+    }
+    // Class is not found or has the wrong name, return NULL
+    return nh;
+  }
+}
+
+void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
+
+  Handle class_loader_h(THREAD, k->class_loader());
+
+  // for bootstrap classloader don't acquire lock
+  if (!class_loader_h.is_null()) {
+    assert(ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, 
+         compute_loader_lock_object(class_loader_h, THREAD)),
+         "define called without lock");
+  }
+
+
+  // Check class-loading constraints. Throw exception if violation is detected.
+  // Grabs and releases SystemDictionary_lock
+  // The check_constraints/find_class call and update_dictionary sequence
+  // must be "atomic" for a specific class/classloader pair so we never
+  // define two different instanceKlasses for that class/classloader pair.
+  // Existing classloaders will call define_instance_class with the
+  // classloader lock held
+  // Parallel classloaders will call find_or_define_instance_class
+  // which will require a token to perform the define class
+  symbolHandle name_h(THREAD, k->name());
+  unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h);
+  int d_index = dictionary()->hash_to_index(d_hash);
+  check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK);
+
+  // Register class just loaded with class loader (placed in Vector)
+  // Note we do this before updating the dictionary, as this can
+  // fail with an OutOfMemoryError (if it does, we will *not* put this
+  // class in the dictionary and will not update the class hierarchy).
+  if (k->class_loader() != NULL) {
+    methodHandle m(THREAD, Universe::loader_addClass_method());
+    JavaValue result(T_VOID);
+    JavaCallArguments args(class_loader_h);
+    args.push_oop(Handle(THREAD, k->java_mirror()));
+    JavaCalls::call(&result, m, &args, CHECK);
+  }
+
+  // Add the new class. We need recompile lock during update of CHA.
+  {
+    unsigned int p_hash = placeholders()->compute_hash(name_h, class_loader_h);
+    int p_index = placeholders()->hash_to_index(p_hash);
+
+    MutexLocker mu_r(Compile_lock, THREAD);                    
+
+    // Add to class hierarchy, initialize vtables, and do possible
+    // deoptimizations.
+    add_to_hierarchy(k, CHECK); // No exception, but can block
+
+    // Add to systemDictionary - so other classes can see it.
+    // Grabs and releases SystemDictionary_lock
+    update_dictionary(d_index, d_hash, p_index, p_hash,
+                      k, class_loader_h, THREAD);
+  }
+  k->eager_initialize(THREAD);
+
+  // notify jvmti
+  if (JvmtiExport::should_post_class_load()) {
+      assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
+      JvmtiExport::post_class_load((JavaThread *) THREAD, k());
+
+  }
+}
+
+// Support parallel classloading
+// Initial implementation for bootstrap classloader
+// For future:
+// For custom class loaders that support parallel classloading,
+// in case they do not synchronize around
+// FindLoadedClass/DefineClass calls, we check for parallel
+// loading for them, wait if a defineClass is in progress
+// and return the initial requestor's results
+// For better performance, the class loaders should synchronize
+// findClass(), i.e. FindLoadedClass/DefineClass or they
+// potentially waste time reading and parsing the bytestream.
+// Note: VM callers should ensure consistency of k/class_name,class_loader
+instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle class_name, Handle class_loader, instanceKlassHandle k, TRAPS) {
+
+  instanceKlassHandle nh = instanceKlassHandle(); // null Handle
+
+  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
+  int d_index = dictionary()->hash_to_index(d_hash);
+
+// Hold SD lock around find_class and placeholder creation for DEFINE_CLASS
+  unsigned int p_hash = placeholders()->compute_hash(class_name, class_loader);
+  int p_index = placeholders()->hash_to_index(p_hash);
+  PlaceholderEntry* probe;
+
+  { 
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    // First check if class already defined
+    klassOop check = find_class(d_index, d_hash, class_name, class_loader);
+    if (check != NULL) {
+      return(instanceKlassHandle(THREAD, check));
+    }
+
+    // Acquire define token for this class/classloader
+    symbolHandle nullsymbolHandle;
+    probe = placeholders()->find_and_add(p_index, p_hash, class_name, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD); 
+    // Check if another thread defining in parallel
+    if (probe->definer() == NULL) {
+      // Thread will define the class
+      probe->set_definer(THREAD);
+    } else {
+      // Wait for defining thread to finish and return results
+      while (probe->definer() != NULL) {
+        SystemDictionary_lock->wait();
+      }
+      if (probe->instanceKlass() != NULL) {
+        probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
+        return(instanceKlassHandle(THREAD, probe->instanceKlass()));
+      } else {
+        // If definer had an error, try again as any new thread would
+        probe->set_definer(THREAD);
+#ifdef ASSERT
+        klassOop check = find_class(d_index, d_hash, class_name, class_loader);
+        assert(check == NULL, "definer missed recording success");
+#endif
+      }
+    }
+  }
+
+  define_instance_class(k, THREAD);
+
+  Handle linkage_exception = Handle(); // null handle
+
+  // definer must notify any waiting threads
+  {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, class_name, class_loader);
+    assert(probe != NULL, "DEFINE_INSTANCE placeholder lost?");
+    if (probe != NULL) {
+      if (HAS_PENDING_EXCEPTION) {
+        linkage_exception = Handle(THREAD,PENDING_EXCEPTION);
+        CLEAR_PENDING_EXCEPTION;
+      } else {
+        probe->set_instanceKlass(k());
+      }
+      probe->set_definer(NULL);
+      probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
+      SystemDictionary_lock->notify_all();
+    }
+  }
+
+  // Can't throw exception while holding lock due to rank ordering
+  if (linkage_exception() != NULL) {
+    THROW_OOP_(linkage_exception(), nh); // throws exception and returns
+  }
+
+  return k;
+}
+
+// Return the object to synchronize on while loading a class with the given
+// loader.  The bootstrap (NULL) loader uses the VM-private
+// _system_loader_lock_obj; every other loader is its own lock object.
+Handle SystemDictionary::compute_loader_lock_object(Handle class_loader, TRAPS) {
+  if (!class_loader.is_null()) {
+    return class_loader;
+  }
+  // Bootstrap loader: synchronize on the private system loader lock.
+  return Handle(THREAD, _system_loader_lock_obj);
+}
+
+// This method is added to check how often we have to wait to grab loader
+// lock. The results are being recorded in the performance counters defined in
+// ClassLoader::_sync_systemLoaderLockContentionRate and
+// ClassLoader::_sync_nonSystemLoaderLockContentionRate.
+// Record (in the performance counters) when acquiring the loader lock is
+// likely to contend, i.e. when another thread already owns it.  No-op unless
+// UsePerfData is enabled.
+void SystemDictionary::check_loader_lock_contention(Handle loader_lock, TRAPS) {
+  if (!UsePerfData) {
+    return;
+  }
+
+  assert(!loader_lock.is_null(), "NULL lock object");
+
+  // Only count the case where some *other* thread holds the lock; if we own
+  // it (recursive entry) or it is free, no contention will occur.
+  JavaThread* jt = (JavaThread*)THREAD;
+  if (ObjectSynchronizer::query_lock_ownership(jt, loader_lock)
+      != ObjectSynchronizer::owner_other) {
+    return;
+  }
+
+  if (loader_lock() == _system_loader_lock_obj) {
+    ClassLoader::sync_systemLoaderLockContentionRate()->inc();
+  } else {
+    ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
+  }
+}
+  
+// ----------------------------------------------------------------------------
+// Lookup
+
+// Look up a fully loaded class in the system dictionary.  The caller supplies
+// the precomputed hash and bucket index (verified under ASSERT).  Returns
+// NULL when no entry matches.  Caller must hold SystemDictionary_lock or be
+// at a safepoint.
+klassOop SystemDictionary::find_class(int index, unsigned int hash,
+                                      symbolHandle class_name,
+                                      Handle class_loader) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert (index == dictionary()->index_for(class_name, class_loader),
+          "incorrect index?");
+
+  return dictionary()->find_class(index, hash, class_name, class_loader);
+}
+
+
+// Basic find on classes in the midst of being loaded: consults the
+// placeholder table rather than the dictionary of loaded classes.  Caller
+// must hold SystemDictionary_lock or be at a safepoint.
+symbolOop SystemDictionary::find_placeholder(int index, unsigned int hash,
+                                             symbolHandle class_name,
+                                             Handle class_loader) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+
+  symbolOop entry = placeholders()->find_entry(index, hash, class_name, class_loader);
+  return entry;
+}
+
+
+// Used for assertions and verification only.
+// Looks first in the loaded-class dictionary, then in the placeholder table,
+// so it also finds classes that are in the midst of being loaded.  Too
+// expensive for general use, hence the guarantee in product builds.
+oop SystemDictionary::find_class_or_placeholder(symbolHandle class_name, 
+                                                Handle class_loader) {
+  #ifndef ASSERT
+  guarantee(VerifyBeforeGC   || 
+            VerifyDuringGC   || 
+            VerifyBeforeExit ||
+            VerifyAfterGC, "too expensive"); 
+  #endif
+  assert_locked_or_safepoint(SystemDictionary_lock);
+
+  // First look in the loaded class array
+  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
+  int d_index = dictionary()->hash_to_index(d_hash);
+  oop lookup = find_class(d_index, d_hash, class_name, class_loader);
+
+  if (lookup == NULL) {
+    // Next try the placeholders
+    unsigned int p_hash = placeholders()->compute_hash(class_name,class_loader);
+    int p_index = placeholders()->hash_to_index(p_hash);
+    lookup = find_placeholder(p_index, p_hash, class_name, class_loader);
+  }
+
+  return lookup;
+}
+
+
+// Get the next class in the dictionary.
+// Delegates to the dictionary's internal iteration cursor.
+klassOop SystemDictionary::try_get_next_class() {
+  return dictionary()->try_get_next_class();
+}
+
+
+// ----------------------------------------------------------------------------
+// Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Compile_lock
+// is held, to ensure that the compiler is not using the class hierarchy, and that deoptimization will kick in
+// before a new class is used.
+
+void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
+  assert(k.not_null(), "just checking");
+  // Link into hierarchy. Make sure the vtables are initialized before linking
+  // into the hierarchy.
+  k->append_to_sibling_list();                    // add to superklass/sibling list
+  k->process_interfaces(THREAD);                  // handle all "implements" declarations  
+  k->set_init_state(instanceKlass::loaded);
+  // Now flush all code that depended on old class hierarchy.
+  // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
+  // Also, first reinitialize vtable because it may have gotten out of synch 
+  // while the new class wasn't connected to the class hierarchy.     
+  Universe::flush_dependents_on(k);
+}
+
+
+// ----------------------------------------------------------------------------
+// GC support
+
+// Following roots during mark-sweep is separated in two phases. 
+//
+// The first phase follows preloaded classes and all other system 
+// classes, since these will never get unloaded anyway.
+//
+// The second phase removes (unloads) unreachable classes from the
+// system dictionary and follows the remaining classes' contents.
+
+// First (mark) phase: visit only the roots that can never be unloaded --
+// the system loader object, the preloaded classes/mirrors, and the
+// always-strong subset of the dictionary/placeholder/constraint tables.
+void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
+  // Follow preloaded classes/mirrors and system loader object
+  blk->do_oop(&_java_system_loader);
+  preloaded_oops_do(blk);
+  always_strong_classes_do(blk);
+}
+
+
+// Visit the always-strong portions of each sub-table.  Keeping the
+// placeholder and error-table symbolOops alive here prevents the symbols
+// from being reclaimed while loading/resolution is still in flight.
+void SystemDictionary::always_strong_classes_do(OopClosure* blk) {
+  // Follow all system classes and temporary placeholders in dictionary
+  dictionary()->always_strong_classes_do(blk);
+  
+  // Placeholders. These are *always* strong roots, as they
+  // represent classes we're actively loading.
+  placeholders_do(blk);  
+
+  // Loader constraints. We must keep the symbolOop used in the name alive.
+  constraints()->always_strong_classes_do(blk);
+
+  // Resolution errors keep the symbolOop for the error alive
+  resolution_errors()->always_strong_classes_do(blk);
+}
+
+
+// Visit all oops held by the placeholder table (classes mid-load).
+void SystemDictionary::placeholders_do(OopClosure* blk) {
+  placeholders()->oops_do(blk);
+}
+
+
+// Second (unload) phase: remove dictionary entries whose loaders are dead,
+// then purge loader-constraint and resolution-error entries that referenced
+// them.  Returns the dictionary's result (whether anything was unloaded --
+// per Dictionary::do_unloading).
+bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
+  bool result = dictionary()->do_unloading(is_alive);
+  constraints()->purge_loader_constraints(is_alive);
+  resolution_errors()->purge_resolution_errors(is_alive);
+  return result;
+}
+
+
+// Visit every oop the system dictionary holds (all sub-tables), in contrast
+// to always_strong_oops_do() which only visits the never-unloaded subset.
+void SystemDictionary::oops_do(OopClosure* f) {
+  // Adjust preloaded classes/mirrors and system loader object
+  f->do_oop(&_java_system_loader);
+  preloaded_oops_do(f);
+
+  lazily_loaded_oops_do(f);
+
+  // Adjust dictionary
+  dictionary()->oops_do(f);
+
+  // Partially loaded classes
+  placeholders()->oops_do(f);
+
+  // Adjust constraint table
+  constraints()->oops_do(f);
+
+  // Adjust resolution error table
+  resolution_errors()->oops_do(f);
+}
+
+
+// Visit every preloaded-class root.  Any static klass field added to
+// initialize_preloaded_classes() must also be visited here, or GC will
+// miss (or fail to adjust) it.
+void SystemDictionary::preloaded_oops_do(OopClosure* f) {
+  f->do_oop((oop*) &_string_klass);
+  f->do_oop((oop*) &_object_klass);
+  f->do_oop((oop*) &_class_klass);
+  f->do_oop((oop*) &_cloneable_klass);
+  f->do_oop((oop*) &_classloader_klass);
+  f->do_oop((oop*) &_serializable_klass);
+  f->do_oop((oop*) &_system_klass);
+
+  f->do_oop((oop*) &_throwable_klass);
+  f->do_oop((oop*) &_error_klass);
+  f->do_oop((oop*) &_threaddeath_klass);
+  f->do_oop((oop*) &_exception_klass);
+  f->do_oop((oop*) &_runtime_exception_klass);
+  f->do_oop((oop*) &_classNotFoundException_klass);
+  f->do_oop((oop*) &_noClassDefFoundError_klass);
+  f->do_oop((oop*) &_linkageError_klass);
+  f->do_oop((oop*) &_classCastException_klass);
+  f->do_oop((oop*) &_arrayStoreException_klass);
+  f->do_oop((oop*) &_virtualMachineError_klass);
+  f->do_oop((oop*) &_outOfMemoryError_klass);
+  f->do_oop((oop*) &_StackOverflowError_klass);
+  f->do_oop((oop*) &_illegalMonitorStateException_klass);
+  f->do_oop((oop*) &_protectionDomain_klass);
+  f->do_oop((oop*) &_AccessControlContext_klass);
+
+  f->do_oop((oop*) &_reference_klass);
+  f->do_oop((oop*) &_soft_reference_klass);
+  f->do_oop((oop*) &_weak_reference_klass);
+  f->do_oop((oop*) &_final_reference_klass);
+  f->do_oop((oop*) &_phantom_reference_klass);
+  f->do_oop((oop*) &_finalizer_klass);
+  
+  f->do_oop((oop*) &_thread_klass);
+  f->do_oop((oop*) &_threadGroup_klass);
+  f->do_oop((oop*) &_properties_klass);      
+  f->do_oop((oop*) &_reflect_accessible_object_klass);      
+  f->do_oop((oop*) &_reflect_field_klass);      
+  f->do_oop((oop*) &_reflect_method_klass);      
+  f->do_oop((oop*) &_reflect_constructor_klass);      
+  f->do_oop((oop*) &_reflect_magic_klass);
+  f->do_oop((oop*) &_reflect_method_accessor_klass);
+  f->do_oop((oop*) &_reflect_constructor_accessor_klass);
+  f->do_oop((oop*) &_reflect_delegating_classloader_klass);
+  f->do_oop((oop*) &_reflect_constant_pool_klass);
+  f->do_oop((oop*) &_reflect_unsafe_static_field_accessor_impl_klass);
+
+  f->do_oop((oop*) &_stringBuffer_klass);
+  f->do_oop((oop*) &_vector_klass);
+  f->do_oop((oop*) &_hashtable_klass);
+
+  f->do_oop((oop*) &_stackTraceElement_klass);
+
+  f->do_oop((oop*) &_java_nio_Buffer_klass);
+
+  f->do_oop((oop*) &_sun_misc_AtomicLongCSImpl_klass);
+
+  f->do_oop((oop*) &_boolean_klass);
+  f->do_oop((oop*) &_char_klass);
+  f->do_oop((oop*) &_float_klass);
+  f->do_oop((oop*) &_double_klass);
+  f->do_oop((oop*) &_byte_klass);
+  f->do_oop((oop*) &_short_klass);
+  f->do_oop((oop*) &_int_klass);
+  f->do_oop((oop*) &_long_klass);
+  {
+    // Box klasses: only the [T_BOOLEAN, T_LONG] slots are ever populated
+    // (see initialize_preloaded_classes); skip the NULL entries.
+    for (int i = 0; i < T_VOID+1; i++) {
+      if (_box_klasses[i] != NULL) {
+	assert(i >= T_BOOLEAN, "checking");
+	f->do_oop((oop*) &_box_klasses[i]);
+      }
+    }
+  }
+
+  // Do the basic type mirrors.  (These are shared with Universe::oops_do.)
+  shared_oops_do(f);
+  
+  f->do_oop((oop*) &_system_loader_lock_obj); 
+  FilteredFieldsMap::klasses_oops_do(f); 
+}
+
+// Visit the basic-type mirror roots.  Shared with Universe::oops_do.
+void SystemDictionary::shared_oops_do(OopClosure* f) {
+  f->do_oop((oop*) &_int_mirror);
+  f->do_oop((oop*) &_float_mirror);
+  f->do_oop((oop*) &_double_mirror);
+  f->do_oop((oop*) &_byte_mirror);
+  f->do_oop((oop*) &_bool_mirror);
+  f->do_oop((oop*) &_char_mirror);
+  f->do_oop((oop*) &_long_mirror);
+  f->do_oop((oop*) &_short_mirror);
+  f->do_oop((oop*) &_void_mirror);
+
+  // It's important to iterate over these guys even if they are null,
+  // since that's how shared heaps are restored.
+  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
+    f->do_oop((oop*) &_mirrors[i]);
+  }
+  // Slots below T_BOOLEAN are never assigned (see initialize_basic_type_mirrors).
+  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
+}
+
+// Visit klasses that are loaded on demand rather than preloaded
+// (see load_abstract_ownable_synchronizer_klass).
+void SystemDictionary::lazily_loaded_oops_do(OopClosure* f) {
+  f->do_oop((oop*) &_abstract_ownable_synchronizer_klass);
+}
+
+// Just the classes from defining class loaders
+// Don't iterate over placeholders
+void SystemDictionary::classes_do(void f(klassOop)) {
+  dictionary()->classes_do(f);
+}
+
+// Added for initialize_itable_for_klass
+//   Just the classes from defining class loaders
+// Don't iterate over placeholders
+void SystemDictionary::classes_do(void f(klassOop, TRAPS), TRAPS) {
+  dictionary()->classes_do(f, CHECK);
+}
+
+//   All classes, and their class loaders
+// Don't iterate over placeholders
+void SystemDictionary::classes_do(void f(klassOop, oop)) {
+  dictionary()->classes_do(f);
+}
+
+//   All classes, and their class loaders
+//   (added for helpers that use HandleMarks and ResourceMarks)
+// Don't iterate over placeholders
+void SystemDictionary::classes_do(void f(klassOop, oop, TRAPS), TRAPS) {
+  dictionary()->classes_do(f, CHECK);
+}
+
+// Iterate over the (class-name, loader) pairs currently in the placeholder
+// table, i.e. classes in the midst of being loaded.
+void SystemDictionary::placeholders_do(void f(symbolOop, oop)) {
+  placeholders()->entries_do(f);
+}
+
+// Iterate over all methods of all classes in the dictionary.
+void SystemDictionary::methods_do(void f(methodOop)) {
+  dictionary()->methods_do(f);
+}
+
+// ----------------------------------------------------------------------------
+// Lazily load klasses
+
+// Lazily load java.util.concurrent.locks.AbstractOwnableSynchronizer and
+// publish it in _abstract_ownable_synchronizer_klass.
+void SystemDictionary::load_abstract_ownable_synchronizer_klass(TRAPS) {
+  assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
+
+  // if multiple threads calling this function, only one thread will load
+  // the class.  The other threads will find the loaded version once the
+  // class is loaded.
+  klassOop aos = _abstract_ownable_synchronizer_klass;
+  if (aos == NULL) {
+    klassOop k = resolve_or_fail(vmSymbolHandles::java_util_concurrent_locks_AbstractOwnableSynchronizer(), true, CHECK);
+    // Force a fence to prevent any read before the write completes
+    // NOTE(review): the fence acts as a release so readers that observe the
+    // published klass see it fully resolved -- confirm a full fence (vs. a
+    // store-release) is intended here.
+    OrderAccess::fence();
+    _abstract_ownable_synchronizer_klass = k;
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Initialization
+
+// One-time setup of the system dictionary's tables, the system loader lock
+// object, and the preloaded classes.  Must run exactly once during VM init.
+void SystemDictionary::initialize(TRAPS) {
+  // Allocate arrays
+  assert(dictionary() == NULL,
+         "SystemDictionary should only be initialized once");
+  _dictionary = new Dictionary(_nof_buckets);
+  _placeholders = new PlaceholderTable(_nof_buckets);
+  _number_of_modifications = 0;
+  _loader_constraints = new LoaderConstraintTable(_loader_constraint_size);
+  _resolution_errors = new ResolutionErrorTable(_resolution_error_size);
+
+  // Allocate private object used as system class loader lock
+  _system_loader_lock_obj = oopFactory::new_system_objArray(0, CHECK);
+  // Initialize basic classes
+  initialize_preloaded_classes(CHECK);
+}
+
+
+// Resolve the well-known klasses into their static fields.  The order
+// matters: Object/String/Class come first so mirrors can be fixed up, and
+// anything resolved before java.lang.Class gets its mirror patched by
+// Universe::fixup_mirrors.  Every field set here must also be visited in
+// preloaded_oops_do().
+void SystemDictionary::initialize_preloaded_classes(TRAPS) {
+  assert(_object_klass == NULL, "preloaded classes should only be initialized once");
+  // Preload commonly used klasses
+  _object_klass            = resolve_or_fail(vmSymbolHandles::java_lang_Object(),                true, CHECK);
+  _string_klass            = resolve_or_fail(vmSymbolHandles::java_lang_String(),                true, CHECK);  
+  _class_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Class(),                 true, CHECK);
+  debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(_class_klass));
+
+  // Fixup mirrors for classes loaded before java.lang.Class
+  initialize_basic_type_mirrors(CHECK);
+  Universe::fixup_mirrors(CHECK);
+
+  _cloneable_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Cloneable(),             true, CHECK);
+  _classloader_klass       = resolve_or_fail(vmSymbolHandles::java_lang_ClassLoader(),           true, CHECK);
+  _serializable_klass      = resolve_or_fail(vmSymbolHandles::java_io_Serializable(),            true, CHECK);
+  _system_klass            = resolve_or_fail(vmSymbolHandles::java_lang_System(),                true, CHECK);  
+
+  _throwable_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Throwable(),             true, CHECK);
+  _error_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Error(),                 true, CHECK);
+  _threaddeath_klass       = resolve_or_fail(vmSymbolHandles::java_lang_ThreadDeath(),           true, CHECK);
+  _exception_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Exception(),             true, CHECK);
+  _runtime_exception_klass = resolve_or_fail(vmSymbolHandles::java_lang_RuntimeException(),      true, CHECK);
+  _protectionDomain_klass  = resolve_or_fail(vmSymbolHandles::java_security_ProtectionDomain(),  true, CHECK);
+  _AccessControlContext_klass = resolve_or_fail(vmSymbolHandles::java_security_AccessControlContext(),  true, CHECK);
+  _classNotFoundException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassNotFoundException(),  true, CHECK);
+  _noClassDefFoundError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_NoClassDefFoundError(),  true, CHECK);  
+  _linkageError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_LinkageError(),  true, CHECK);  
+  _classCastException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassCastException(),   true, CHECK);  
+  _arrayStoreException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ArrayStoreException(),   true, CHECK);  
+  _virtualMachineError_klass = resolve_or_fail(vmSymbolHandles::java_lang_VirtualMachineError(),   true, CHECK);  
+  _outOfMemoryError_klass  = resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(),      true, CHECK);  
+  _StackOverflowError_klass = resolve_or_fail(vmSymbolHandles::java_lang_StackOverflowError(),   true, CHECK);  
+  _illegalMonitorStateException_klass = resolve_or_fail(vmSymbolHandles::java_lang_IllegalMonitorStateException(),   true, CHECK);  
+
+  // Preload ref klasses and set reference types
+  _reference_klass         = resolve_or_fail(vmSymbolHandles::java_lang_ref_Reference(),         true, CHECK);
+  instanceKlass::cast(_reference_klass)->set_reference_type(REF_OTHER);
+  instanceRefKlass::update_nonstatic_oop_maps(_reference_klass);
+
+  _soft_reference_klass    = resolve_or_fail(vmSymbolHandles::java_lang_ref_SoftReference(),     true, CHECK);
+  instanceKlass::cast(_soft_reference_klass)->set_reference_type(REF_SOFT);
+  _weak_reference_klass    = resolve_or_fail(vmSymbolHandles::java_lang_ref_WeakReference(),     true, CHECK);
+  instanceKlass::cast(_weak_reference_klass)->set_reference_type(REF_WEAK);
+  _final_reference_klass   = resolve_or_fail(vmSymbolHandles::java_lang_ref_FinalReference(),    true, CHECK);
+  instanceKlass::cast(_final_reference_klass)->set_reference_type(REF_FINAL);
+  _phantom_reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_PhantomReference(),  true, CHECK);
+  instanceKlass::cast(_phantom_reference_klass)->set_reference_type(REF_PHANTOM);
+  _finalizer_klass         = resolve_or_fail(vmSymbolHandles::java_lang_ref_Finalizer(),         true, CHECK);
+
+  _thread_klass           = resolve_or_fail(vmSymbolHandles::java_lang_Thread(),                true, CHECK);
+  _threadGroup_klass      = resolve_or_fail(vmSymbolHandles::java_lang_ThreadGroup(),           true, CHECK);
+  _properties_klass       = resolve_or_fail(vmSymbolHandles::java_util_Properties(),            true, CHECK);  
+  _reflect_accessible_object_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_AccessibleObject(),  true, CHECK);  
+  _reflect_field_klass    = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Field(),         true, CHECK);  
+  _reflect_method_klass   = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(),        true, CHECK);  
+  _reflect_constructor_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Constructor(),   true, CHECK);  
+  // Universe::is_gte_jdk14x_version() is not set up by this point.
+  // It's okay if these turn out to be NULL in non-1.4 JDKs.
+  _reflect_magic_klass    = resolve_or_null(vmSymbolHandles::sun_reflect_MagicAccessorImpl(),         CHECK);
+  _reflect_method_accessor_klass = resolve_or_null(vmSymbolHandles::sun_reflect_MethodAccessorImpl(),     CHECK);
+  _reflect_constructor_accessor_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstructorAccessorImpl(),     CHECK);
+  _reflect_delegating_classloader_klass = resolve_or_null(vmSymbolHandles::sun_reflect_DelegatingClassLoader(),     CHECK);
+  _reflect_constant_pool_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstantPool(),         CHECK);
+  _reflect_unsafe_static_field_accessor_impl_klass = resolve_or_null(vmSymbolHandles::sun_reflect_UnsafeStaticFieldAccessorImpl(), CHECK);
+
+  _vector_klass           = resolve_or_fail(vmSymbolHandles::java_util_Vector(),                true, CHECK);  
+  _hashtable_klass        = resolve_or_fail(vmSymbolHandles::java_util_Hashtable(),             true, CHECK);  
+  _stringBuffer_klass     = resolve_or_fail(vmSymbolHandles::java_lang_StringBuffer(),          true, CHECK);  
+
+  // It's NULL in non-1.4 JDKs.
+  _stackTraceElement_klass = resolve_or_null(vmSymbolHandles::java_lang_StackTraceElement(),          CHECK);
+
+  // Universe::is_gte_jdk14x_version() is not set up by this point.
+  // It's okay if this turns out to be NULL in non-1.4 JDKs.
+  _java_nio_Buffer_klass   = resolve_or_null(vmSymbolHandles::java_nio_Buffer(),                 CHECK);
+
+  // If this class isn't present, it won't be referenced.
+  _sun_misc_AtomicLongCSImpl_klass = resolve_or_null(vmSymbolHandles::sun_misc_AtomicLongCSImpl(),     CHECK);
+
+  // Preload boxing klasses
+  _boolean_klass           = resolve_or_fail(vmSymbolHandles::java_lang_Boolean(),               true, CHECK);
+  _char_klass              = resolve_or_fail(vmSymbolHandles::java_lang_Character(),             true, CHECK);
+  _float_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Float(),                 true, CHECK);
+  _double_klass            = resolve_or_fail(vmSymbolHandles::java_lang_Double(),                true, CHECK);
+  _byte_klass              = resolve_or_fail(vmSymbolHandles::java_lang_Byte(),                  true, CHECK);
+  _short_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Short(),                 true, CHECK);
+  _int_klass               = resolve_or_fail(vmSymbolHandles::java_lang_Integer(),               true, CHECK);
+  _long_klass              = resolve_or_fail(vmSymbolHandles::java_lang_Long(),                  true, CHECK);
+
+  // Index the box klasses by the BasicType they wrap (see box_klass_type).
+  _box_klasses[T_BOOLEAN] = _boolean_klass;
+  _box_klasses[T_CHAR]    = _char_klass;
+  _box_klasses[T_FLOAT]   = _float_klass;
+  _box_klasses[T_DOUBLE]  = _double_klass;
+  _box_klasses[T_BYTE]    = _byte_klass;
+  _box_klasses[T_SHORT]   = _short_klass;
+  _box_klasses[T_INT]     = _int_klass;
+  _box_klasses[T_LONG]    = _long_klass;
+  //_box_klasses[T_OBJECT]  = _object_klass;
+  //_box_klasses[T_ARRAY]   = _object_klass;
+
+  { // Compute whether we should use loadClass or loadClassInternal when loading classes.
+    methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
+    _has_loadClassInternal = (method != NULL);
+  }
+
+  { // Compute whether we should use checkPackageAccess or NOT
+    methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
+    _has_checkPackageAccess = (method != NULL); 
+  }
+}
+
+// Create (or, with shared spaces, reuse) the java.lang.Class mirrors for the
+// primitive types and index them by BasicType in _mirrors.
+void SystemDictionary::initialize_basic_type_mirrors(TRAPS) { 
+  if (UseSharedSpaces) {
+    // Mirrors were restored from the shared archive; nothing to create.
+    assert(_int_mirror != NULL, "already loaded");
+    assert(_void_mirror == _mirrors[T_VOID], "consistently loaded");
+    return;
+  }
+
+  assert(_int_mirror==NULL, "basic type mirrors already initialized");
+
+  _int_mirror     = java_lang_Class::create_basic_type_mirror("int",    T_INT,     CHECK);
+  _float_mirror   = java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
+  _double_mirror  = java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
+  _byte_mirror    = java_lang_Class::create_basic_type_mirror("byte",   T_BYTE,    CHECK);
+  _bool_mirror    = java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
+  _char_mirror    = java_lang_Class::create_basic_type_mirror("char",   T_CHAR,    CHECK);
+  _long_mirror    = java_lang_Class::create_basic_type_mirror("long",   T_LONG,    CHECK);
+  _short_mirror   = java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
+  _void_mirror    = java_lang_Class::create_basic_type_mirror("void",   T_VOID,    CHECK);
+
+  _mirrors[T_INT]     = _int_mirror;
+  _mirrors[T_FLOAT]   = _float_mirror;
+  _mirrors[T_DOUBLE]  = _double_mirror;
+  _mirrors[T_BYTE]    = _byte_mirror;
+  _mirrors[T_BOOLEAN] = _bool_mirror;
+  _mirrors[T_CHAR]    = _char_mirror;
+  _mirrors[T_LONG]    = _long_mirror;
+  _mirrors[T_SHORT]   = _short_mirror;
+  _mirrors[T_VOID]    = _void_mirror;
+  //_mirrors[T_OBJECT]  = instanceKlass::cast(_object_klass)->java_mirror();
+  //_mirrors[T_ARRAY]   = instanceKlass::cast(_object_klass)->java_mirror();
+}
+
+
+// Tells if a given klass is a box (wrapper class, such as java.lang.Integer).
+// If so, returns the basic type it holds.  If not, returns T_OBJECT.
+BasicType SystemDictionary::box_klass_type(klassOop k) {
+  assert(k != NULL, "klass must not be NULL");
+  // Only the [T_BOOLEAN, T_LONG] slots of _box_klasses are ever populated
+  // (see initialize_preloaded_classes), so scan just that range.
+  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
+    if (_box_klasses[i] == k)
+      return (BasicType)i;
+  }
+  return T_OBJECT;
+}
+
+// Constraints on class loaders. The details of the algorithm can be
+// found in the OOPSLA'98 paper "Dynamic Class Loading in the Java
+// Virtual Machine" by Sheng Liang and Gilad Bracha.  The basic idea is
+// that the system dictionary needs to maintain a set of constraints that
+// must be satisfied by all classes in the dictionary.
+// if defining is true, then LinkageError if already in systemDictionary
+// if initiating loader, then ok if instanceKlass matches existing entry
+
+// Verify that entering klass k for (name, class_loader) in the dictionary
+// would not conflict with an existing entry or a loader constraint; throws
+// LinkageError if it would.  See the comment above for the defining vs.
+// initiating loader rules.
+void SystemDictionary::check_constraints(int d_index, unsigned int d_hash,
+                                         instanceKlassHandle k,
+                                         Handle class_loader, bool defining, 
+                                         TRAPS) {
+  const char *linkage_error = NULL;
+  {
+    symbolHandle name (THREAD, k->name());
+    MutexLocker mu(SystemDictionary_lock, THREAD);         
+
+    klassOop check = find_class(d_index, d_hash, name, class_loader);
+    if (check != (klassOop)NULL) { 
+      // if different instanceKlass - duplicate class definition,
+      // else - ok, class loaded by a different thread in parallel,
+      // we should only have found it if it was done loading and ok to use 
+      // system dictionary only holds instance classes, placeholders
+      // also holds array classes
+      
+      assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary");
+      if ((defining == true) || (k() != check)) {
+        linkage_error = "loader (instance of  %s): attempted  duplicate class "
+	  "definition for name: \"%s\"";
+      } else {
+        return;
+      }
+    }
+
+#ifdef ASSERT
+    unsigned int p_hash = placeholders()->compute_hash(name, class_loader);
+    int p_index = placeholders()->hash_to_index(p_hash);
+    symbolOop ph_check = find_placeholder(p_index, p_hash, name, class_loader);
+    assert(ph_check == NULL || ph_check == name(), "invalid symbol");
+#endif
+
+    if (linkage_error == NULL) {
+      if (constraints()->check_or_update(k, class_loader, name) == false) {
+	linkage_error = "loader constraint violation: loader (instance of %s)"
+	  " previously initiated loading for a different type with name \"%s\"";
+      }
+    }
+  }
+
+  // Throw error now if needed (cannot throw while holding 
+  // SystemDictionary_lock because of rank ordering)
+
+  if (linkage_error) {
+    ResourceMark rm(THREAD);
+    const char* class_loader_name = loader_name(class_loader());
+    char* type_name = k->name()->as_C_string();
+    // buflen includes the full format string, so it comfortably covers the
+    // expansion of the two %s's plus the terminating NUL.
+    size_t buflen = strlen(linkage_error) + strlen(class_loader_name) +
+      strlen(type_name);
+    char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
+    jio_snprintf(buf, buflen, linkage_error, class_loader_name, type_name);
+    THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
+  }
+}
+
+
+// Update system dictionary - done after check_constraint and add_to_hierachy 
+// have been called.
+// Removes the placeholder (if any), installs k in the dictionary, and wakes
+// threads waiting on SystemDictionary_lock for this class to appear.
+void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
+                                         int p_index, unsigned int p_hash,
+                                         instanceKlassHandle k, 
+                                         Handle class_loader, 
+                                         TRAPS) {
+  // Compile_lock prevents systemDictionary updates during compilations
+  assert_locked_or_safepoint(Compile_lock);
+  symbolHandle name (THREAD, k->name());
+
+  {
+  MutexLocker mu1(SystemDictionary_lock, THREAD);           
+
+  // See whether biased locking is enabled and if so set it for this
+  // klass.
+  // Note that this must be done past the last potential blocking
+  // point / safepoint. We enable biased locking lazily using a
+  // VM_Operation to iterate the SystemDictionary and installing the
+  // biasable mark word into each instanceKlass's prototype header.
+  // To avoid race conditions where we accidentally miss enabling the
+  // optimization for one class in the process of being added to the
+  // dictionary, we must not safepoint after the test of
+  // BiasedLocking::enabled().
+  if (UseBiasedLocking && BiasedLocking::enabled()) {
+    // Set biased locking bit for all loaded classes; it will be
+    // cleared if revocation occurs too often for this type
+    // NOTE that we must only do this when the class is initally
+    // defined, not each time it is referenced from a new class loader
+    if (k->class_loader() == class_loader()) {
+      k->set_prototype_header(markOopDesc::biased_locking_prototype());
+    }
+  }
+
+  // Check for a placeholder. If there, remove it and make a
+  // new system dictionary entry.
+  placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
+  klassOop sd_check = find_class(d_index, d_hash, name, class_loader);
+  if (sd_check == NULL) {
+    dictionary()->add_klass(name, class_loader, k);
+    // Bump the modification counter so compiler code can notice the change.
+    notice_modification();
+  }
+#ifdef ASSERT
+  sd_check = find_class(d_index, d_hash, name, class_loader);
+  assert (sd_check != NULL, "should have entry in system dictionary");
+// Changed to allow PH to remain to complete class circularity checking
+// while only one thread can define a class at one time, multiple
+// classes can resolve the superclass for a class at one time, 
+// and the placeholder is used to track that
+//  symbolOop ph_check = find_placeholder(p_index, p_hash, name, class_loader);
+//  assert (ph_check == NULL, "should not have a placeholder entry");
+#endif
+    // Wake any threads waiting on SystemDictionary_lock (e.g. parallel
+    // definers) now that the class is visible.
+    SystemDictionary_lock->notify_all();
+  }
+}
+
+
+// Find a class either already loaded by this loader, or -- failing that --
+// one that a loader constraint obliges this loader to resolve to.  Array
+// classes are handled by constraining their element class, since array
+// klassOops are not kept in the constraint table.
+klassOop SystemDictionary::find_constrained_instance_or_array_klass(
+                    symbolHandle class_name, Handle class_loader, TRAPS) {
+
+  // First see if it has been loaded directly.
+  // Force the protection domain to be null.  (This removes protection checks.)
+  Handle no_protection_domain;
+  klassOop klass = find_instance_or_array_klass(class_name, class_loader,
+                                                no_protection_domain, CHECK_NULL);
+  if (klass != NULL)
+    return klass;
+
+  // Now look to see if it has been loaded elsewhere, and is subject to
+  // a loader constraint that would require this loader to return the
+  // klass that is already loaded.
+  if (FieldType::is_array(class_name())) {
+    // Array classes are hard because their klassOops are not kept in the
+    // constraint table. The array klass may be constrained, but the elem class
+    // may not be. 
+    jint dimension;
+    symbolOop object_key;
+    BasicType t = FieldType::get_array_info(class_name(), &dimension,
+                                            &object_key, CHECK_(NULL));
+    if (t != T_OBJECT) {
+      // Primitive element type: the type-array klass is universal.
+      klass = Universe::typeArrayKlassObj(t);
+    } else {
+      MutexLocker mu(SystemDictionary_lock, THREAD);
+      symbolHandle elem_name(THREAD, object_key);
+      klass = constraints()->find_constrained_elem_klass(class_name, elem_name, class_loader, THREAD);
+    }
+    if (klass != NULL) {
+      // Re-derive the array klass of the requested dimension (may be NULL).
+      klass = Klass::cast(klass)->array_klass_or_null(dimension);
+    }
+  } else {
+    MutexLocker mu(SystemDictionary_lock, THREAD);
+    // Non-array classes are easy: simply check the constraint table.
+    klass = constraints()->find_constrained_klass(class_name, class_loader);
+  }
+      
+  return klass;
+}
+
+
+// Record that class_loader1 and class_loader2 must resolve class_name to the
+// same klass.  Returns the constraint table's verdict (false if the new
+// constraint conflicts with classes already loaded).
+bool SystemDictionary::add_loader_constraint(symbolHandle class_name,
+                                             Handle class_loader1,
+                                             Handle class_loader2, 
+					     Thread* THREAD) {
+  unsigned int d_hash1 = dictionary()->compute_hash(class_name, class_loader1);
+  int d_index1 = dictionary()->hash_to_index(d_hash1);
+
+  unsigned int d_hash2 = dictionary()->compute_hash(class_name, class_loader2);
+  int d_index2 = dictionary()->hash_to_index(d_hash2);
+
+  {
+    MutexLocker mu_s(SystemDictionary_lock, THREAD);
+
+    // Better never do a GC while we're holding these oops
+    No_Safepoint_Verifier nosafepoint;
+
+    // Pass along whichever side (if any) has already loaded the class so the
+    // constraint table can validate the pairing immediately.
+    klassOop klass1 = find_class(d_index1, d_hash1, class_name, class_loader1);
+    klassOop klass2 = find_class(d_index2, d_hash2, class_name, class_loader2);
+    return constraints()->add_entry(class_name, klass1, class_loader1,
+				    klass2, class_loader2);
+  }
+}
+
+// Add entry to resolution error table to record the error when the first
+// attempt to resolve a reference to a class has failed.
+void SystemDictionary::add_resolution_error(constantPoolHandle pool, int which, symbolHandle error) {
+  // The (constant pool, cp index) pair identifies the failed reference.
+  unsigned int hash = resolution_errors()->compute_hash(pool, which);
+  int index = resolution_errors()->hash_to_index(hash);
+  { 
+    // The resolution error table is guarded by SystemDictionary_lock.
+    MutexLocker ml(SystemDictionary_lock, Thread::current());
+    resolution_errors()->add_entry(index, hash, pool, which, error);
+  }
+}
+
+// Lookup resolution error table. Returns error if found, otherwise NULL.
+symbolOop SystemDictionary::find_resolution_error(constantPoolHandle pool, int which) {
+  // Same (pool, index) hashing scheme as add_resolution_error above.
+  unsigned int hash = resolution_errors()->compute_hash(pool, which);
+  int index = resolution_errors()->hash_to_index(hash);
+  { 
+    MutexLocker ml(SystemDictionary_lock, Thread::current());
+    ResolutionErrorEntry* entry = resolution_errors()->find_entry(index, hash, pool, which);
+    // NULL (not an error symbol) when no prior failure was recorded.
+    return (entry != NULL) ? entry->error() : (symbolOop)NULL;
+  }
+}
+
+
+// Make sure all class components (including arrays) in the given
+// signature will be resolved to the same class in both loaders.
+// Returns the name of the type that failed a loader constraint check, or
+// NULL if no constraint failed. The returned C string needs cleaning up
+// with a ResourceMark in the caller
+char* SystemDictionary::check_signature_loaders(symbolHandle signature,
+                                               Handle loader1, Handle loader2,
+                                               bool is_method, TRAPS)  {
+  // Nothing to do if loaders are the same. 
+  if (loader1() == loader2()) {
+    return NULL;
+  }
+  
+  // Walk every object-typed component of the signature and register a
+  // loader constraint for each one; the first component for which the
+  // constraint cannot be added is reported back to the caller as a
+  // resource-allocated C string (NULL means all constraints succeeded).
+  SignatureStream sig_strm(signature, is_method);
+  while (!sig_strm.is_done()) {
+    if (sig_strm.is_object()) {
+      symbolOop s = sig_strm.as_symbol(CHECK_NULL);
+      symbolHandle sig (THREAD, s);
+      if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
+	return sig()->as_C_string();
+      }
+    }
+    sig_strm.next();
+  }
+  return NULL;
+}
+
+
+// Since the identity hash code for symbols changes when the symbols are
+// moved from the regular perm gen (hash in the mark word) to the shared
+// spaces (hash is the address), the classes loaded into the dictionary
+// may be in the wrong buckets.
+
+void SystemDictionary::reorder_dictionary() {
+  // Rehash entries into their correct buckets after symbol hash codes
+  // change due to the shared-spaces move described in the comment above.
+  dictionary()->reorder_dictionary();
+}
+
+
+// Sharing support: delegates bucket copying to the dictionary.
+// NOTE(review): signature suggests [*top, end) is the destination buffer —
+// confirm against Dictionary::copy_buckets.
+void SystemDictionary::copy_buckets(char** top, char* end) {
+  dictionary()->copy_buckets(top, end);
+}
+
+
+// Sharing support: delegates table copying to the dictionary
+// (companion of copy_buckets; same buffer convention).
+void SystemDictionary::copy_table(char** top, char* end) {
+  dictionary()->copy_table(top, end);
+}
+
+
+// Sharing support: delegates to Dictionary::reverse().
+void SystemDictionary::reverse() {
+  dictionary()->reverse();
+}
+
+// Number of dictionary entries; per the header comment this counts both
+// fully loaded classes and classes still in the process of being loaded.
+int SystemDictionary::number_of_classes() {
+  return dictionary()->number_of_entries();
+}
+
+
+// ----------------------------------------------------------------------------
+#ifndef PRODUCT
+
+void SystemDictionary::print() {
+  // The dictionary is printed without the lock; the header documents that
+  // readers may examine fully formed entries unlocked.
+  dictionary()->print();
+
+  // Placeholders
+  // Placeholders and constraints require SystemDictionary_lock (GC-safe
+  // variant) for any access, including printing.
+  GCMutexLocker mu(SystemDictionary_lock);
+  placeholders()->print();
+
+  // loader constraints - print under SD_lock
+  constraints()->print();
+}
+
+#endif
+
+// Sanity-check the dictionary, placeholder table and loader-constraint
+// table; guarantee() aborts the VM on failure.
+void SystemDictionary::verify() {
+  guarantee(dictionary() != NULL, "Verify of system dictionary failed");
+  guarantee(constraints() != NULL,
+            "Verify of loader constraints failed");
+  guarantee(dictionary()->number_of_entries() >= 0 &&
+            placeholders()->number_of_entries() >= 0,
+            "Verify of system dictionary failed");
+
+  // Verify dictionary
+  dictionary()->verify();
+
+  // Placeholder access requires SystemDictionary_lock (see header comment).
+  GCMutexLocker mu(SystemDictionary_lock);
+  placeholders()->verify();
+
+  // Verify constraint table
+  guarantee(constraints() != NULL, "Verify of loader constraints failed");
+  constraints()->verify(dictionary());
+}
+
+
+// Debugging check: a loaded klass (obj) must be findable under
+// (class_name, class_loader) in the dictionary, the placeholder table,
+// or the shared-class table.
+void SystemDictionary::verify_obj_klass_present(Handle obj,
+                                                symbolHandle class_name,
+                                                Handle class_loader) {
+  GCMutexLocker mu(SystemDictionary_lock);
+  oop probe = find_class_or_placeholder(class_name, class_loader);
+  if (probe == NULL) {
+    // Not in the dictionary or placeholders; it may be a shared class.
+    probe = SystemDictionary::find_shared_class(class_name);
+  }
+  // If the probe is a klass it must be the very object we were given;
+  // a placeholder probe (non-klass) is also acceptable.
+  guarantee(probe != NULL && 
+            (!probe->is_klass() || probe == obj()), 
+                     "Loaded klasses should be in SystemDictionary");
+}
+
+#ifndef PRODUCT
+
+// statistics code
+// Accumulates class/method counts and sizes (in words) over all classes in
+// the SystemDictionary; non-product builds only.  Counters are cumulative
+// across calls to print() (they are never reset).
+class ClassStatistics: AllStatic {
+ private:
+  static int nclasses;        // number of classes
+  static int nmethods;        // number of methods
+  static int nmethoddata;     // number of methodData    
+  static int class_size;      // size of class objects in words
+  static int method_size;     // size of method objects in words
+  static int debug_size;      // size of debug info in methods
+                              // NOTE(review): debug_size is never
+                              // incremented anywhere below, so the "debug
+                              // info" figure always prints as 0.
+  static int methoddata_size; // size of methodData objects in words
+
+  // Per-class visitor: counts the klass and, for instance klasses, the
+  // metadata arrays hanging off it (methods, constants, interfaces, fields).
+  static void do_class(klassOop k) {
+    nclasses++;
+    class_size += k->size();
+    if (k->klass_part()->oop_is_instance()) {
+      instanceKlass* ik = (instanceKlass*)k->klass_part();
+      class_size += ik->methods()->size();
+      class_size += ik->constants()->size();
+      class_size += ik->local_interfaces()->size();
+      class_size += ik->transitive_interfaces()->size();
+      // We do not have to count implementors, since we only store one!      
+      class_size += ik->fields()->size();
+    }
+  }
+
+  // Per-method visitor: counts the method, its tables, and any methodData.
+  static void do_method(methodOop m) {
+    nmethods++;
+    method_size += m->size();
+    // class loader uses same objArray for empty vectors, so don't count these
+    if (m->exception_table()->length() != 0)   method_size += m->exception_table()->size();
+    if (m->has_stackmap_table()) {
+      method_size += m->stackmap_data()->size();
+    }
+
+    methodDataOop mdo = m->method_data();
+    if (mdo != NULL) {
+      nmethoddata++;
+      methoddata_size += mdo->size();
+    }
+  }
+
+ public:
+  // Walks the dictionary, then prints totals (word counts scaled to bytes
+  // via oopSize).
+  static void print() {
+    SystemDictionary::classes_do(do_class);
+    SystemDictionary::methods_do(do_method);
+    tty->print_cr("Class statistics:");
+    tty->print_cr("%d classes (%d bytes)", nclasses, class_size * oopSize);
+    tty->print_cr("%d methods (%d bytes = %d base + %d debug info)", nmethods, 
+                  (method_size + debug_size) * oopSize, method_size * oopSize, debug_size * oopSize);
+    tty->print_cr("%d methoddata (%d bytes)", nmethoddata, methoddata_size * oopSize);
+  }
+};
+
+
+int ClassStatistics::nclasses        = 0;  
+int ClassStatistics::nmethods        = 0;
+int ClassStatistics::nmethoddata     = 0;
+int ClassStatistics::class_size      = 0;
+int ClassStatistics::method_size     = 0; 
+int ClassStatistics::debug_size      = 0;
+int ClassStatistics::methoddata_size = 0;
+
+// Entry point for -XX class statistics printing; non-product builds only.
+void SystemDictionary::print_class_statistics() {
+  // ResourceMark scopes any resource allocation done while printing.
+  ResourceMark rm;
+  ClassStatistics::print();
+}
+
+
+// Collects static (bytecode-level, not dynamic) statistics over every
+// method in the SystemDictionary; non-product builds only.  Unlike
+// ClassStatistics, counters are reset by initialize() on each print().
+class MethodStatistics: AllStatic {
+ public:
+  enum {
+    max_parameter_size = 10
+  };
+ private:
+
+  static int _number_of_methods;
+  static int _number_of_final_methods;
+  static int _number_of_static_methods;
+  static int _number_of_native_methods;
+  static int _number_of_synchronized_methods;
+  static int _number_of_profiled_methods;
+  static int _number_of_bytecodes;
+  static int _parameter_size_profile[max_parameter_size];
+  static int _bytecodes_profile[Bytecodes::number_of_java_codes];
+
+  // Reset all counters; called from print() so every report starts fresh.
+  static void initialize() {
+    _number_of_methods        = 0;
+    _number_of_final_methods  = 0;
+    _number_of_static_methods = 0;
+    _number_of_native_methods = 0;
+    _number_of_synchronized_methods = 0;
+    _number_of_profiled_methods = 0;
+    _number_of_bytecodes      = 0;
+    for (int i = 0; i < max_parameter_size             ; i++) _parameter_size_profile[i] = 0;
+    for (int j = 0; j < Bytecodes::number_of_java_codes; j++) _bytecodes_profile     [j] = 0;
+  };
+
+  // Per-method visitor: flag counts, parameter-size histogram, and a
+  // per-opcode bytecode histogram.
+  static void do_method(methodOop m) {
+    _number_of_methods++;
+    // collect flag info
+    if (m->is_final()       ) _number_of_final_methods++;
+    if (m->is_static()      ) _number_of_static_methods++;
+    if (m->is_native()      ) _number_of_native_methods++;
+    if (m->is_synchronized()) _number_of_synchronized_methods++;
+    if (m->method_data() != NULL) _number_of_profiled_methods++;
+    // collect parameter size info (add one for receiver, if any)
+    // Sizes >= max_parameter_size are clamped into the last bucket.
+    _parameter_size_profile[MIN2(m->size_of_parameters() + (m->is_static() ? 0 : 1), max_parameter_size - 1)]++;
+    // collect bytecodes info
+    { 
+      Thread *thread = Thread::current();
+      HandleMark hm(thread);
+      BytecodeStream s(methodHandle(thread, m));
+      Bytecodes::Code c;
+      while ((c = s.next()) >= 0) {
+        _number_of_bytecodes++;
+        _bytecodes_profile[c]++;
+      }
+    }
+  }
+
+ public:
+  // NOTE(review): percentages divide by _number_of_methods /
+  // _number_of_bytecodes without a zero guard; with an empty dictionary
+  // this would print inf/nan — confirm callers never hit that case.
+  static void print() {
+    initialize();
+    SystemDictionary::methods_do(do_method);
+    // generate output
+    tty->cr();
+    tty->print_cr("Method statistics (static):");
+    // flag distribution
+    tty->cr();
+    tty->print_cr("%6d final        methods  %6.1f%%", _number_of_final_methods       , _number_of_final_methods        * 100.0F / _number_of_methods);
+    tty->print_cr("%6d static       methods  %6.1f%%", _number_of_static_methods      , _number_of_static_methods       * 100.0F / _number_of_methods);
+    tty->print_cr("%6d native       methods  %6.1f%%", _number_of_native_methods      , _number_of_native_methods       * 100.0F / _number_of_methods);
+    tty->print_cr("%6d synchronized methods  %6.1f%%", _number_of_synchronized_methods, _number_of_synchronized_methods * 100.0F / _number_of_methods);
+    tty->print_cr("%6d profiled     methods  %6.1f%%", _number_of_profiled_methods, _number_of_profiled_methods * 100.0F / _number_of_methods);
+    // parameter size profile
+    tty->cr();
+    { int tot = 0;
+      int avg = 0;
+      for (int i = 0; i < max_parameter_size; i++) {
+        int n = _parameter_size_profile[i];
+        tot += n;
+        avg += n*i;
+        tty->print_cr("parameter size = %1d: %6d methods  %5.1f%%", i, n, n * 100.0F / _number_of_methods);
+      }
+      assert(tot == _number_of_methods, "should be the same");
+      tty->print_cr("                    %6d methods  100.0%%", _number_of_methods);
+      tty->print_cr("(average parameter size = %3.1f including receiver, if any)", (float)avg / _number_of_methods);
+    }
+    // bytecodes profile
+    tty->cr();
+    { int tot = 0;
+      for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
+        if (Bytecodes::is_defined(i)) {
+          Bytecodes::Code c = Bytecodes::cast(i);
+          int n = _bytecodes_profile[c];
+          tot += n;
+          tty->print_cr("%9d  %7.3f%%  %s", n, n * 100.0F / _number_of_bytecodes, Bytecodes::name(c));
+        }
+      }
+      assert(tot == _number_of_bytecodes, "should be the same");
+      tty->print_cr("%9d  100.000%%", _number_of_bytecodes);
+    }
+    tty->cr();
+  }
+};
+
+// Static member definitions (zero-initialized; initialize() also resets them).
+int MethodStatistics::_number_of_methods;
+int MethodStatistics::_number_of_final_methods;
+int MethodStatistics::_number_of_static_methods;
+int MethodStatistics::_number_of_native_methods;
+int MethodStatistics::_number_of_synchronized_methods;
+int MethodStatistics::_number_of_profiled_methods;
+int MethodStatistics::_number_of_bytecodes;
+int MethodStatistics::_parameter_size_profile[MethodStatistics::max_parameter_size];
+int MethodStatistics::_bytecodes_profile[Bytecodes::number_of_java_codes];
+
+
+// Entry point for method statistics printing; non-product builds only.
+void SystemDictionary::print_method_statistics() {
+  MethodStatistics::print();
+}
+
+#endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,610 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)systemDictionary.hpp	1.153 07/05/05 17:05:56 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// The system dictionary stores all loaded classes and maps:
+//
+//   [class name,class loader] -> class   i.e.  [symbolOop,oop] -> klassOop
+//
+// Classes are loaded lazily. The default VM class loader is
+// represented as NULL.
+
+// The underlying data structure is an open hash table with a fixed number
+// of buckets. During loading the loader object is locked, (for the VM loader 
+// a private lock object is used). Class loading can thus be done concurrently,
+// but only by different loaders.
+//
+// During loading a placeholder (name, loader) is temporarily placed in
+// a side data structure, and is used to detect ClassCircularityErrors
+// and to perform verification during GC.  A GC can occur in the midst
+// of class loading, as we call out to Java, have to take locks, etc.
+//
+// When class loading is finished, a new entry is added to the system
+// dictionary and the placeholder is removed. Note that the protection
+// domain field of the system dictionary has not yet been filled in when
+// the "real" system dictionary entry is created.
+//
+// Clients of this class who are interested in finding if a class has
+// been completely loaded -- not classes in the process of being loaded --
+// can read the SystemDictionary unlocked. This is safe because
+//    - entries are only deleted at safepoints  
+//    - readers cannot come to a safepoint while actively examining
+//         an entry  (an entry cannot be deleted from under a reader) 
+//    - entries must be fully formed before they are available to concurrent
+//         readers (we must ensure write ordering)
+//
+// Note that placeholders are deleted at any time, as they are removed
+// when a class is completely loaded. Therefore, readers as well as writers
+// of placeholders must hold the SystemDictionary_lock.
+// 
+
+class Dictionary;
+class PlaceholderTable;
+class LoaderConstraintTable;
+class HashtableBucket;
+class ResolutionErrorTable;
+
+class SystemDictionary : AllStatic {
+  friend class VMStructs;
+  friend class CompactingPermGenGen;
+  NOT_PRODUCT(friend class instanceKlassKlass;)
+
+ public:
+  // Returns a class with a given class name and class loader.  Loads the
+  // class if needed. If not found a NoClassDefFoundError or a
+  // ClassNotFoundException is thrown, depending on the value on the
+  // throw_error flag.  For most uses the throw_error argument should be set
+  // to true.
+
+  static klassOop resolve_or_fail(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
+  // Convenient call for null loader and protection domain.
+  static klassOop resolve_or_fail(symbolHandle class_name, bool throw_error, TRAPS);
+private:
+  // handle error translation for resolve_or_null results
+  static klassOop handle_resolution_exception(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS);
+
+public:
+
+  // Returns a class with a given class name and class loader.
+  // Loads the class if needed. If not found NULL is returned.
+  static klassOop resolve_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+  // Version with null loader and protection domain
+  static klassOop resolve_or_null(symbolHandle class_name, TRAPS);
+
+  // Resolve a superclass or superinterface. Called from ClassFileParser, 
+  // parse_interfaces, resolve_instance_class_or_null, load_shared_class
+  // "child_name" is the class whose super class or interface is being resolved.
+  static klassOop resolve_super_or_fail(symbolHandle child_name,
+                                        symbolHandle class_name,
+                                        Handle class_loader,
+                                        Handle protection_domain,
+                                        bool is_superclass,
+                                        TRAPS);
+
+  // Parse new stream. This won't update the system dictionary or
+  // class hierarchy, simply parse the stream. Used by JVMTI RedefineClasses.
+  static klassOop parse_stream(symbolHandle class_name,
+                               Handle class_loader,
+                               Handle protection_domain,
+                               ClassFileStream* st,
+                               TRAPS);
+                               
+  // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
+  static klassOop resolve_from_stream(symbolHandle class_name, Handle class_loader, Handle protection_domain, ClassFileStream* st, TRAPS);
+  
+  // Lookup an already loaded class. If not found NULL is returned.
+  static klassOop find(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+
+  // Lookup an already loaded instance or array class.
+  // Do not make any queries to class loaders; consult only the cache.
+  // If not found NULL is returned.
+  static klassOop find_instance_or_array_klass(symbolHandle class_name,
+					       Handle class_loader,
+					       Handle protection_domain,
+					       TRAPS);
+
+  // Lookup an instance or array class that has already been loaded
+  // either into the given class loader, or else into another class
+  // loader that is constrained (via loader constraints) to produce
+  // a consistent class.  Do not take protection domains into account.
+  // Do not make any queries to class loaders; consult only the cache.
+  // Return NULL if the class is not found.
+  //
+  // This function is a strict superset of find_instance_or_array_klass.
+  // This function (the unchecked version) makes a conservative prediction
+  // of the result of the checked version, assuming successful lookup.
+  // If both functions return non-null, they must return the same value.
+  // Also, the unchecked version may sometimes be non-null where the
+  // checked version is null.  This can occur in several ways:
+  //   1. No query has yet been made to the class loader.
+  //   2. The class loader was queried, but chose not to delegate.
+  //   3. ClassLoader.checkPackageAccess rejected a proposed protection domain.
+  //   4. Loading was attempted, but there was a linkage error of some sort.
+  // In all of these cases, the loader constraints on this type are
+  // satisfied, and it is safe for classes in the given class loader
+  // to manipulate strongly-typed values of the found class, subject
+  // to local linkage and access checks.
+  static klassOop find_constrained_instance_or_array_klass(symbolHandle class_name,
+                                                           Handle class_loader,
+                                                           TRAPS);
+  
+  // Iterate over all klasses in dictionary
+  //   Just the classes from defining class loaders
+  static void classes_do(void f(klassOop));
+  // Added for initialize_itable_for_klass to handle exceptions
+  static void classes_do(void f(klassOop, TRAPS), TRAPS);
+  //   All classes, and their class loaders
+  static void classes_do(void f(klassOop, oop));
+  //   All classes, and their class loaders
+  //   (added for helpers that use HandleMarks and ResourceMarks)
+  static void classes_do(void f(klassOop, oop, TRAPS), TRAPS);
+  // All entries in the placeholder table and their class loaders
+  static void placeholders_do(void f(symbolOop, oop));
+
+  // Iterate over all methods in all klasses in dictionary
+  static void methods_do(void f(methodOop));
+
+  // Garbage collection support
+
+  // This method applies "blk->do_oop" to all the pointers to "system"
+  // classes and loaders.
+  static void always_strong_oops_do(OopClosure* blk);
+  static void always_strong_classes_do(OopClosure* blk);
+  // This method applies "blk->do_oop" to all the placeholders.
+  static void placeholders_do(OopClosure* blk);
+
+  // Unload (that is, break root links to) all unmarked classes and
+  // loaders.  Returns "true" iff something was unloaded.
+  static bool do_unloading(BoolObjectClosure* is_alive);
+
+  // Applies "f->do_oop" to all root oops in the system dictionary.
+  static void oops_do(OopClosure* f);
+
+  // Applies "f->do_oop" to root oops that are loaded from a shared heap.
+  static void shared_oops_do(OopClosure* f);
+
+  // System loader lock
+  static oop system_loader_lock()	    { return _system_loader_lock_obj; }
+
+private:
+  //    Traverses preloaded oops: various system classes.  These are
+  //    guaranteed to be in the perm gen.
+  static void preloaded_oops_do(OopClosure* f);
+  static void lazily_loaded_oops_do(OopClosure* f);
+
+public:
+  // Sharing support.
+  static void reorder_dictionary();
+  static void copy_buckets(char** top, char* end);
+  static void copy_table(char** top, char* end);
+  static void reverse();
+  static void set_shared_dictionary(HashtableBucket* t, int length,
+                                    int number_of_entries);
+  // Printing
+  static void print()                   PRODUCT_RETURN;
+  static void print_class_statistics()  PRODUCT_RETURN;
+  static void print_method_statistics() PRODUCT_RETURN;
+
+  // Number of contained klasses
+  // This is both fully loaded classes and classes in the process
+  // of being loaded
+  static int number_of_classes();
+
+  // Monotonically increasing counter which grows as classes are
+  // loaded or modifications such as hot-swapping or setting/removing
+  // of breakpoints are performed
+  static inline int number_of_modifications()     { assert_locked_or_safepoint(Compile_lock); return _number_of_modifications; }
+  // Needed by evolution and breakpoint code
+  static inline void notice_modification()        { assert_locked_or_safepoint(Compile_lock); ++_number_of_modifications;      }
+
+  // Verification
+  static void verify();
+
+#ifdef ASSERT
+  static bool is_internal_format(symbolHandle class_name);
+#endif
+
+  // Verify class is in dictionary
+  static void verify_obj_klass_present(Handle obj,
+                                       symbolHandle class_name,
+                                       Handle class_loader);
+
+  // Initialization
+  static void initialize(TRAPS);
+
+  // Fast access to commonly used classes (preloaded)
+  static klassOop check_klass(klassOop k) {
+    assert(k != NULL, "preloaded klass not initialized"); 
+    return k;
+  }
+
+public:
+  static klassOop object_klass()            { return check_klass(_object_klass); }
+  static klassOop string_klass()            { return check_klass(_string_klass); }
+  static klassOop class_klass()             { return check_klass(_class_klass); }
+  static klassOop cloneable_klass()         { return check_klass(_cloneable_klass); }
+  static klassOop classloader_klass()       { return check_klass(_classloader_klass); }
+  static klassOop serializable_klass()      { return check_klass(_serializable_klass); }
+  static klassOop system_klass()            { return check_klass(_system_klass); }
+
+  static klassOop throwable_klass()         { return check_klass(_throwable_klass); }
+  static klassOop error_klass()             { return check_klass(_error_klass); }
+  static klassOop threaddeath_klass()       { return check_klass(_threaddeath_klass); }
+  static klassOop exception_klass()         { return check_klass(_exception_klass); }
+  static klassOop runtime_exception_klass() { return check_klass(_runtime_exception_klass); }
+  static klassOop classNotFoundException_klass() { return check_klass(_classNotFoundException_klass); }
+  static klassOop noClassDefFoundError_klass()   { return check_klass(_noClassDefFoundError_klass); }
+  static klassOop linkageError_klass()       { return check_klass(_linkageError_klass); }
+  static klassOop ClassCastException_klass() { return check_klass(_classCastException_klass); }
+  static klassOop ArrayStoreException_klass() { return check_klass(_arrayStoreException_klass); }
+  static klassOop virtualMachineError_klass()  { return check_klass(_virtualMachineError_klass); }
+  static klassOop OutOfMemoryError_klass()  { return check_klass(_outOfMemoryError_klass); }
+  static klassOop StackOverflowError_klass() { return check_klass(_StackOverflowError_klass); }
+  static klassOop IllegalMonitorStateException_klass() { return check_klass(_illegalMonitorStateException_klass); }
+  static klassOop protectionDomain_klass()  { return check_klass(_protectionDomain_klass); }
+  static klassOop AccessControlContext_klass() { return check_klass(_AccessControlContext_klass); }
+  static klassOop reference_klass()         { return check_klass(_reference_klass); }
+  static klassOop soft_reference_klass()    { return check_klass(_soft_reference_klass); }
+  static klassOop weak_reference_klass()    { return check_klass(_weak_reference_klass); }
+  static klassOop final_reference_klass()   { return check_klass(_final_reference_klass); }
+  static klassOop phantom_reference_klass() { return check_klass(_phantom_reference_klass); }
+  static klassOop finalizer_klass()         { return check_klass(_finalizer_klass); }
+  
+  static klassOop thread_klass()            { return check_klass(_thread_klass); }
+  static klassOop threadGroup_klass()       { return check_klass(_threadGroup_klass); }
+  static klassOop properties_klass()        { return check_klass(_properties_klass); }  
+  static klassOop reflect_accessible_object_klass() { return check_klass(_reflect_accessible_object_klass); }
+  static klassOop reflect_field_klass()     { return check_klass(_reflect_field_klass); }
+  static klassOop reflect_method_klass()    { return check_klass(_reflect_method_klass); }
+  static klassOop reflect_constructor_klass() { return check_klass(_reflect_constructor_klass); }
+  static klassOop reflect_method_accessor_klass() { 
+    assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only");
+    return check_klass(_reflect_method_accessor_klass);
+  }
+  static klassOop reflect_constructor_accessor_klass() {
+    assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only");
+    return check_klass(_reflect_constructor_accessor_klass);
+  }
+  // NOTE: needed too early in bootstrapping process to have checks based on JDK version
+  static klassOop reflect_magic_klass()     { return _reflect_magic_klass; }
+  static klassOop reflect_delegating_classloader_klass() { return _reflect_delegating_classloader_klass; }
+  static klassOop reflect_constant_pool_klass() {
+    assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
+    return _reflect_constant_pool_klass;
+  }
+  static klassOop reflect_unsafe_static_field_accessor_impl_klass() {
+    assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
+    return _reflect_unsafe_static_field_accessor_impl_klass;
+  }
+
+  static klassOop vector_klass()            { return check_klass(_vector_klass); }
+  static klassOop hashtable_klass()         { return check_klass(_hashtable_klass); }
+  static klassOop stringBuffer_klass()      { return check_klass(_stringBuffer_klass); }
+  static klassOop stackTraceElement_klass() { return check_klass(_stackTraceElement_klass); }
+
+  static klassOop java_nio_Buffer_klass()   { return check_klass(_java_nio_Buffer_klass); }
+
+  static klassOop sun_misc_AtomicLongCSImpl_klass() { return _sun_misc_AtomicLongCSImpl_klass; }
+
+  static klassOop boolean_klass()           { return check_klass(_boolean_klass); }
+  static klassOop char_klass()              { return check_klass(_char_klass); }
+  static klassOop float_klass()             { return check_klass(_float_klass); }
+  static klassOop double_klass()            { return check_klass(_double_klass); }
+  static klassOop byte_klass()              { return check_klass(_byte_klass); }
+  static klassOop short_klass()             { return check_klass(_short_klass); }
+  static klassOop int_klass()               { return check_klass(_int_klass); }
+  static klassOop long_klass()              { return check_klass(_long_klass); } 
+
+  static klassOop box_klass(BasicType t) {
+    assert((uint)t < T_VOID+1, "range check");
+    return check_klass(_box_klasses[t]);
+  }
+  static BasicType box_klass_type(klassOop k);  // inverse of box_klass
+
+  // methods returning lazily loaded klasses
+  // The corresponding method to load the class must be called before calling them.
+  static klassOop abstract_ownable_synchronizer_klass() { return check_klass(_abstract_ownable_synchronizer_klass); }
+
+  static void load_abstract_ownable_synchronizer_klass(TRAPS);
+
+private:
+  // Tells whether ClassLoader.loadClassInternal is present
+  static bool has_loadClassInternal()       { return _has_loadClassInternal; }
+
+public:
+  // Tells whether ClassLoader.checkPackageAccess is present
+  static bool has_checkPackageAccess()      { return _has_checkPackageAccess; }
+
+  static bool class_klass_loaded()          { return _class_klass != NULL; }
+  static bool cloneable_klass_loaded()      { return _cloneable_klass != NULL; }
+  
+  // Returns default system loader
+  static oop java_system_loader();
+
+  // Compute the default system loader
+  static void compute_java_system_loader(TRAPS);
+
+private:
+  // Mirrors for primitive classes (created eagerly)
+  static oop check_mirror(oop m) {
+    assert(m != NULL, "mirror not initialized"); 
+    return m;
+  }
+
+public:
+  static oop int_mirror()                   { return check_mirror(_int_mirror); }
+  static oop float_mirror()                 { return check_mirror(_float_mirror); }
+  static oop double_mirror()                { return check_mirror(_double_mirror); }
+  static oop byte_mirror()                  { return check_mirror(_byte_mirror); }
+  static oop bool_mirror()                  { return check_mirror(_bool_mirror); }
+  static oop char_mirror()                  { return check_mirror(_char_mirror); }
+  static oop long_mirror()                  { return check_mirror(_long_mirror); }
+  static oop short_mirror()                 { return check_mirror(_short_mirror); }
+  static oop void_mirror()                  { return check_mirror(_void_mirror); }
+
+  static oop java_mirror(BasicType t) {
+    assert((uint)t < T_VOID+1, "range check");
+    return check_mirror(_mirrors[t]);
+  }
+  // Note:  java_lang_Class::primitive_type is the inverse of java_mirror
+
+  // Check class loader constraints
+  static bool add_loader_constraint(symbolHandle name, Handle loader1,
+                                    Handle loader2, TRAPS);
+  static char* check_signature_loaders(symbolHandle signature, Handle loader1,
+				       Handle loader2, bool is_method, TRAPS);
+
+  // Utility for printing loader "name" as part of tracing constraints
+  static const char* loader_name(oop loader) {
+    return ((loader) == NULL ? "<bootloader>" : 
+	    instanceKlass::cast((loader)->klass())->name()->as_C_string() );
+  }
+
+  // Record the error when the first attempt to resolve a reference from a constant
+  // pool entry to a class fails.
+  static void add_resolution_error(constantPoolHandle pool, int which, symbolHandle error);
+  static symbolOop find_resolution_error(constantPoolHandle pool, int which);
+
+ private:
+
+  enum Constants {
+    _loader_constraint_size = 107,                     // number of entries in constraint table
+    _resolution_error_size  = 107,		       // number of entries in resolution error table
+    _nof_buckets            = 1009                     // number of buckets in hash table
+  };
+
+
+  // Static variables
+
+  // Hashtable holding loaded classes.
+  static Dictionary*            _dictionary;
+
+  // Hashtable holding placeholders for classes being loaded.
+  static PlaceholderTable*       _placeholders;
+
+  // Hashtable holding classes from the shared archive.
+  static Dictionary*             _shared_dictionary;
+
+  // Monotonically increasing counter which grows with
+  // _number_of_classes as well as hot-swapping and breakpoint setting
+  // and removal.
+  static int                     _number_of_modifications;
+
+  // Lock object for system class loader
+  static oop                     _system_loader_lock_obj;
+
+  // Constraints on class loaders
+  static LoaderConstraintTable*  _loader_constraints;
+
+  // Resolution errors
+  static ResolutionErrorTable*	 _resolution_errors;
+
+public:
+  // for VM_CounterDecay iteration support
+  friend class CounterDecay;
+  static klassOop try_get_next_class();
+
+private:
+  static void validate_protection_domain(instanceKlassHandle klass,
+                                         Handle class_loader,
+                                         Handle protection_domain, TRAPS);
+
+  friend class VM_PopulateDumpSharedSpace;
+  friend class TraversePlaceholdersClosure;
+  static Dictionary*         dictionary() { return _dictionary; }
+  static Dictionary*         shared_dictionary() { return _shared_dictionary; }
+  static PlaceholderTable*   placeholders() { return _placeholders; }
+  static LoaderConstraintTable* constraints() { return _loader_constraints; }
+  static ResolutionErrorTable* resolution_errors() { return _resolution_errors; }
+
+  // Basic loading operations
+  static klassOop resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+  static klassOop resolve_array_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
+  static instanceKlassHandle handle_parallel_super_load(symbolHandle class_name, symbolHandle supername, Handle class_loader, Handle protection_domain, Handle lockObject, TRAPS);
+  // Wait on SystemDictionary_lock; unlocks lockObject before 
+  // waiting; relocks lockObject with correct recursion count
+  // after waiting, but before reentering SystemDictionary_lock
+  // to preserve lock order semantics.
+  static void double_lock_wait(Handle lockObject, TRAPS);
+  static void define_instance_class(instanceKlassHandle k, TRAPS);
+  static instanceKlassHandle find_or_define_instance_class(symbolHandle class_name, 
+                                                Handle class_loader, 
+                                                instanceKlassHandle k, TRAPS);
+  static instanceKlassHandle load_shared_class(symbolHandle class_name,
+                                               Handle class_loader, TRAPS);
+  static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
+                                               Handle class_loader, TRAPS);
+  static instanceKlassHandle load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS);
+  static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
+  static void check_loader_lock_contention(Handle loader_lock, TRAPS);
+
+  static klassOop find_shared_class(symbolHandle class_name);
+
+  // Setup link to hierarchy
+  static void add_to_hierarchy(instanceKlassHandle k, TRAPS);  
+ 
+private:
+  // We pass in the hashtable index so we can calculate it outside of
+  // the SystemDictionary_lock.   
+
+  // Basic find on loaded classes 
+  static klassOop find_class(int index, unsigned int hash,
+                             symbolHandle name, Handle loader);
+
+  // Basic find on classes in the midst of being loaded
+  static symbolOop find_placeholder(int index, unsigned int hash,
+                                    symbolHandle name, Handle loader);
+
+  // Basic find operation of loaded classes and classes in the midst
+  // of loading;  used for assertions and verification only.
+  static oop find_class_or_placeholder(symbolHandle class_name,
+                                       Handle class_loader);
+
+  // Updating entry in dictionary
+  // Add a completely loaded class 
+  static void add_klass(int index, symbolHandle class_name,
+                        Handle class_loader, KlassHandle obj);
+
+  // Add a placeholder for a class being loaded
+  static void add_placeholder(int index, 
+                              symbolHandle class_name, 
+                              Handle class_loader);
+  static void remove_placeholder(int index,
+                                 symbolHandle class_name, 
+                                 Handle class_loader);
+
+  // Performs cleanups after resolve_super_or_fail. This typically needs
+  // to be called on failure.
+  // Won't throw, but can block.
+  static void resolution_cleanups(symbolHandle class_name,
+                                  Handle class_loader,
+                                  TRAPS);
+  
+  // Initialization
+  static void initialize_preloaded_classes(TRAPS);
+  static void initialize_basic_type_mirrors(TRAPS);
+    
+  // Class loader constraints
+  static void check_constraints(int index, unsigned int hash,
+                                instanceKlassHandle k, Handle loader, 
+                                bool defining, TRAPS);
+  static void update_dictionary(int d_index, unsigned int d_hash,
+                                int p_index, unsigned int p_hash,
+                                instanceKlassHandle k, Handle loader, TRAPS);
+
+  // Variables holding commonly used klasses (preloaded)
+  static klassOop _object_klass;
+  static klassOop _string_klass;
+  static klassOop _class_klass;
+  static klassOop _cloneable_klass;
+  static klassOop _classloader_klass;
+  static klassOop _serializable_klass;
+  static klassOop _system_klass;
+  
+  static klassOop _throwable_klass;
+  static klassOop _error_klass;
+  static klassOop _threaddeath_klass;
+  static klassOop _exception_klass;
+  static klassOop _runtime_exception_klass;
+  static klassOop _classNotFoundException_klass;
+  static klassOop _noClassDefFoundError_klass;
+  static klassOop _linkageError_klass;
+  static klassOop _classCastException_klass;
+  static klassOop _arrayStoreException_klass;
+  static klassOop _virtualMachineError_klass;
+  static klassOop _outOfMemoryError_klass;
+  static klassOop _StackOverflowError_klass;
+  static klassOop _illegalMonitorStateException_klass;
+  static klassOop _protectionDomain_klass;
+  static klassOop _AccessControlContext_klass;
+  static klassOop _reference_klass;
+  static klassOop _soft_reference_klass;
+  static klassOop _weak_reference_klass;
+  static klassOop _final_reference_klass;
+  static klassOop _phantom_reference_klass;
+  static klassOop _finalizer_klass;
+
+  static klassOop _thread_klass;
+  static klassOop _threadGroup_klass;
+  static klassOop _properties_klass;      
+  static klassOop _reflect_accessible_object_klass;
+  static klassOop _reflect_field_klass;
+  static klassOop _reflect_method_klass;
+  static klassOop _reflect_constructor_klass;
+  // 1.4 reflection implementation
+  static klassOop _reflect_magic_klass;
+  static klassOop _reflect_method_accessor_klass;
+  static klassOop _reflect_constructor_accessor_klass;
+  static klassOop _reflect_delegating_classloader_klass;
+  // 1.5 annotations implementation
+  static klassOop _reflect_constant_pool_klass;
+  static klassOop _reflect_unsafe_static_field_accessor_impl_klass;
+
+  static klassOop _stringBuffer_klass;
+  static klassOop _vector_klass;
+  static klassOop _hashtable_klass;
+
+  static klassOop _stackTraceElement_klass;
+
+  static klassOop _java_nio_Buffer_klass;
+
+  static klassOop _sun_misc_AtomicLongCSImpl_klass;
+
+  // Lazily loaded klasses
+  static volatile klassOop _abstract_ownable_synchronizer_klass;
+
+  // Box klasses
+  static klassOop _boolean_klass;
+  static klassOop _char_klass;
+  static klassOop _float_klass;
+  static klassOop _double_klass;
+  static klassOop _byte_klass;
+  static klassOop _short_klass;
+  static klassOop _int_klass;
+  static klassOop _long_klass;
+
+  // table of same
+  static klassOop _box_klasses[T_VOID+1];
+
+  static oop  _java_system_loader;
+
+  static bool _has_loadClassInternal;
+  static bool _has_checkPackageAccess;
+
+  // Primitive classes
+  static oop _int_mirror;
+  static oop _float_mirror;
+  static oop _double_mirror;
+  static oop _byte_mirror;
+  static oop _bool_mirror;
+  static oop _char_mirror;
+  static oop _long_mirror;
+  static oop _short_mirror;
+  static oop _void_mirror;
+
+  // table of same
+  static oop _mirrors[T_VOID+1];
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/verificationType.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,139 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)verificationType.cpp	1.16 07/05/05 17:07:01 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_verificationType.cpp.incl"
+
+VerificationType VerificationType::from_tag(u1 tag) {
+  switch (tag) {
+    case ITEM_Top:     return bogus_type();
+    case ITEM_Integer: return integer_type();
+    case ITEM_Float:   return float_type();
+    case ITEM_Double:  return double_type();
+    case ITEM_Long:    return long_type();
+    case ITEM_Null:    return null_type();
+    default:
+      ShouldNotReachHere();
+      return bogus_type();
+  }
+}
+
+bool VerificationType::is_reference_assignable_from(
+    const VerificationType& from, instanceKlassHandle context, TRAPS) const {
+  if (from.is_null()) {
+    // null is assignable to any reference
+    return true;
+  } else if (is_null()) {
+    return false;
+  } else if (name() == from.name()) {
+    return true;
+  } else if (is_object()) {
+    // We need check the class hierarchy to check assignability
+    if (name() == vmSymbols::java_lang_Object()) {
+      // any object or array is assignable to java.lang.Object 
+      return true;
+    }
+    klassOop this_class = SystemDictionary::resolve_or_fail(
+        name_handle(), Handle(THREAD, context->class_loader()), 
+        Handle(THREAD, context->protection_domain()), true, CHECK_false);
+    if (this_class->klass_part()->is_interface()) {
+      // We treat interfaces as java.lang.Object, including 
+      // java.lang.Cloneable and java.io.Serializable
+      return true;
+    } else if (from.is_object()) {
+      klassOop from_class = SystemDictionary::resolve_or_fail(
+          from.name_handle(), Handle(THREAD, context->class_loader()), 
+          Handle(THREAD, context->protection_domain()), true, CHECK_false);
+      return instanceKlass::cast(from_class)->is_subclass_of(this_class);
+    }
+  } else if (is_array() && from.is_array()) {
+    VerificationType comp_this = get_component(CHECK_false);
+    VerificationType comp_from = from.get_component(CHECK_false);
+    return comp_this.is_assignable_from(comp_from, context, CHECK_false);
+  }
+  return false;
+}
+
+VerificationType VerificationType::get_component(TRAPS) const {
+  assert(is_array() && name()->utf8_length() >= 2, "Must be a valid array");
+  symbolOop component;
+  switch (name()->byte_at(1)) {
+    case 'Z': return VerificationType(Boolean);
+    case 'B': return VerificationType(Byte);
+    case 'C': return VerificationType(Char);
+    case 'S': return VerificationType(Short);
+    case 'I': return VerificationType(Integer);
+    case 'J': return VerificationType(Long);
+    case 'F': return VerificationType(Float);
+    case 'D': return VerificationType(Double);
+    case '[': 
+      component = SymbolTable::lookup(
+        name(), 1, name()->utf8_length(), 
+        CHECK_(VerificationType::bogus_type()));
+      return VerificationType::reference_type(component);
+    case 'L': 
+      component = SymbolTable::lookup(
+        name(), 2, name()->utf8_length() - 1, 
+        CHECK_(VerificationType::bogus_type()));
+      return VerificationType::reference_type(component);
+    default:
+      ShouldNotReachHere();
+      return VerificationType::bogus_type();
+  }
+}
+
+#ifndef PRODUCT
+
+void VerificationType::print_on(outputStream* st) const {
+  switch (_u._data) {
+    case Bogus:            st->print(" bogus "); break;
+    case Category1:        st->print(" category1 "); break;
+    case Category2:        st->print(" category2 "); break;
+    case Category2_2nd:    st->print(" category2_2nd "); break;
+    case Boolean:          st->print(" boolean "); break;
+    case Byte:             st->print(" byte "); break;
+    case Short:            st->print(" short "); break;
+    case Char:             st->print(" char "); break;
+    case Integer:          st->print(" integer "); break;
+    case Float:            st->print(" float "); break;
+    case Long:             st->print(" long "); break;
+    case Double:           st->print(" double "); break;
+    case Long_2nd:         st->print(" long_2nd "); break;
+    case Double_2nd:       st->print(" double_2nd "); break;
+    case Null:             st->print(" null "); break;
+    default:
+      if (is_uninitialized_this()) {
+        st->print(" uninitializedThis "); 
+      } else if (is_uninitialized()) {
+        st->print(" uninitialized %d ", bci()); 
+      } else {
+        st->print(" class %s ", name()->as_klass_external_name());
+      }
+  }
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/verificationType.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,308 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)verificationType.hpp	1.17 07/05/05 17:07:01 JVM"
+#endif
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+enum {
+  // As specified in the JVM spec
+  ITEM_Top = 0, 
+  ITEM_Integer = 1, 
+  ITEM_Float = 2, 
+  ITEM_Double = 3, 
+  ITEM_Long = 4, 
+  ITEM_Null = 5, 
+  ITEM_UninitializedThis = 6,
+  ITEM_Object = 7,
+  ITEM_Uninitialized = 8,
+  ITEM_Bogus = (uint)-1  
+};
+
+class VerificationType VALUE_OBJ_CLASS_SPEC {
+  private:
+    // Least significant bits of _handle are always 0, so we use these as 
+    // the indicator that the _handle is valid.  Otherwise, the _data field
+    // contains encoded data (as specified below).  Should the VM change 
+    // and the lower bits on oops aren't 0, the assert in the constructor
+    // will catch this and we'll have to add a discriminator tag to this 
+    // structure.
+    union {
+      symbolOop* _handle;
+      uintptr_t _data; 
+    } _u;
+
+    enum {
+      // The rest are not found in classfiles, but used by the verifier
+      ITEM_Boolean = 9, ITEM_Byte, ITEM_Short, ITEM_Char,
+      ITEM_Long_2nd, ITEM_Double_2nd
+    };
+
+    // Enum for the _data field
+    enum { 
+      // Bottom two bits determine if the type is a reference, primitive, 
+      // uninitialized or a query-type.
+      TypeMask           = 0x00000003, 
+
+      // Topmost types encoding
+      Reference          = 0x0,        // _handle contains the name
+      Primitive          = 0x1,        // see below for primitive list
+      Uninitialized      = 0x2,        // 0x00ffff00 contains bci
+      TypeQuery          = 0x3,        // Meta-types used for category testing
+
+      // Utility flags 
+      ReferenceFlag      = 0x00,       // For reference query types
+      Category1Flag      = 0x01,       // One-word values
+      Category2Flag      = 0x02,       // First word of a two-word value
+      Category2_2ndFlag  = 0x04,       // Second word of a two-word value
+
+      // special reference values
+      Null               = 0x00000000, // A reference with a 0 handle is null
+
+      // Primitives categories (the second byte determines the category)
+      Category1          = (Category1Flag     << 1 * BitsPerByte) | Primitive,
+      Category2          = (Category2Flag     << 1 * BitsPerByte) | Primitive,
+      Category2_2nd      = (Category2_2ndFlag << 1 * BitsPerByte) | Primitive,
+
+      // Primitive values (type discriminator stored in most-significant bytes)
+      Bogus              = (ITEM_Bogus      << 2 * BitsPerByte) | Category1,
+      Boolean            = (ITEM_Boolean    << 2 * BitsPerByte) | Category1,
+      Byte               = (ITEM_Byte       << 2 * BitsPerByte) | Category1,
+      Short              = (ITEM_Short      << 2 * BitsPerByte) | Category1,
+      Char               = (ITEM_Char       << 2 * BitsPerByte) | Category1,
+      Integer            = (ITEM_Integer    << 2 * BitsPerByte) | Category1,
+      Float              = (ITEM_Float      << 2 * BitsPerByte) | Category1,
+      Long               = (ITEM_Long       << 2 * BitsPerByte) | Category2,
+      Double             = (ITEM_Double     << 2 * BitsPerByte) | Category2,
+      Long_2nd           = (ITEM_Long_2nd   << 2 * BitsPerByte) | Category2_2nd,
+      Double_2nd         = (ITEM_Double_2nd << 2 * BitsPerByte) | Category2_2nd,
+
+      // Used by Uninitialized (second and third bytes hold the bci)
+      BciMask            = 0xffff << 1 * BitsPerByte,
+      BciForThis         = ((u2)-1),   // A bci of -1 is an Uninitialized-This
+
+      // Query values
+      ReferenceQuery     = (ReferenceFlag     << 1 * BitsPerByte) | TypeQuery,
+      Category1Query     = (Category1Flag     << 1 * BitsPerByte) | TypeQuery,
+      Category2Query     = (Category2Flag     << 1 * BitsPerByte) | TypeQuery,
+      Category2_2ndQuery = (Category2_2ndFlag << 1 * BitsPerByte) | TypeQuery
+    };
+
+  VerificationType(uintptr_t raw_data) {
+    _u._data = raw_data;
+  }
+
+ public:
+
+  VerificationType() { *this = bogus_type(); }
+
+  // Create verification types
+  static VerificationType bogus_type() { return VerificationType(Bogus); }
+  static VerificationType null_type() { return VerificationType(Null); }
+  static VerificationType integer_type() { return VerificationType(Integer); }
+  static VerificationType float_type() { return VerificationType(Float); }
+  static VerificationType long_type() { return VerificationType(Long); }
+  static VerificationType long2_type() { return VerificationType(Long_2nd); }
+  static VerificationType double_type() { return VerificationType(Double); }
+  static VerificationType boolean_type() { return VerificationType(Boolean); }
+  static VerificationType byte_type() { return VerificationType(Byte); }
+  static VerificationType char_type() { return VerificationType(Char); }
+  static VerificationType short_type() { return VerificationType(Short); }
+  static VerificationType double2_type() 
+    { return VerificationType(Double_2nd); }
+
+  // "check" types are used for queries.  A "check" type is not assignable
+  // to anything, but the specified types are assignable to a "check".  For 
+  // example, any category1 primitive is assignable to category1_check and 
+  // any reference is assignable to reference_check.
+  static VerificationType reference_check() 
+    { return VerificationType(ReferenceQuery); }
+  static VerificationType category1_check() 
+    { return VerificationType(Category1Query); }
+  static VerificationType category2_check() 
+    { return VerificationType(Category2Query); }
+  static VerificationType category2_2nd_check() 
+    { return VerificationType(Category2_2ndQuery); }
+
+  // For reference types, store the actual oop* handle 
+  static VerificationType reference_type(symbolHandle sh) { 
+      assert(((uintptr_t)sh.raw_value() & 0x3) == 0, "Oops must be aligned");
+      // If the above assert fails in the future because oop* isn't aligned, 
+      // then this type encoding system will have to change to have a tag value
+      // to discriminate between oops and primitives.
+      return VerificationType((uintptr_t)((symbolOop*)sh.raw_value())); 
+  }
+  static VerificationType reference_type(symbolOop s, TRAPS) 
+    { return reference_type(symbolHandle(THREAD, s)); }
+
+  static VerificationType uninitialized_type(u2 bci) 
+    { return VerificationType(bci << 1 * BitsPerByte | Uninitialized); }
+  static VerificationType uninitialized_this_type() 
+    { return uninitialized_type(BciForThis); }
+
+  // Create based on u1 read from classfile
+  static VerificationType from_tag(u1 tag);
+
+  bool is_bogus() const     { return (_u._data == Bogus); }
+  bool is_null() const      { return (_u._data == Null); }
+  bool is_boolean() const   { return (_u._data == Boolean); }
+  bool is_byte() const      { return (_u._data == Byte); }
+  bool is_char() const      { return (_u._data == Char); }
+  bool is_short() const     { return (_u._data == Short); }
+  bool is_integer() const   { return (_u._data == Integer); }
+  bool is_long() const      { return (_u._data == Long); }
+  bool is_float() const     { return (_u._data == Float); }
+  bool is_double() const    { return (_u._data == Double); }
+  bool is_long2() const     { return (_u._data == Long_2nd); }
+  bool is_double2() const   { return (_u._data == Double_2nd); }
+  bool is_reference() const { return ((_u._data & TypeMask) == Reference); }
+  bool is_category1() const { 
+    // This should return true for all one-word types, which are category1 
+    // primitives, and references (including uninitialized refs).  Though 
+    // the 'query' types should technically return 'false' here, if we 
+    // allow this to return true, we can perform the test using only
+    // 2 operations rather than 8 (3 masks, 3 compares and 2 logical 'ands').
+    // Since no one should call this on a query type anyway, this is ok.
+    assert(!is_check(), "Must not be a check type (wrong value returned)");
+    return ((_u._data & Category1) != Primitive);
+    // should only return false if it's a primitive, and the category1 flag
+    // is not set.
+  }
+  bool is_category2() const { return ((_u._data & Category2) == Category2); }
+  bool is_category2_2nd() const { 
+    return ((_u._data & Category2_2nd) == Category2_2nd); 
+  }
+  bool is_reference_check() const { return _u._data == ReferenceQuery; }
+  bool is_category1_check() const { return _u._data == Category1Query; }
+  bool is_category2_check() const { return _u._data == Category2Query; }
+  bool is_category2_2nd_check() const { return _u._data == Category2_2ndQuery; }
+  bool is_check() const { return (_u._data & TypeQuery) == TypeQuery; }
+
+  bool is_x_array(char sig) const { 
+    return is_null() || (is_array() && (name()->byte_at(1) == sig));
+  }
+  bool is_int_array() const { return is_x_array('I'); }
+  bool is_byte_array() const { return is_x_array('B'); }
+  bool is_bool_array() const { return is_x_array('Z'); }
+  bool is_char_array() const { return is_x_array('C'); }
+  bool is_short_array() const { return is_x_array('S'); }
+  bool is_long_array() const { return is_x_array('J'); }
+  bool is_float_array() const { return is_x_array('F'); }
+  bool is_double_array() const { return is_x_array('D'); }
+  bool is_object_array() const { return is_x_array('L'); }
+  bool is_array_array() const { return is_x_array('['); }
+  bool is_reference_array() const 
+    { return is_object_array() || is_array_array(); }
+  bool is_object() const 
+    { return (is_reference() && !is_null() && name()->utf8_length() >= 1 && 
+              name()->byte_at(0) != '['); }
+  bool is_array() const 
+    { return (is_reference() && !is_null() && name()->utf8_length() >= 2 && 
+              name()->byte_at(0) == '['); }
+  bool is_uninitialized() const 
+    { return ((_u._data & Uninitialized) == Uninitialized); }
+  bool is_uninitialized_this() const 
+    { return is_uninitialized() && bci() == BciForThis; }
+
+  VerificationType to_category2_2nd() const {
+    assert(is_category2(), "Must be a double word");
+    return VerificationType(is_long() ? Long_2nd : Double_2nd);
+  }
+
+  u2 bci() const {
+    assert(is_uninitialized(), "Must be uninitialized type");
+    return ((_u._data & BciMask) >> 1 * BitsPerByte);
+  }
+
+  symbolHandle name_handle() const { 
+    assert(is_reference() && !is_null(), "Must be a non-null reference");
+    return symbolHandle(_u._handle, true); 
+  }
+  symbolOop name() const { 
+    assert(is_reference() && !is_null(), "Must be a non-null reference");
+    return *(_u._handle); 
+  }
+
+  bool equals(const VerificationType& t) const {
+    return (_u._data == t._u._data ||
+      (is_reference() && t.is_reference() && !is_null() && !t.is_null() && 
+       name() == t.name()));
+  }
+
+  bool operator ==(const VerificationType& t) const {
+    return equals(t);
+  }
+
+  bool operator !=(const VerificationType& t) const {
+    return !equals(t);
+  }
+
+  // The whole point of this type system - check to see if one type
+  // is assignable to another.  Returns true if one can assign 'from' to 
+  // this.
+  bool is_assignable_from(
+      const VerificationType& from, instanceKlassHandle context, TRAPS) const {
+    if (equals(from) || is_bogus()) {
+      return true;
+    } else {
+      switch(_u._data) {
+        case Category1Query:
+          return from.is_category1();
+        case Category2Query:
+          return from.is_category2();
+        case Category2_2ndQuery:
+          return from.is_category2_2nd();
+        case ReferenceQuery:
+          return from.is_reference() || from.is_uninitialized();
+        case Boolean:
+        case Byte:
+        case Char:
+        case Short:
+          // An int can be assigned to boolean, byte, char or short values.
+          return from.is_integer(); 
+        default: 
+          if (is_reference() && from.is_reference()) {
+            return is_reference_assignable_from(from, context, CHECK_false);
+          } else {
+            return false;
+          }
+      }
+    }
+  }
+
+  VerificationType get_component(TRAPS) const;
+
+  int dimensions() const {
+    assert(is_array(), "Must be an array");
+    int index = 0;
+    while (name()->byte_at(index++) == '[');
+    return index;
+  }
+
+  void print_on(outputStream* st) const PRODUCT_RETURN;
+
+ private:
+
+  bool is_reference_assignable_from(
+    const VerificationType&, instanceKlassHandle, TRAPS) const;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,2196 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)verifier.cpp	1.112 07/05/05 17:07:02 JVM"
+#endif
+/*
+ * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_verifier.cpp.incl"
+
+// Access to external entry for VerifyClassCodes - old byte code verifier
+
+extern "C" {
+  typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint);
+  typedef jboolean (*verify_byte_codes_fn_new_t)(JNIEnv *, jclass, char *, jint, jint);
+}
+
+static void* volatile _verify_byte_codes_fn = NULL;
+
+static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
+
+// Lazily resolves the entry point of the external (old, inference-based)
+// verifier from the native java library.  Prefers the newer
+// major-version-aware symbol; falls back to the legacy "VerifyClassCodes"
+// and records which flavor was found in _is_new_verify_byte_codes_fn.
+// The release stores publish the pointer for concurrent callers; a race
+// here is benign since both threads resolve the same symbols.
+static void* verify_byte_codes_fn() {
+  if (_verify_byte_codes_fn == NULL) {
+    void *lib_handle = os::native_java_library();
+    void *func = hpi::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
+    OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+    if (func == NULL) {
+      // New symbol absent: flip the flag first, then try the old symbol,
+      // so a reader that sees a non-NULL pointer also sees the right flag.
+      OrderAccess::release_store(&_is_new_verify_byte_codes_fn, false);
+      func = hpi::dll_lookup(lib_handle, "VerifyClassCodes");
+      OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+    }
+  }
+  // May still be NULL if neither symbol could be resolved.
+  return (void*)_verify_byte_codes_fn;
+}
+
+
+// Methods in Verifier
+
+// Whether classes from the given loader need verification: the bootstrap
+// loader (NULL) is governed by -Xverify's local flag, all other loaders
+// by the remote flag.
+bool Verifier::should_verify_for(oop class_loader) {
+  return class_loader == NULL ? 
+    BytecodeVerificationLocal : BytecodeVerificationRemote;
+}
+
+// Returns true when verification constraints may be relaxed for classes
+// defined by 'loader'.  Full verification is required either when both
+// local and remote verification are on (verify everything), or when only
+// remote verification is on and the loader is not trusted; in every other
+// configuration the check can be relaxed.
+bool Verifier::relax_verify_for(oop loader) {
+  bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
+  bool need_verify = 
+    // verifyAll
+    (BytecodeVerificationLocal && BytecodeVerificationRemote) || 
+    // verifyRemote
+    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted); 
+  return !need_verify;
+}
+
+// Top-level verification driver for one class.  Chooses between the new
+// split (stackmap-based) verifier and the old inference verifier, then
+// turns any verifier-reported failure into a thrown VerifyError /
+// ClassFormatError.  Returns true on success, false when an exception
+// (either pre-existing or newly created here) is pending.
+// NOTE(review): the 'mode' parameter is not consulted anywhere in this
+// body — confirm whether callers rely on it elsewhere.
+bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, TRAPS) {
+  ResourceMark rm(THREAD);
+  HandleMark hm;
+
+  symbolHandle exception_name;
+  // Buffer for the failure message; sized so the class name plus a fixed
+  // amount of detail always fits.
+  const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
+  char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
+
+  const char* klassName = klass->external_name();
+
+  // If the class should be verified, first see if we can use the split
+  // verifier.  If not, or if verification fails and FailOverToOldVerifier
+  // is set, then call the inference verifier.
+  if (is_eligible_for_verification(klass)) {
+    if (TraceClassInitialization) {
+      tty->print_cr("Start class verification for: %s", klassName);
+    }
+    // Split verifier requires class files new enough to carry StackMapTable
+    // attributes.
+    if (UseSplitVerifier && 
+        klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
+        ClassVerifier split_verifier(
+          klass, message_buffer, message_buffer_len, THREAD);
+        split_verifier.verify_class(THREAD);
+        exception_name = split_verifier.result();
+      // Retry with the old verifier only for ordinary verification failures,
+      // never when a real exception is already pending.
+      if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION && 
+          (exception_name == vmSymbols::java_lang_VerifyError() ||
+           exception_name == vmSymbols::java_lang_ClassFormatError())) {
+        if (TraceClassInitialization) {
+          tty->print_cr(
+            "Fail over class verification to old verifier for: %s", klassName);
+        }
+        exception_name = inference_verify(
+          klass, message_buffer, message_buffer_len, THREAD);
+      }
+    } else {
+      exception_name = inference_verify(
+          klass, message_buffer, message_buffer_len, THREAD);
+    }
+
+    if (TraceClassInitialization) {
+      if (HAS_PENDING_EXCEPTION) {
+        tty->print("Verification for %s has", klassName);
+        tty->print_cr(" exception pending %s ",
+          instanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
+      } else if (!exception_name.is_null()) {
+        tty->print_cr("Verification for %s failed", klassName);
+      }
+      tty->print_cr("End class verification for: %s", klassName);
+    }
+  }
+
+  if (HAS_PENDING_EXCEPTION) {
+    return false; // use the existing exception
+  } else if (exception_name.is_null()) {
+    return true; // verification succeeded
+  } else { // VerifyError or ClassFormatError to be created and thrown
+    ResourceMark rm(THREAD);
+    instanceKlassHandle kls = 
+      SystemDictionary::resolve_or_fail(exception_name, true, CHECK_false);
+    // Walk the exception class's superclass chain looking for the class
+    // under verification.
+    while (!kls.is_null()) {
+      if (kls == klass) {
+        // If the class being verified is the exception we're creating 
+        // or one of it's superclasses, we're in trouble and are going 
+        // to infinitely recurse when we try to initialize the exception.
+        // So bail out here by throwing the preallocated VM error.
+        THROW_OOP_(Universe::virtual_machine_error_instance(), false);
+      }
+      kls = kls->super();
+    }
+    message_buffer[message_buffer_len - 1] = '\0'; // just to be sure
+    THROW_MSG_(exception_name, message_buffer, false);
+  }
+}
+
+// Decides whether 'klass' should be verified at all.  Exempts the four
+// bootstrap-critical core classes, shared (CDS) classes, and the 1.4
+// reflection magic classes (unless explicitly requested via flag).
+bool Verifier::is_eligible_for_verification(instanceKlassHandle klass) {
+  symbolOop name = klass->name();
+  klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
+
+  return (should_verify_for(klass->class_loader()) && 
+    // return if the class is a bootstrapping class
+    // We need to skip the following four for bootstrapping
+    name != vmSymbols::java_lang_Object() &&
+    name != vmSymbols::java_lang_Class() &&
+    name != vmSymbols::java_lang_String() &&
+    name != vmSymbols::java_lang_Throwable() &&
+
+    // Can not verify the bytecodes for shared classes because they have
+    // already been rewritten to contain constant pool cache indices,
+    // which the verifier can't understand.
+    // Shared classes shouldn't have stackmaps either.
+    !klass()->is_shared() &&
+
+    // As of the fix for 4486457 we disable verification for all of the
+    // dynamically-generated bytecodes associated with the 1.4
+    // reflection implementation, not just those associated with
+    // sun/reflect/SerializationConstructorAccessor.
+    // NOTE: this is called too early in the bootstrapping process to be
+    // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
+    (refl_magic_klass == NULL || 
+     !klass->is_subtype_of(refl_magic_klass) ||
+     VerifyReflectionBytecodes)
+  );
+}
+
+// Invokes the old, externally-linked inference verifier on 'klass'.
+// Returns the symbol of the exception class to throw (VerifyError /
+// ClassFormatError), or a null handle on success; any failure text is
+// written into 'message'.  Must run on a JavaThread — note the unchecked
+// cast of THREAD below.
+symbolHandle Verifier::inference_verify(
+    instanceKlassHandle klass, char* message, size_t message_len, TRAPS) {
+  JavaThread* thread = (JavaThread*)THREAD;
+  JNIEnv *env = thread->jni_environment();
+
+  void* verify_func = verify_byte_codes_fn();
+
+  if (verify_func == NULL) {
+    jio_snprintf(message, message_len, "Could not link verifier");
+    return vmSymbols::java_lang_VerifyError();
+  }
+
+  ResourceMark rm(THREAD);
+  if (ClassVerifier::_verify_verbose) {
+    tty->print_cr("Verifying class %s with old format", klass->external_name());
+  }
+
+  jclass cls = (jclass) JNIHandles::make_local(env, klass->java_mirror());
+  jint result;
+
+  {
+    HandleMark hm(thread);
+    ThreadToNativeFromVM ttn(thread);
+    // ThreadToNativeFromVM takes care of changing thread_state, so safepoint
+    // code knows that we have left the VM
+
+    // Call whichever entry point was resolved: the new one additionally
+    // takes the class file's major version.
+    if (_is_new_verify_byte_codes_fn) {
+      verify_byte_codes_fn_new_t func =
+        CAST_TO_FN_PTR(verify_byte_codes_fn_new_t, verify_func);
+      result = (*func)(env, cls, message, (int)message_len,
+          klass->major_version());
+    } else {
+      verify_byte_codes_fn_t func =
+        CAST_TO_FN_PTR(verify_byte_codes_fn_t, verify_func);
+      result = (*func)(env, cls, message, (int)message_len);
+    }
+  }
+
+  JNIHandles::destroy_local(cls);
+
+  // These numbers are chosen so that VerifyClassCodes interface doesn't need
+  // to be changed (still return jboolean (unsigned char)), and result is
+  // 1 when verification is passed. 
+  symbolHandle nh(NULL);
+  if (result == 0) {
+    return vmSymbols::java_lang_VerifyError();
+  } else if (result == 1) {
+    return nh; // verified.
+  } else if (result == 2) {
+    // Out of memory in the external verifier: throw rather than report.
+    THROW_MSG_(vmSymbols::java_lang_OutOfMemoryError(), message, nh);
+  } else if (result == 3) {
+    return vmSymbols::java_lang_ClassFormatError();
+  } else {
+    ShouldNotReachHere();
+    return nh;
+  }
+}
+
+// Methods in ClassVerifier
+
+bool ClassVerifier::_verify_verbose = false;
+
+// Constructs a split verifier for 'klass'.  'msg'/'msg_len' is the
+// caller-owned buffer that receives any failure text; _exception_type
+// starts out null (no error) and _this_type caches the reference type of
+// the class being verified.
+ClassVerifier::ClassVerifier(
+    instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS)
+    : _thread(THREAD), _exception_type(symbolHandle()), _message(msg), 
+      _message_buffer_len(msg_len), _klass(klass) {
+  _this_type = VerificationType::reference_type(klass->name());
+}
+
+// Nothing to release: the message buffer is caller-owned and all other
+// state is handles/values.
+ClassVerifier::~ClassVerifier() {
+}
+
+// Verifies every concrete method of the class.  Native and abstract
+// methods carry no bytecode and are skipped.  CHECK_VERIFY presumably
+// aborts the loop once an error/exception has been recorded — confirm
+// against the macro's definition.
+void ClassVerifier::verify_class(TRAPS) {
+  if (_verify_verbose) {
+    tty->print_cr("Verifying class %s with new format", 
+      _klass->external_name());
+  }
+
+  objArrayHandle methods(THREAD, _klass->methods());
+  int num_methods = methods->length();
+
+  for (int index = 0; index < num_methods; index++) {
+    methodOop m = (methodOop)methods->obj_at(index);
+    if (m->is_native() || m->is_abstract()) {
+      // If m is native or abstract, skip it.  It is checked in class file
+      // parser that methods do not override a final method.
+      continue;
+    }
+    verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
+  }
+}
+
+void ClassVerifier::verify_method(methodHandle m, TRAPS) {
+  ResourceMark rm(THREAD);
+  _method = m;   // initialize _method
+  if (_verify_verbose) {
+    tty->print_cr("Verifying method %s", m->name_and_sig_as_C_string());
+  }
+
+  const char* bad_type_msg = "Bad type on operand stack in %s";
+
+  int32_t max_stack = m->max_stack();
+  int32_t max_locals = m->max_locals();
+  constantPoolHandle cp(THREAD, m->constants());
+
+  if (!SignatureVerifier::is_valid_method_signature(m->signature())) {
+    class_format_error("Invalid method signature");
+    return;
+  }
+
+  // Initial stack map frame: offset is 0, stack is initially empty.
+  StackMapFrame current_frame(max_locals, max_stack, this);
+  // Set initial locals
+  VerificationType return_type = current_frame.set_locals_from_arg(
+    m, current_type(), CHECK_VERIFY(this));
+
+  int32_t stackmap_index = 0; // index to the stackmap array
+
+  u4 code_length = m->code_size();
+
+  // Scan the bytecode and map each instruction's start offset to a number.
+  char* code_data = generate_code_data(m, code_length, CHECK_VERIFY(this));
+
+  int ex_min = code_length;
+  int ex_max = -1;
+  // Look through each item on the exception table. Each of the fields must refer
+  // to a legal instruction.
+  verify_exception_handler_table(
+    code_length, code_data, ex_min, ex_max, CHECK_VERIFY(this));
+
+  // Look through each entry on the local variable table and make sure
+  // its range of code array offsets is valid. (4169817)
+  if (m->has_localvariable_table()) {
+    verify_local_variable_table(code_length, code_data, CHECK_VERIFY(this));
+  }
+
+  typeArrayHandle stackmap_data(THREAD, m->stackmap_data());
+  StackMapStream stream(stackmap_data);
+  StackMapReader reader(this, &stream, code_data, code_length, THREAD);
+  StackMapTable stackmap_table(&reader, &current_frame, max_locals, max_stack,
+                               code_data, code_length, CHECK_VERIFY(this));
+
+  if (_verify_verbose) {
+    stackmap_table.print();
+  }
+
+  RawBytecodeStream bcs(m);
+
+  // Scan the byte code linearly from the start to the end
+  bool no_control_flow = false; // Set to true when there is no direct control
+                                // flow from current instruction to the next
+                                // instruction in sequence
+  Bytecodes::Code opcode;
+  while (!bcs.is_last_bytecode()) {
+    opcode = bcs.raw_next();
+    u2 bci = bcs.bci();
+
+    // Set current frame's offset to bci
+    current_frame.set_offset(bci);
+
+    // Make sure every offset in stackmap table points to the beginning of
+    // an instruction. Match current_frame to stackmap_table entry with
+    // the same offset if exists.
+    stackmap_index = verify_stackmap_table(
+      stackmap_index, bci, &current_frame, &stackmap_table,
+      no_control_flow, CHECK_VERIFY(this));
+
+    bool this_uninit = false;  // Set to true when invokespecial <init> initialized 'this'
+
+    // Merge with the next instruction
+    {
+      u2 index;
+      int target;
+      VerificationType type, type2;
+      VerificationType atype;
+
+#ifndef PRODUCT
+      if (_verify_verbose) {
+        current_frame.print();
+        tty->print_cr("offset = %d,  opcode = %s", bci, Bytecodes::name(opcode));
+      }
+#endif
+
+      // Make sure wide instruction is in correct format
+      if (bcs.is_wide()) {
+        if (opcode != Bytecodes::_iinc   && opcode != Bytecodes::_iload  &&
+            opcode != Bytecodes::_aload  && opcode != Bytecodes::_lload  &&
+            opcode != Bytecodes::_istore && opcode != Bytecodes::_astore &&
+            opcode != Bytecodes::_lstore && opcode != Bytecodes::_fload  &&
+            opcode != Bytecodes::_dload  && opcode != Bytecodes::_fstore &&
+            opcode != Bytecodes::_dstore) {
+          verify_error(bci, "Bad wide instruction"); 
+          return;
+        }
+      }
+
+      switch (opcode) {
+        case Bytecodes::_nop :
+          no_control_flow = false; break;
+        case Bytecodes::_aconst_null :
+          current_frame.push_stack(
+            VerificationType::null_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iconst_m1 :
+        case Bytecodes::_iconst_0 :
+        case Bytecodes::_iconst_1 :
+        case Bytecodes::_iconst_2 :
+        case Bytecodes::_iconst_3 :
+        case Bytecodes::_iconst_4 :
+        case Bytecodes::_iconst_5 :
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lconst_0 :
+        case Bytecodes::_lconst_1 :
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fconst_0 :
+        case Bytecodes::_fconst_1 :
+        case Bytecodes::_fconst_2 :
+          current_frame.push_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dconst_0 :
+        case Bytecodes::_dconst_1 :
+          current_frame.push_stack_2(
+            VerificationType::double_type(), 
+            VerificationType::double2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_sipush :
+        case Bytecodes::_bipush :
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_ldc :
+          verify_ldc(
+            opcode, bcs.get_index(), &current_frame, 
+            cp, bci, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_ldc_w :
+        case Bytecodes::_ldc2_w :
+          verify_ldc(
+            opcode, bcs.get_index_big(), &current_frame, 
+            cp, bci, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iload :
+          verify_iload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iload_0 :
+        case Bytecodes::_iload_1 :
+        case Bytecodes::_iload_2 :
+        case Bytecodes::_iload_3 :
+          index = opcode - Bytecodes::_iload_0;
+          verify_iload(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lload :
+          verify_lload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lload_0 :
+        case Bytecodes::_lload_1 :
+        case Bytecodes::_lload_2 :
+        case Bytecodes::_lload_3 :
+          index = opcode - Bytecodes::_lload_0;
+          verify_lload(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fload :
+          verify_fload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fload_0 :
+        case Bytecodes::_fload_1 :
+        case Bytecodes::_fload_2 :
+        case Bytecodes::_fload_3 :
+          index = opcode - Bytecodes::_fload_0;
+          verify_fload(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dload :
+          verify_dload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dload_0 :
+        case Bytecodes::_dload_1 :
+        case Bytecodes::_dload_2 :
+        case Bytecodes::_dload_3 :
+          index = opcode - Bytecodes::_dload_0;
+          verify_dload(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_aload :
+          verify_aload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_aload_0 :
+        case Bytecodes::_aload_1 :
+        case Bytecodes::_aload_2 :
+        case Bytecodes::_aload_3 :
+          index = opcode - Bytecodes::_aload_0;
+          verify_aload(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iaload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_int_array()) {
+            verify_error(bci, bad_type_msg, "iaload");
+            return;               
+          }
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_baload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_bool_array() && !atype.is_byte_array()) {
+            verify_error(bci, bad_type_msg, "baload");
+            return;
+          }
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_caload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_char_array()) {
+            verify_error(bci, bad_type_msg, "caload");
+            return;
+          }
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_saload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_short_array()) {
+            verify_error(bci, bad_type_msg, "saload");
+            return;
+          }
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_laload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_long_array()) {
+            verify_error(bci, bad_type_msg, "laload");
+            return;
+          }
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_faload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_float_array()) {
+            verify_error(bci, bad_type_msg, "faload");
+            return;
+          }
+          current_frame.push_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_daload :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_double_array()) {
+            verify_error(bci, bad_type_msg, "daload");
+            return;
+          }
+          current_frame.push_stack_2(
+            VerificationType::double_type(), 
+            VerificationType::double2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_aaload : {
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_reference_array()) {
+            verify_error(bci, bad_type_msg, "aaload");
+            return;
+          }
+          if (atype.is_null()) {
+            current_frame.push_stack(
+              VerificationType::null_type(), CHECK_VERIFY(this));
+          } else {
+            VerificationType component = 
+              atype.get_component(CHECK_VERIFY(this));
+            current_frame.push_stack(component, CHECK_VERIFY(this));
+          }
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_istore :
+          verify_istore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_istore_0 :
+        case Bytecodes::_istore_1 :
+        case Bytecodes::_istore_2 :
+        case Bytecodes::_istore_3 :
+          index = opcode - Bytecodes::_istore_0;
+          verify_istore(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lstore :
+          verify_lstore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lstore_0 :
+        case Bytecodes::_lstore_1 :
+        case Bytecodes::_lstore_2 :
+        case Bytecodes::_lstore_3 :
+          index = opcode - Bytecodes::_lstore_0;
+          verify_lstore(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fstore :
+          verify_fstore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fstore_0 :
+        case Bytecodes::_fstore_1 :
+        case Bytecodes::_fstore_2 :
+        case Bytecodes::_fstore_3 :
+          index = opcode - Bytecodes::_fstore_0;
+          verify_fstore(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dstore :
+          verify_dstore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dstore_0 :
+        case Bytecodes::_dstore_1 :
+        case Bytecodes::_dstore_2 :
+        case Bytecodes::_dstore_3 :
+          index = opcode - Bytecodes::_dstore_0;
+          verify_dstore(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_astore :
+          verify_astore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_astore_0 :
+        case Bytecodes::_astore_1 :
+        case Bytecodes::_astore_2 :
+        case Bytecodes::_astore_3 :
+          index = opcode - Bytecodes::_astore_0;
+          verify_astore(index, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iastore :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          type2 = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_int_array()) {
+            verify_error(bci, bad_type_msg, "iastore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_bastore :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          type2 = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_bool_array() && !atype.is_byte_array()) {
+            verify_error(bci, bad_type_msg, "bastore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_castore :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_char_array()) {
+            verify_error(bci, bad_type_msg, "castore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_sastore :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_short_array()) {
+            verify_error(bci, bad_type_msg, "sastore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_lastore :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_long_array()) {
+            verify_error(bci, bad_type_msg, "lastore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_fastore :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack
+            (VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_float_array()) {
+            verify_error(bci, bad_type_msg, "fastore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_dastore :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!atype.is_double_array()) {
+            verify_error(bci, bad_type_msg, "dastore");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_aastore :
+          type = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          type2 = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          atype = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          // more type-checking is done at runtime
+          if (!atype.is_reference_array()) {
+            verify_error(bci, bad_type_msg, "aastore");
+            return;
+          }
+          // 4938384: relaxed constraint in JVMS 3rd edition.
+          no_control_flow = false; break;
+        case Bytecodes::_pop :
+          current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_pop2 :
+          type = current_frame.pop_stack(CHECK_VERIFY(this));
+          if (type.is_category1()) {
+            current_frame.pop_stack(
+              VerificationType::category1_check(), CHECK_VERIFY(this));
+          } else if (type.is_category2_2nd()) {
+            current_frame.pop_stack(
+              VerificationType::category2_check(), CHECK_VERIFY(this));
+          } else {
+            verify_error(bci, bad_type_msg, "pop2");
+            return;
+          }
+          no_control_flow = false; break;
+        case Bytecodes::_dup :
+          type = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dup_x1 :
+          type = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          type2 = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dup_x2 :
+        {
+          VerificationType type3;
+          type = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          type2 = current_frame.pop_stack(CHECK_VERIFY(this));
+          if (type2.is_category1()) {
+            type3 = current_frame.pop_stack(
+              VerificationType::category1_check(), CHECK_VERIFY(this));
+          } else if (type2.is_category2_2nd()) {
+            type3 = current_frame.pop_stack(
+              VerificationType::category2_check(), CHECK_VERIFY(this));
+          } else {
+            verify_error(bci, bad_type_msg, "dup_x2");
+            return;
+          }
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type3, CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_dup2 :
+          type = current_frame.pop_stack(CHECK_VERIFY(this));
+          if (type.is_category1()) {
+            type2 = current_frame.pop_stack(
+              VerificationType::category1_check(), CHECK_VERIFY(this));
+          } else if (type.is_category2_2nd()) {
+            type2 = current_frame.pop_stack(
+              VerificationType::category2_check(), CHECK_VERIFY(this));
+          } else {
+            verify_error(bci, bad_type_msg, "dup2");
+            return;
+          }
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dup2_x1 :
+        {
+          VerificationType type3;
+          type = current_frame.pop_stack(CHECK_VERIFY(this));
+          if (type.is_category1()) {
+            type2 = current_frame.pop_stack(
+              VerificationType::category1_check(), CHECK_VERIFY(this));
+          } else if(type.is_category2_2nd()) {
+            type2 = current_frame.pop_stack
+              (VerificationType::category2_check(), CHECK_VERIFY(this));
+          } else {
+            verify_error(bci, bad_type_msg, "dup2_x1");
+            return;
+          }
+          type3 = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type3, CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_dup2_x2 :
+        {
+          VerificationType type3, type4;
+          type = current_frame.pop_stack(CHECK_VERIFY(this));
+          if (type.is_category1()) {
+            type2 = current_frame.pop_stack(
+              VerificationType::category1_check(), CHECK_VERIFY(this));
+          } else if (type.is_category2_2nd()) {
+            type2 = current_frame.pop_stack(
+              VerificationType::category2_check(), CHECK_VERIFY(this));
+          } else {
+            verify_error(bci, bad_type_msg, "dup2_x2");
+            return;
+          }
+          type3 = current_frame.pop_stack(CHECK_VERIFY(this));
+          if (type3.is_category1()) {
+            type4 = current_frame.pop_stack(
+              VerificationType::category1_check(), CHECK_VERIFY(this));
+          } else if (type3.is_category2_2nd()) {
+            type4 = current_frame.pop_stack(
+              VerificationType::category2_check(), CHECK_VERIFY(this));
+          } else {
+            verify_error(bci, bad_type_msg, "dup2_x2");
+            return;
+          }
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type4, CHECK_VERIFY(this));
+          current_frame.push_stack(type3, CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_swap :
+          type = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          type2 = current_frame.pop_stack(
+            VerificationType::category1_check(), CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          current_frame.push_stack(type2, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iadd :
+        case Bytecodes::_isub :
+        case Bytecodes::_imul :
+        case Bytecodes::_idiv :
+        case Bytecodes::_irem :
+        case Bytecodes::_ishl :
+        case Bytecodes::_ishr :
+        case Bytecodes::_iushr :
+        case Bytecodes::_ior :
+        case Bytecodes::_ixor :
+        case Bytecodes::_iand :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          // fall through
+        case Bytecodes::_ineg :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_ladd :
+        case Bytecodes::_lsub :
+        case Bytecodes::_lmul :
+        case Bytecodes::_ldiv :
+        case Bytecodes::_lrem :
+        case Bytecodes::_land :
+        case Bytecodes::_lor :
+        case Bytecodes::_lxor :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          // fall through
+        case Bytecodes::_lneg :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lshl :
+        case Bytecodes::_lshr :
+        case Bytecodes::_lushr :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fadd :
+        case Bytecodes::_fsub :
+        case Bytecodes::_fmul :
+        case Bytecodes::_fdiv :
+        case Bytecodes::_frem :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          // fall through
+        case Bytecodes::_fneg :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dadd :
+        case Bytecodes::_dsub :
+        case Bytecodes::_dmul :
+        case Bytecodes::_ddiv :
+        case Bytecodes::_drem :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          // fall through
+        case Bytecodes::_dneg :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::double_type(), 
+            VerificationType::double2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_iinc :
+          verify_iinc(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_i2l :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+       case Bytecodes::_l2i :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_i2f :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_i2d :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::double_type(), 
+            VerificationType::double2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_l2f :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_l2d :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::double_type(), 
+            VerificationType::double2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_f2i :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_f2l :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_f2d :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::double_type(), 
+            VerificationType::double2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_d2i :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_d2l :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.push_stack_2(
+            VerificationType::long_type(), 
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_d2f :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_i2b :
+        case Bytecodes::_i2c :
+        case Bytecodes::_i2s :
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_lcmp :
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack_2(
+            VerificationType::long2_type(), 
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_fcmpl :
+        case Bytecodes::_fcmpg :
+          current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack( 
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_dcmpl :
+        case Bytecodes::_dcmpg :
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.pop_stack_2(
+            VerificationType::double2_type(), 
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_if_icmpeq:
+        case Bytecodes::_if_icmpne:
+        case Bytecodes::_if_icmplt:
+        case Bytecodes::_if_icmpge:
+        case Bytecodes::_if_icmpgt:
+        case Bytecodes::_if_icmple:
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          // fall through
+        case Bytecodes::_ifeq:
+        case Bytecodes::_ifne:
+        case Bytecodes::_iflt:
+        case Bytecodes::_ifge:
+        case Bytecodes::_ifgt:
+        case Bytecodes::_ifle:
+          current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          target = bcs.dest();
+          stackmap_table.check_jump_target(
+            &current_frame, target, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_if_acmpeq :
+        case Bytecodes::_if_acmpne :
+          current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          // fall through
+        case Bytecodes::_ifnull :
+        case Bytecodes::_ifnonnull :
+          current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          target = bcs.dest();
+          stackmap_table.check_jump_target
+            (&current_frame, target, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_goto :
+          target = bcs.dest();
+          stackmap_table.check_jump_target(
+            &current_frame, target, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_goto_w :
+          target = bcs.dest_w();
+          stackmap_table.check_jump_target(
+            &current_frame, target, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_tableswitch :
+        case Bytecodes::_lookupswitch :
+          verify_switch(
+            &bcs, code_length, code_data, &current_frame, 
+            &stackmap_table, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_ireturn :
+          type = current_frame.pop_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_lreturn :
+          type2 = current_frame.pop_stack(
+            VerificationType::long2_type(), CHECK_VERIFY(this));
+          type = current_frame.pop_stack(
+            VerificationType::long_type(), CHECK_VERIFY(this));
+          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_freturn :
+          type = current_frame.pop_stack(
+            VerificationType::float_type(), CHECK_VERIFY(this));
+          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_dreturn :
+          type2 = current_frame.pop_stack(
+            VerificationType::double2_type(),  CHECK_VERIFY(this));
+          type = current_frame.pop_stack(
+            VerificationType::double_type(), CHECK_VERIFY(this));
+          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_areturn :
+          type = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        case Bytecodes::_return :
+          if (return_type != VerificationType::bogus_type()) {
+            verify_error(bci, "Method expects no return value");
+            return;
+          }
+          // Make sure "this" has been initialized if current method is an
+          // <init>
+          if (_method->name() == vmSymbols::object_initializer_name() && 
+              current_frame.flag_this_uninit()) {
+            verify_error(bci,
+              "Constructor must call super() or this() before return");
+            return;
+          }
+          no_control_flow = true; break;
+        case Bytecodes::_getstatic :
+        case Bytecodes::_putstatic :
+        case Bytecodes::_getfield :
+        case Bytecodes::_putfield :
+          verify_field_instructions(
+            &bcs, &current_frame, cp, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_invokevirtual :
+        case Bytecodes::_invokespecial :
+        case Bytecodes::_invokestatic :
+          verify_invoke_instructions(
+            &bcs, code_length, &current_frame,
+            &this_uninit, return_type, cp, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_invokeinterface :
+          verify_invoke_instructions(
+            &bcs, code_length, &current_frame,
+            &this_uninit, return_type, cp, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_new :
+        {
+          index = bcs.get_index_big();
+          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+          VerificationType new_class_type = 
+            cp_index_to_type(index, cp, CHECK_VERIFY(this));
+          if (!new_class_type.is_object()) {
+            verify_error(bci, "Illegal new instruction");
+            return;
+          }
+          type = VerificationType::uninitialized_type(bci);
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_newarray :
+          type = get_newarray_type(bcs.get_index(), bci, CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::integer_type(),  CHECK_VERIFY(this));
+          current_frame.push_stack(type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_anewarray :
+          verify_anewarray(
+            bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_arraylength :
+          type = current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          if (!type.is_array()) {
+            verify_error(bci, bad_type_msg, "arraylength");
+          }
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_checkcast :
+        {
+          index = bcs.get_index_big();
+          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          VerificationType klass_type = cp_index_to_type(
+            index, cp, CHECK_VERIFY(this));
+          current_frame.push_stack(klass_type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_instanceof : {
+          index = bcs.get_index_big();
+          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+          current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          current_frame.push_stack(
+            VerificationType::integer_type(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_monitorenter :
+        case Bytecodes::_monitorexit :
+          current_frame.pop_stack(
+            VerificationType::reference_check(), CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        case Bytecodes::_multianewarray :
+        {
+          index = bcs.get_index_big();
+          u2 dim = *(bcs.bcp()+3);
+          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+          VerificationType new_array_type = 
+            cp_index_to_type(index, cp, CHECK_VERIFY(this));
+          if (!new_array_type.is_array()) {
+            verify_error(bci,
+              "Illegal constant pool index in multianewarray instruction");
+            return;  
+          }
+          if (dim < 1 || new_array_type.dimensions() < dim) {
+            verify_error(bci,
+              "Illegal dimension in multianewarray instruction");
+            return;
+          }
+          for (int i = 0; i < dim; i++) {
+            current_frame.pop_stack(
+              VerificationType::integer_type(), CHECK_VERIFY(this));
+          }
+          current_frame.push_stack(new_array_type, CHECK_VERIFY(this));
+          no_control_flow = false; break;
+        }
+        case Bytecodes::_athrow :
+          type = VerificationType::reference_type(
+            vmSymbols::java_lang_Throwable());
+          current_frame.pop_stack(type, CHECK_VERIFY(this));
+          no_control_flow = true; break;
+        default:
+          // We only need to check the valid bytecodes in class file.
+          // And jsr and ret are not in the new class file format in JDK1.5.
+          verify_error(bci, "Bad instruction");
+          no_control_flow = false;
+          return;
+      }  // end switch
+    }  // end Merge with the next instruction
+
+    // Look for possible jump target in exception handlers and see if it
+    // matches current_frame
+    if (bci >= ex_min && bci < ex_max) {
+      verify_exception_handler_targets(
+        bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
+    }
+  } // end while
+
+  // Make sure that control flow does not fall through end of the method
+  if (!no_control_flow) {
+    verify_error(code_length, "Control flow falls through code end");
+    return;
+  }
+}
+
+// Build a per-bci map of instruction boundaries for method m.  Each of the
+// code_length entries is 0 (not the start of an instruction),
+// BYTECODE_OFFSET (start of an ordinary bytecode) or NEW_OFFSET (start of a
+// 'new' instruction).  On an illegal opcode, records a VerifyError via
+// verify_error() and returns NULL.  The array is resource-allocated; the
+// caller's ResourceMark owns it.
+char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
+  char* code_data = NEW_RESOURCE_ARRAY(char, code_length);
+  memset(code_data, 0, sizeof(char) * code_length);
+  RawBytecodeStream bcs(m);
+
+  while (!bcs.is_last_bytecode()) {
+    if (bcs.raw_next() != Bytecodes::_illegal) {
+      int bci = bcs.bci();
+      if (bcs.code() == Bytecodes::_new) {
+        code_data[bci] = NEW_OFFSET;
+      } else {
+        code_data[bci] = BYTECODE_OFFSET;
+      }
+    } else {
+      // Unknown/illegal opcode: flag the method and bail out.
+      verify_error(bcs.bci(), "Bad instruction");
+      return NULL;
+    }
+  }
+
+  return code_data;
+}
+
+// Validate the method's exception table: each (start_pc, end_pc, handler_pc,
+// catch_type) entry must have pcs that land on instruction starts (per
+// code_data), and a non-zero catch type must resolve to a subclass of
+// java.lang.Throwable.  As a side effect, widens [min, max) to cover the
+// union of all protected ranges (used later to limit per-bci handler checks).
+void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS) {
+  typeArrayHandle exhandlers (THREAD, _method->exception_table());
+  constantPoolHandle cp (THREAD, _method->constants());
+
+  if (exhandlers() != NULL) {
+    // Entries are stored as flat quadruples; i advances by 4 per handler.
+    for(int i = 0; i < exhandlers->length();) {
+      u2 start_pc = exhandlers->int_at(i++);
+      u2 end_pc = exhandlers->int_at(i++);
+      u2 handler_pc = exhandlers->int_at(i++);
+      if (start_pc >= code_length || code_data[start_pc] == 0) {
+        class_format_error("Illegal exception table start_pc %d", start_pc);
+        return;
+      }
+      if (end_pc != code_length) {   // special case: end_pc == code_length
+        if (end_pc > code_length || code_data[end_pc] == 0) {
+          class_format_error("Illegal exception table end_pc %d", end_pc); 
+          return;
+        }
+      }
+      if (handler_pc >= code_length || code_data[handler_pc] == 0) {
+        class_format_error("Illegal exception table handler_pc %d", handler_pc);
+        return;
+      }
+      int catch_type_index = exhandlers->int_at(i++);
+      if (catch_type_index != 0) {
+        VerificationType catch_type = cp_index_to_type(
+          catch_type_index, cp, CHECK_VERIFY(this));
+        VerificationType throwable = 
+          VerificationType::reference_type(vmSymbols::java_lang_Throwable());
+        bool is_subclass = throwable.is_assignable_from(
+          catch_type, current_class(), CHECK_VERIFY(this));
+        if (!is_subclass) {
+          // 4286534: should throw VerifyError according to recent spec change
+          verify_error(
+            "Catch type is not a subclass of Throwable in handler %d",
+            handler_pc);
+          return;
+        }
+      }
+      if (start_pc < min) min = start_pc;
+      if (end_pc > max) max = end_pc;
+    }
+  }
+}
+
+// Validate the LocalVariableTable attribute: each entry's start_bci must be
+// the start of an instruction, and start_bci + length must either equal
+// code_length exactly or also land on an instruction start (per code_data).
+void ClassVerifier::verify_local_variable_table(u4 code_length, char* code_data, TRAPS) {
+  int localvariable_table_length = _method()->localvariable_table_length();
+  if (localvariable_table_length > 0) {
+    LocalVariableTableElement* table = _method()->localvariable_table_start();
+    for (int i = 0; i < localvariable_table_length; i++) {
+      u2 start_bci = table[i].start_bci;
+      u2 length = table[i].length;
+
+      if (start_bci >= code_length || code_data[start_bci] == 0) {
+        class_format_error(
+          "Illegal local variable table start_pc %d", start_bci);
+        return;
+      }
+      // Widen to u4 so start_bci + length cannot wrap around u2.
+      u4 end_bci = (u4)(start_bci + length);
+      if (end_bci != code_length) {
+        if (end_bci >= code_length || code_data[end_bci] == 0) {
+          class_format_error( "Illegal local variable table length %d", length);
+          return;
+        }
+      }
+    }
+  }
+}
+
+// Check the stackmap table entry (if any) that applies at bci, and match
+// current_frame against it.  stackmap_index is the next unconsumed entry;
+// returns the (possibly incremented) index on success, or 0 after recording
+// a VerifyError/ClassFormatError.  no_control_flow means the previous
+// instruction cannot fall through, so a frame for bci is mandatory here.
+u2 ClassVerifier::verify_stackmap_table(u2 stackmap_index, u2 bci,
+                                        StackMapFrame* current_frame,
+                                        StackMapTable* stackmap_table,
+                                        bool no_control_flow, TRAPS) {
+  if (stackmap_index < stackmap_table->get_frame_count()) {
+    u2 this_offset = stackmap_table->get_offset(stackmap_index);
+    if (no_control_flow && this_offset > bci) {
+      verify_error(bci, "Expecting a stack map frame");
+      return 0;
+    }
+    if (this_offset == bci) {
+      // See if current stack map can be assigned to the frame in table.
+      // current_frame is the stackmap frame got from the last instruction.
+      // If matched, current_frame will be updated by this method.
+      bool match = stackmap_table->match_stackmap(
+        current_frame, this_offset, stackmap_index, 
+        !no_control_flow, true, CHECK_VERIFY_(this, 0));
+      if (!match) {
+        // report type error
+        verify_error(bci, "Instruction type does not match stack map");
+        return 0;
+      }
+      stackmap_index++;
+    } else if (this_offset < bci) {
+      // current_offset should have met this_offset.
+      class_format_error("Bad stack map offset %d", this_offset);
+      return 0;
+    }
+  } else if (no_control_flow) {
+    // Table exhausted but the previous instruction does not fall through:
+    // a frame for this bci is required.
+    verify_error(bci, "Expecting a stack map frame");
+    return 0;
+  }
+  return stackmap_index;
+}
+
+// For every exception handler whose [start_pc, end_pc) range covers bci,
+// construct the frame that would exist on entry to the handler (current
+// frame's locals with an empty stack holding only the catch type, flags
+// possibly including FLAG_THIS_UNINIT) and require it to match the stackmap
+// frame recorded at handler_pc.  Records a VerifyError on mismatch.
+void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, StackMapFrame* current_frame,
+                                                     StackMapTable* stackmap_table, TRAPS) {
+  constantPoolHandle cp (THREAD, _method->constants());
+  typeArrayHandle exhandlers (THREAD, _method->exception_table());
+  if (exhandlers() != NULL) {
+    // Entries are flat quadruples; i advances by 4 per handler.
+    for(int i = 0; i < exhandlers->length();) {
+      u2 start_pc = exhandlers->int_at(i++);
+      u2 end_pc = exhandlers->int_at(i++);
+      u2 handler_pc = exhandlers->int_at(i++);
+      int catch_type_index = exhandlers->int_at(i++);
+      if(bci >= start_pc && bci < end_pc) {
+        u1 flags = current_frame->flags();
+        if (this_uninit) {  flags |= FLAG_THIS_UNINIT; }
+
+        ResourceMark rm(THREAD);
+        StackMapFrame* new_frame = current_frame->frame_in_exception_handler(flags);
+        if (catch_type_index != 0) {
+          // We know that this index refers to a subclass of Throwable
+          VerificationType catch_type = cp_index_to_type(
+            catch_type_index, cp, CHECK_VERIFY(this));
+          new_frame->push_stack(catch_type, CHECK_VERIFY(this));
+        } else {
+          // catch-all handler (e.g. finally): stack holds Throwable.
+          VerificationType throwable = 
+            VerificationType::reference_type(vmSymbols::java_lang_Throwable());
+          new_frame->push_stack(throwable, CHECK_VERIFY(this));
+        }
+        bool match = stackmap_table->match_stackmap(
+          new_frame, handler_pc, true, false, CHECK_VERIFY(this));
+        if (!match) {
+          verify_error(bci,
+            "Stack map does not match the one at exception handler %d", 
+            handler_pc);
+          return;
+        }
+      }
+    }
+  }
+}
+
+// Range-check a constant pool index: must be in (0, cp->length()).
+// Records a VerifyError (not ClassFormatError) when out of range.
+void ClassVerifier::verify_cp_index(constantPoolHandle cp, int index, TRAPS) {
+  int nconstants = cp->length();
+  if ((index <= 0) || (index >= nconstants)) {
+    verify_error("Illegal constant pool index %d in class %s", 
+      index, instanceKlass::cast(cp->pool_holder())->external_name());
+    return;
+  }
+}
+
+// Check that the constant pool entry at index has one of the expected tags.
+// 'types' is a bitmask of (1 << JVM_CONSTANT_*) values.  Handles the case
+// where rewriting has already redirected the index through the cp cache.
+void ClassVerifier::verify_cp_type(
+    int index, constantPoolHandle cp, unsigned int types, TRAPS) {
+
+  // In some situations, bytecode rewriting may occur while we're verifying.
+  // In this case, a constant pool cache exists and some indices refer to that
+  // instead.  Get the original index for the tag check
+  constantPoolCacheOop cache = cp->cache();
+  if (cache != NULL &&
+       ((types == (1 <<  JVM_CONSTANT_InterfaceMethodref)) || 
+        (types == (1 <<  JVM_CONSTANT_Methodref)) || 
+        (types == (1 <<  JVM_CONSTANT_Fieldref)))) {
+    assert((index >= 0) && (index < cache->length()), 
+      "Must be a legal index into the cp cache");
+    index = cache->entry_at(index)->constant_pool_index();
+  }
+
+  verify_cp_index(cp, index, CHECK_VERIFY(this));
+  unsigned int tag = cp->tag_at(index).value();
+  if ((types & (1 << tag)) == 0) {
+    verify_error(
+      "Illegal type at constant pool entry %d in class %s", 
+      index, instanceKlass::cast(cp->pool_holder())->external_name());
+    return;
+  }
+}
+
+// Check that the constant pool entry at index is a Class entry
+// (either already resolved or still unresolved).  Records a VerifyError
+// otherwise.
+void ClassVerifier::verify_cp_class_type(
+    int index, constantPoolHandle cp, TRAPS) {
+  verify_cp_index(cp, index, CHECK_VERIFY(this));
+  constantTag tag = cp->tag_at(index);
+  if (!tag.is_klass() && !tag.is_unresolved_klass()) {
+    verify_error("Illegal type at constant pool entry %d in class %s", 
+      index, instanceKlass::cast(cp->pool_holder())->external_name());
+    return;
+  }
+}
+
+// Format an error message into the verifier's _message buffer: the printf-
+// style body, then " in method <name+sig>" when a method is set, then
+// " at offset <n>" when offset != -1 (the sentinel for "no bci").
+void ClassVerifier::format_error_message(
+    const char* fmt, int offset, va_list va) {
+  ResourceMark rm(_thread);
+  stringStream message(_message, _message_buffer_len);
+  message.vprint(fmt, va);
+  if (!_method.is_null()) {
+    message.print(" in method %s", _method->name_and_sig_as_C_string());
+  }
+  if (offset != -1) {
+    message.print(" at offset %d", offset);
+  }
+}
+
+// Records a VerifyError with a message that includes the bytecode offset.
+// The error is raised later by the caller; this only sets state.
+void ClassVerifier::verify_error(u2 offset, const char* fmt, ...) {
+  _exception_type = vmSymbols::java_lang_VerifyError();
+  va_list va;
+  va_start(va, fmt);
+  format_error_message(fmt, offset, va);
+  va_end(va);
+}
+
+// Records a VerifyError with no bytecode offset (-1 suppresses the
+// " at offset" suffix in the formatted message).
+void ClassVerifier::verify_error(const char* fmt, ...) {
+  _exception_type = vmSymbols::java_lang_VerifyError();
+  va_list va;
+  va_start(va, fmt);
+  format_error_message(fmt, -1, va);
+  va_end(va);
+}
+
+// Records a ClassFormatError (rather than a VerifyError) with the given
+// formatted message; no bytecode offset is attached.
+void ClassVerifier::class_format_error(const char* msg, ...) {
+  _exception_type = vmSymbols::java_lang_ClassFormatError();
+  va_list va;
+  va_start(va, msg);
+  format_error_message(msg, -1, va);
+  va_end(va);
+}
+
+// Resolves 'name' using the class loader and protection domain of the class
+// being verified.  Throws (via CHECK_NULL) if resolution fails.
+klassOop ClassVerifier::load_class(symbolHandle name, TRAPS) {
+  // Get current loader and protection domain first.
+  oop loader = current_class()->class_loader();
+  oop protection_domain = current_class()->protection_domain();
+
+  return SystemDictionary::resolve_or_fail(
+    name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
+    true, CHECK_NULL);
+}
+
+// Returns true iff the named member (method when 'is_method', field
+// otherwise) resolved in 'target_class' is protected AND declared in a
+// different runtime package than 'this_class' — i.e. the access requires the
+// special protected-member check of JVMS 4.10.
+bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
+                                        klassOop target_class,
+                                        symbolOop field_name,
+                                        symbolOop field_sig,
+                                        bool is_method) {
+  // Raw symbolOop/klassOop pointers are used below, so GC must not move them.
+  No_Safepoint_Verifier nosafepoint;
+
+  // If target class isn't a super class of this class, we don't worry about this case
+  if (!this_class->is_subclass_of(target_class)) {
+    return false;
+  }
+  // Check if the specified method or field is protected
+  instanceKlass* target_instance = instanceKlass::cast(target_class);
+  fieldDescriptor fd;
+  if (is_method) {
+    methodOop m = target_instance->uncached_lookup_method(field_name, field_sig);
+    if (m != NULL && m->is_protected()) {
+      // Protected member in the same package needs no special treatment.
+      if (!this_class->is_same_class_package(m->method_holder())) {
+        return true;
+      }
+    }
+  } else {
+    klassOop member_klass = target_instance->find_field(field_name, field_sig, &fd);
+    if(member_klass != NULL && fd.is_protected()) {
+      if (!this_class->is_same_class_package(member_klass)) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Verifies an ldc / ldc_w / ldc2_w instruction: checks that the constant
+// pool entry at 'index' has a loadable tag for the opcode, then pushes the
+// corresponding verification type(s) onto the operand stack.
+void ClassVerifier::verify_ldc(
+    int opcode, u2 index, StackMapFrame *current_frame,
+     constantPoolHandle cp, u2 bci, TRAPS) {
+  verify_cp_index(cp, index, CHECK_VERIFY(this));
+  constantTag tag = cp->tag_at(index);
+  unsigned int types;
+  if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
+    // Unresolved string/class tags are acceptable as-is; any other tag must
+    // be one of the single-slot loadable constants.
+    if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) {
+      types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
+            | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class);
+      verify_cp_type(index, cp, types, CHECK_VERIFY(this));
+    }
+  } else {
+    assert(opcode == Bytecodes::_ldc2_w, "must be ldc2_w");
+    types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long);
+    verify_cp_type(index, cp, types, CHECK_VERIFY(this));
+  }
+  // Push the stack type matching the tag; two-slot types use push_stack_2.
+  if (tag.is_string() || tag.is_unresolved_string()) {
+    current_frame->push_stack(
+      VerificationType::reference_type(
+        vmSymbols::java_lang_String()), CHECK_VERIFY(this));
+  } else if (tag.is_klass() || tag.is_unresolved_klass()) {
+    current_frame->push_stack(
+      VerificationType::reference_type(
+        vmSymbols::java_lang_Class()), CHECK_VERIFY(this));
+  } else if (tag.is_int()) {
+    current_frame->push_stack(
+      VerificationType::integer_type(), CHECK_VERIFY(this));
+  } else if (tag.is_float()) {
+    current_frame->push_stack(
+      VerificationType::float_type(), CHECK_VERIFY(this));
+  } else if (tag.is_double()) {
+    current_frame->push_stack_2(
+      VerificationType::double_type(), 
+      VerificationType::double2_type(), CHECK_VERIFY(this));
+  } else if (tag.is_long()) {
+    current_frame->push_stack_2(
+      VerificationType::long_type(), 
+      VerificationType::long2_type(), CHECK_VERIFY(this));
+  } else {
+    verify_error(bci, "Invalid index in ldc");
+    return;
+  }
+}
+
+// Verifies a tableswitch or lookupswitch instruction: checks that the
+// alignment padding bytes are zero, validates the low/high bounds
+// (tableswitch) or the key count and sort order (lookupswitch), pops the
+// int key from the operand stack, and checks every jump target (default
+// plus each case) against the stack map table.
+void ClassVerifier::verify_switch(
+    RawBytecodeStream* bcs, u4 code_length, char* code_data,
+    StackMapFrame* current_frame, StackMapTable* stackmap_table, TRAPS) {
+  int bci = bcs->bci();
+  address bcp = bcs->bcp();
+  // Operands start at the next 4-byte boundary after the opcode byte.
+  address aligned_bcp = (address) round_to((intptr_t)(bcp + 1), jintSize);
+
+  // 4639449 & 4647081: padding bytes must be 0
+  u2 padding_offset = 1;
+  while ((bcp + padding_offset) < aligned_bcp) {
+    if(*(bcp + padding_offset) != 0) {
+      // Fixed message typo: the instruction is "lookupswitch", not "lookswitch".
+      verify_error(bci, "Nonzero padding byte in lookupswitch or tableswitch");
+      return;
+    }
+    padding_offset++;
+  }
+  int default_offset = (int) Bytes::get_Java_u4(aligned_bcp);
+  int keys, delta;
+  // The switch key on top of the stack must be an int.
+  current_frame->pop_stack(
+    VerificationType::integer_type(), CHECK_VERIFY(this));
+  if (bcs->code() == Bytecodes::_tableswitch) {
+    jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
+    jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
+    if (low > high) {
+      verify_error(bci,
+        "low must be less than or equal to high in tableswitch");
+      return;
+    }
+    keys = high - low + 1;
+    // Overflow in high - low + 1 shows up as a negative count.
+    if (keys < 0) {
+      verify_error(bci, "too many keys in tableswitch");
+      return;
+    }
+    delta = 1;  // tableswitch: one word (the offset) per entry
+  } else {
+    keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
+    if (keys < 0) {
+      verify_error(bci, "number of keys in lookupswitch less than 0");
+      return;
+    }
+    delta = 2;  // lookupswitch: two words (match, offset) per entry
+    // Make sure that the lookupswitch items are sorted
+    for (int i = 0; i < (keys - 1); i++) {
+      jint this_key = Bytes::get_Java_u4(aligned_bcp + (2+2*i)*jintSize);
+      jint next_key = Bytes::get_Java_u4(aligned_bcp + (2+2*i+2)*jintSize);
+      if (this_key >= next_key) {
+        verify_error(bci, "Bad lookupswitch instruction");
+        return;
+      }
+    }
+  }
+  // Check the default target, then every case target.
+  int target = bci + default_offset;
+  stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
+  for (int i = 0; i < keys; i++) {
+    target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
+    stackmap_table->check_jump_target(
+      current_frame, target, CHECK_VERIFY(this));
+  }
+}
+
+// Returns true iff some (strict) superclass of 'current' has class name
+// 'ref_name'.  Compares names only; no class loading occurs.
+bool ClassVerifier::name_in_supers(
+    symbolOop ref_name, instanceKlassHandle current) {
+  klassOop super = current->super();
+  while (super != NULL) {
+    if (super->klass_part()->name() == ref_name) {
+      return true;
+    }
+    super = super->klass_part()->super();
+  }
+  return false;
+}
+
+// Verifies a getstatic/putstatic/getfield/putfield instruction: checks the
+// Fieldref constant pool entry and the field signature, then pushes/pops the
+// field's verification type(s) on the operand stack.  For the instance
+// variants, also pops the objectref and, when the field is protected and
+// declared in a superclass in another package, checks that the objectref is
+// assignable to the current class (JVMS 4.10 protected-access rule).
+void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
+                                              StackMapFrame* current_frame,
+                                              constantPoolHandle cp,
+                                              TRAPS) {
+  u2 index = bcs->get_index_big();
+  verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
+
+  // Get field name and signature
+  symbolHandle field_name = symbolHandle(THREAD, cp->name_ref_at(index));
+  symbolHandle field_sig = symbolHandle(THREAD, cp->signature_ref_at(index));
+
+  if (!SignatureVerifier::is_valid_type_signature(field_sig)) {
+    class_format_error(
+      "Invalid signature for field in class %s referenced "
+      "from constant pool index %d", _klass->external_name(), index);
+    return;
+  }
+
+  // Get referenced class type
+  VerificationType ref_class_type = cp_ref_index_to_type(
+    index, cp, CHECK_VERIFY(this));
+  if (!ref_class_type.is_object()) {
+    verify_error(
+      "Expecting reference to class in class %s at constant pool index %d",
+      _klass->external_name(), index);
+    return;
+  }
+  VerificationType target_class_type = ref_class_type;
+
+  assert(sizeof(VerificationType) == sizeof(uintptr_t), 
+        "buffer type must match VerificationType size");
+  // Two slots: enough for a long/double field (two verification types).
+  uintptr_t field_type_buffer[2];
+  VerificationType* field_type = (VerificationType*)field_type_buffer;
+  // If we make a VerificationType[2] array directly, the compiler calls
+  // to the c-runtime library to do the allocation instead of just 
+  // stack allocating it.  Plus it would run constructors.  This shows up
+  // in performance profiles.
+
+  SignatureStream sig_stream(field_sig, false);
+  VerificationType stack_object_type;
+  // n is 1 for single-slot types, 2 for long/double.
+  int n = change_sig_to_verificationType(
+    &sig_stream, field_type, CHECK_VERIFY(this));
+  u2 bci = bcs->bci();
+  bool is_assignable;
+  switch (bcs->code()) {
+    case Bytecodes::_getstatic: {
+      for (int i = 0; i < n; i++) {
+        current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
+      }
+      break;
+    }
+    case Bytecodes::_putstatic: {
+      // Pop in reverse order of pushing.
+      for (int i = n - 1; i >= 0; i--) {
+        current_frame->pop_stack(field_type[i], CHECK_VERIFY(this));
+      }
+      break;
+    }
+    case Bytecodes::_getfield: {
+      // Objectref must already be assignable to the declaring class here.
+      stack_object_type = current_frame->pop_stack(
+        target_class_type, CHECK_VERIFY(this));
+      for (int i = 0; i < n; i++) {
+        current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
+      }
+      goto check_protected;
+    }
+    case Bytecodes::_putfield: {
+      for (int i = n - 1; i >= 0; i--) {
+        current_frame->pop_stack(field_type[i], CHECK_VERIFY(this));
+      }
+      stack_object_type = current_frame->pop_stack(CHECK_VERIFY(this));
+
+      // The JVMS 2nd edition allows field initialization before the superclass
+      // initializer, if the field is defined within the current class.
+      fieldDescriptor fd;
+      if (stack_object_type == VerificationType::uninitialized_this_type() &&
+          target_class_type.equals(current_type()) &&
+          _klass->find_local_field(field_name(), field_sig(), &fd)) {
+        stack_object_type = current_type();
+      }
+      is_assignable = target_class_type.is_assignable_from(
+        stack_object_type, current_class(), CHECK_VERIFY(this));
+      if (!is_assignable) {
+        verify_error(bci, "Bad type on operand stack in putfield");
+        return;
+      }
+    }
+    // Falls through from _putfield; also entered via goto from _getfield.
+    check_protected: {
+      if (_this_type == stack_object_type)
+        break; // stack_object_type must be assignable to _current_class_type
+      symbolHandle ref_class_name = symbolHandle(THREAD,
+        cp->klass_name_at(cp->klass_ref_index_at(index)));
+      if (!name_in_supers(ref_class_name(), current_class()))
+        // stack_object_type must be assignable to _current_class_type since:
+        // 1. stack_object_type must be assignable to ref_class.
+        // 2. ref_class must be _current_class or a subclass of it. It can't
+        //    be a superclass of it. See revised JVMS 5.4.4.
+        break;
+
+      // NOTE(review): uses CHECK (plain exception check) rather than
+      // CHECK_VERIFY(this) as elsewhere in this file — confirm intentional.
+      klassOop ref_class_oop = load_class(ref_class_name, CHECK);
+      if (is_protected_access(current_class(), ref_class_oop, field_name(), 
+                              field_sig(), false)) {
+        // It's protected access, check if stack object is assignable to
+        // current class.
+        is_assignable = current_type().is_assignable_from(
+          stack_object_type, current_class(), CHECK_VERIFY(this));
+        if (!is_assignable) {
+          // NOTE(review): this path is shared by getfield and putfield but the
+          // message always says "getfield" — possibly misleading for putfield.
+          verify_error(bci, "Bad access to protected data in getfield");
+          return;
+        }
+      }
+      break;
+    }
+    default: ShouldNotReachHere();
+  }
+}
+
+// Verifies an invokespecial of an <init> method.  The popped objectref must
+// be either uninitializedThis (constructor chaining to this class or a
+// superclass) or an uninitialized type created by a matching 'new'
+// instruction; in both cases the frame's copies of that type are replaced by
+// the initialized object type.  Sets *this_uninit when uninitializedThis is
+// initialized, so the caller can check it at return.
+void ClassVerifier::verify_invoke_init(
+    RawBytecodeStream* bcs, VerificationType ref_class_type, 
+    StackMapFrame* current_frame, u4 code_length, bool *this_uninit, 
+    constantPoolHandle cp, TRAPS) {
+  u2 bci = bcs->bci();
+  VerificationType type = current_frame->pop_stack(
+    VerificationType::reference_check(), CHECK_VERIFY(this));
+  if (type == VerificationType::uninitialized_this_type()) {
+    // The method must be an <init> method of either this class, or one of its
+    // superclasses
+    klassOop oop = current_class()();
+    Klass* klass = oop->klass_part();
+    // Walk up the superclass chain comparing names only.
+    // NOTE(review): if no name matches, klass->super() eventually returns
+    // NULL and NULL->klass_part() is evaluated before the loop condition
+    // re-checks — confirm klass_part() on a NULL oop is safe here.
+    while (klass != NULL && ref_class_type.name() != klass->name()) {
+      klass = klass->super()->klass_part();
+    }
+    if (klass == NULL) {
+      verify_error(bci, "Bad <init> method call");
+      return;
+    }
+    current_frame->initialize_object(type, current_type());
+    *this_uninit = true;
+  } else if (type.is_uninitialized()) {
+    // The uninitialized type carries the bci of the 'new' that produced it.
+    u2 new_offset = type.bci();
+    address new_bcp = bcs->bcp() - bci + new_offset;
+    if (new_offset > (code_length - 3) || (*new_bcp) != Bytecodes::_new) {
+      verify_error(new_offset, "Expecting new instruction");
+      return;
+    }
+    u2 new_class_index = Bytes::get_Java_u2(new_bcp + 1);
+    verify_cp_class_type(new_class_index, cp, CHECK_VERIFY(this));
+
+    // The method must be an <init> method of the indicated class
+    VerificationType new_class_type = cp_index_to_type(
+      new_class_index, cp, CHECK_VERIFY(this));
+    if (!new_class_type.equals(ref_class_type)) {
+      verify_error(bci, "Call to wrong <init> method");
+      return;
+    }
+    // According to the VM spec, if the referent class is a superclass of the
+    // current class, and is in a different runtime package, and the method is
+    // protected, then the objectref must be the current class or a subclass
+    // of the current class.
+    VerificationType objectref_type = new_class_type;
+    if (name_in_supers(ref_class_type.name(), current_class())) { 
+      klassOop ref_klass = load_class(
+        ref_class_type.name(), CHECK_VERIFY(this));
+      methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
+        vmSymbols::object_initializer_name(), 
+        cp->signature_ref_at(bcs->get_index_big()));
+      instanceKlassHandle mh(THREAD, m->method_holder());
+      if (m->is_protected() && !mh->is_same_class_package(_klass())) {
+        bool assignable = current_type().is_assignable_from(
+          objectref_type, current_class(), CHECK_VERIFY(this));
+        if (!assignable) {
+          verify_error(bci, "Bad access to protected <init> method");
+          return;
+        }
+      }
+    }
+    current_frame->initialize_object(type, new_class_type);
+  } else {
+    verify_error(bci, "Bad operand type when invoking <init>");
+    return;
+  }
+}
+
+// Verifies an invokevirtual/invokespecial/invokestatic/invokeinterface
+// instruction: checks the constant pool entry and method signature, pops the
+// argument types (and objectref for non-static calls), enforces the <init>
+// and protected-method access rules, and pushes the return type.
+void ClassVerifier::verify_invoke_instructions(
+    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, 
+    bool *this_uninit, VerificationType return_type, 
+    constantPoolHandle cp, TRAPS) {
+  // Make sure the constant pool item is the right type
+  u2 index = bcs->get_index_big();
+  Bytecodes::Code opcode = bcs->code();
+  unsigned int types = (opcode == Bytecodes::_invokeinterface
+                                ? 1 << JVM_CONSTANT_InterfaceMethodref
+                                : 1 << JVM_CONSTANT_Methodref);
+  verify_cp_type(index, cp, types, CHECK_VERIFY(this));
+
+  // Get method name and signature
+  symbolHandle method_name(THREAD, cp->name_ref_at(index));
+  symbolHandle method_sig(THREAD, cp->signature_ref_at(index));
+
+  if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
+    class_format_error(
+      "Invalid method signature in class %s referenced "
+      "from constant pool index %d", _klass->external_name(), index);
+    return;
+  }
+
+  // Get referenced class type
+  VerificationType ref_class_type = cp_ref_index_to_type(
+    index, cp, CHECK_VERIFY(this));
+
+  // For a small signature length, we just allocate 128 bytes instead
+  // of parsing the signature once to find its size.
+  // -3 is for '(', ')' and return descriptor; multiply by 2 is for
+  // longs/doubles to be conservative.
+  assert(sizeof(VerificationType) == sizeof(uintptr_t), 
+        "buffer type must match VerificationType size");
+  uintptr_t on_stack_sig_types_buffer[128];
+  // If we make a VerificationType[128] array directly, the compiler calls
+  // to the c-runtime library to do the allocation instead of just 
+  // stack allocating it.  Plus it would run constructors.  This shows up
+  // in performance profiles.
+
+  VerificationType* sig_types;
+  int size = (method_sig->utf8_length() - 3) * 2;
+  if (size > 128) {
+    // Long and double occupies two slots here.
+    ArgumentSizeComputer size_it(method_sig);
+    size = size_it.size();
+    sig_types = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, VerificationType, size);
+  } else{
+    sig_types = (VerificationType*)on_stack_sig_types_buffer;
+  }
+  // Convert each argument descriptor into verification type slots.
+  SignatureStream sig_stream(method_sig);
+  int sig_i = 0;
+  while (!sig_stream.at_return_type()) {
+    sig_i += change_sig_to_verificationType(
+      &sig_stream, &sig_types[sig_i], CHECK_VERIFY(this));
+    sig_stream.next();
+  }
+  int nargs = sig_i;
+
+#ifdef ASSERT
+  {
+    ArgumentSizeComputer size_it(method_sig);
+    assert(nargs == size_it.size(), "Argument sizes do not match");
+    assert(nargs <= (method_sig->utf8_length() - 3) * 2, "estimate of max size isn't conservative enough");
+  }
+#endif
+
+  // Check instruction operands
+  u2 bci = bcs->bci();
+  if (opcode == Bytecodes::_invokeinterface) {
+    address bcp = bcs->bcp();
+    // 4905268: count operand in invokeinterface should be nargs+1, not nargs.
+    // JSR202 spec: The count operand of an invokeinterface instruction is valid if it is
+    // the difference between the size of the operand stack before and after the instruction
+    // executes.
+    if (*(bcp+3) != (nargs+1)) {
+      verify_error(bci, "Inconsistent args count operand in invokeinterface");
+      return;
+    }
+    if (*(bcp+4) != 0) {
+      verify_error(bci, "Fourth operand byte of invokeinterface must be zero");
+      return;
+    }
+  }
+
+  if (method_name->byte_at(0) == '<') {
+    // Make sure <init> can only be invoked by invokespecial
+    if (opcode != Bytecodes::_invokespecial || 
+        method_name() != vmSymbols::object_initializer_name()) {
+      verify_error(bci, "Illegal call to internal method");
+      return;
+    }
+  } else if (opcode == Bytecodes::_invokespecial
+             && !ref_class_type.equals(current_type())
+             && !ref_class_type.equals(VerificationType::reference_type(
+                  current_class()->super()->klass_part()->name()))) {
+    // invokespecial of a non-<init> method must target this class, its
+    // direct superclass, or a class this class is assignable to.
+    bool subtype = ref_class_type.is_assignable_from(
+      current_type(), current_class(), CHECK_VERIFY(this));
+    if (!subtype) {
+      verify_error(bci, "Bad invokespecial instruction: "
+          "current class isn't assignable to reference class.");
+       return;
+    }
+  }
+  // Match method descriptor with operand stack
+  for (int i = nargs - 1; i >= 0; i--) {  // Run backwards
+    current_frame->pop_stack(sig_types[i], CHECK_VERIFY(this));
+  }
+  // Check objectref on operand stack
+  if (opcode != Bytecodes::_invokestatic) {
+    if (method_name() == vmSymbols::object_initializer_name()) {  // <init> method
+      verify_invoke_init(bcs, ref_class_type, current_frame, 
+        code_length, this_uninit, cp, CHECK_VERIFY(this));
+    } else {   // other methods
+      // Ensures that target class is assignable to method class.
+      if (opcode == Bytecodes::_invokespecial) {
+        current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
+      } else if (opcode == Bytecodes::_invokevirtual) {
+        VerificationType stack_object_type =
+          current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
+        if (current_type() != stack_object_type) {
+          assert(cp->cache() == NULL, "not rewritten yet");
+          symbolHandle ref_class_name = symbolHandle(THREAD,
+            cp->klass_name_at(cp->klass_ref_index_at(index)));
+          // See the comments in verify_field_instructions() for
+          // the rationale behind this.
+          if (name_in_supers(ref_class_name(), current_class())) {
+            // NOTE(review): uses CHECK rather than CHECK_VERIFY(this) as
+            // elsewhere in this file — confirm intentional.
+            klassOop ref_class = load_class(ref_class_name, CHECK);
+            if (is_protected_access(
+                  _klass, ref_class, method_name(), method_sig(), true)) {
+              // It's protected access, check if stack object is
+              // assignable to current class.
+              bool is_assignable = current_type().is_assignable_from(
+                stack_object_type, current_class(), CHECK_VERIFY(this));
+              if (!is_assignable) {
+                if (ref_class_type.name() == vmSymbols::java_lang_Object()
+                    && stack_object_type.is_array()
+                    && method_name() == vmSymbols::clone_name()) {
+                  // Special case: arrays pretend to implement public Object
+                  // clone().
+                } else {
+                  verify_error(bci,
+                    "Bad access to protected data in invokevirtual");
+                  return;
+                }
+              }
+            }
+          }
+        }
+      } else {
+        assert(opcode == Bytecodes::_invokeinterface, "Unexpected opcode encountered");
+        current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
+      }
+    }
+  }
+  // Push the result type.
+  if (sig_stream.type() != T_VOID) {
+    if (method_name() == vmSymbols::object_initializer_name()) {
+      // <init> method must have a void return type
+      verify_error(bci, "Return type must be void in <init> method");
+      return;
+    }
+    VerificationType return_type[2];
+    int n = change_sig_to_verificationType(
+      &sig_stream, return_type, CHECK_VERIFY(this));
+    for (int i = 0; i < n; i++) {
+      current_frame->push_stack(return_type[i], CHECK_VERIFY(this)); // push types backwards
+    }
+  }
+}
+
+// Maps a newarray atype operand (T_BOOLEAN..T_LONG, 4..11) to the
+// verification type of the resulting primitive array; records a VerifyError
+// and returns bogus_type for any other operand value.
+VerificationType ClassVerifier::get_newarray_type(
+    u2 index, u2 bci, TRAPS) {
+  // Indexed directly by the BasicType constant; slots 0-3 are unused.
+  const char* from_bt[] = {
+    NULL, NULL, NULL, NULL, "[Z", "[C", "[F", "[D", "[B", "[S", "[I", "[J", 
+  };
+  if (index < T_BOOLEAN || index > T_LONG) {
+    verify_error(bci, "Illegal newarray instruction");
+    return VerificationType::bogus_type();
+  }
+
+  // from_bt[index] contains the array signature which has a length of 2
+  symbolHandle sig = oopFactory::new_symbol_handle(
+    from_bt[index], 2, CHECK_(VerificationType::bogus_type()));
+  return VerificationType::reference_type(sig);
+}
+
+// Verifies an anewarray instruction: checks the component class constant
+// pool entry, pops the int length, builds the one-dimension-higher array
+// signature from the component type, and pushes the new array type.
+void ClassVerifier::verify_anewarray(
+    u2 index, constantPoolHandle cp, StackMapFrame* current_frame, TRAPS) {
+  verify_cp_class_type(index, cp, CHECK_VERIFY(this));
+  // The array length on top of the stack must be an int.
+  current_frame->pop_stack(
+    VerificationType::integer_type(), CHECK_VERIFY(this));
+
+  VerificationType component_type = 
+    cp_index_to_type(index, cp, CHECK_VERIFY(this));
+  ResourceMark rm(THREAD);
+  int length;
+  char* arr_sig_str;
+  if (component_type.is_array()) {     // it's an array
+    const char* component_name = component_type.name()->as_utf8();
+    // add one dimension to component
+    length = (int)strlen(component_name) + 1;
+    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length);
+    arr_sig_str[0] = '[';
+    strncpy(&arr_sig_str[1], component_name, length - 1);
+  } else {         // it's an object or interface
+    const char* component_name = component_type.name()->as_utf8();
+    // add one dimension to component with 'L' prepended and ';' postpended.
+    length = (int)strlen(component_name) + 3;
+    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length);
+    arr_sig_str[0] = '[';
+    arr_sig_str[1] = 'L';
+    strncpy(&arr_sig_str[2], component_name, length - 2);
+    arr_sig_str[length - 1] = ';';
+  }
+  symbolHandle arr_sig = oopFactory::new_symbol_handle(
+    arr_sig_str, length, CHECK_VERIFY(this));
+  VerificationType new_array_type = VerificationType::reference_type(arr_sig);
+  current_frame->push_stack(new_array_type, CHECK_VERIFY(this));
+}
+
+// iload: checks local 'index' holds an int and pushes int onto the stack.
+void ClassVerifier::verify_iload(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->get_local(
+    index, VerificationType::integer_type(), CHECK_VERIFY(this));
+  current_frame->push_stack(
+    VerificationType::integer_type(), CHECK_VERIFY(this));
+}
+
+// lload: checks locals 'index'/'index'+1 hold a long pair and pushes long.
+void ClassVerifier::verify_lload(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->get_local_2(
+    index, VerificationType::long_type(), 
+    VerificationType::long2_type(), CHECK_VERIFY(this));
+  current_frame->push_stack_2(
+    VerificationType::long_type(), 
+    VerificationType::long2_type(), CHECK_VERIFY(this));
+}
+
+// fload: checks local 'index' holds a float and pushes float onto the stack.
+void ClassVerifier::verify_fload(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->get_local(
+    index, VerificationType::float_type(), CHECK_VERIFY(this));
+  current_frame->push_stack(
+    VerificationType::float_type(), CHECK_VERIFY(this));
+}
+
+// dload: checks locals 'index'/'index'+1 hold a double pair and pushes double.
+void ClassVerifier::verify_dload(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->get_local_2(
+    index, VerificationType::double_type(), 
+    VerificationType::double2_type(), CHECK_VERIFY(this));
+  current_frame->push_stack_2(
+    VerificationType::double_type(), 
+    VerificationType::double2_type(), CHECK_VERIFY(this));
+}
+
+// aload: checks local 'index' holds a reference and pushes that exact type.
+void ClassVerifier::verify_aload(u2 index, StackMapFrame* current_frame, TRAPS) {
+  VerificationType type = current_frame->get_local(
+    index, VerificationType::reference_check(), CHECK_VERIFY(this));
+  current_frame->push_stack(type, CHECK_VERIFY(this));
+}
+
+// istore: pops an int from the stack and stores int into local 'index'.
+void ClassVerifier::verify_istore(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->pop_stack(
+    VerificationType::integer_type(), CHECK_VERIFY(this));
+  current_frame->set_local(
+    index, VerificationType::integer_type(), CHECK_VERIFY(this));
+}
+
+// lstore: pops a long pair from the stack and stores it in locals
+// 'index'/'index'+1.  Note pop order is the reverse of push order.
+void ClassVerifier::verify_lstore(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->pop_stack_2(
+    VerificationType::long2_type(), 
+    VerificationType::long_type(), CHECK_VERIFY(this));
+  current_frame->set_local_2(
+    index, VerificationType::long_type(), 
+    VerificationType::long2_type(), CHECK_VERIFY(this));
+}
+
+// fstore: pops a float from the stack and stores float into local 'index'.
+void ClassVerifier::verify_fstore(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->pop_stack(VerificationType::float_type(), CHECK_VERIFY(this));
+  current_frame->set_local(
+    index, VerificationType::float_type(), CHECK_VERIFY(this));
+}
+
+// dstore: pops a double pair from the stack and stores it in locals
+// 'index'/'index'+1.  Note pop order is the reverse of push order.
+void ClassVerifier::verify_dstore(u2 index, StackMapFrame* current_frame, TRAPS) {
+  current_frame->pop_stack_2(
+    VerificationType::double2_type(), 
+    VerificationType::double_type(), CHECK_VERIFY(this));
+  current_frame->set_local_2(
+    index, VerificationType::double_type(), 
+    VerificationType::double2_type(), CHECK_VERIFY(this));
+}
+
+// astore: pops a reference from the stack and stores that exact type into
+// local 'index'.
+void ClassVerifier::verify_astore(u2 index, StackMapFrame* current_frame, TRAPS) {
+  VerificationType type = current_frame->pop_stack(
+    VerificationType::reference_check(), CHECK_VERIFY(this));
+  current_frame->set_local(index, type, CHECK_VERIFY(this));
+}
+
+// iinc: checks local 'index' holds an int; the stack is untouched and the
+// local keeps its (int) type.
+void ClassVerifier::verify_iinc(u2 index, StackMapFrame* current_frame, TRAPS) {
+  VerificationType type = current_frame->get_local(
+    index, VerificationType::integer_type(), CHECK_VERIFY(this));
+  current_frame->set_local(index, type, CHECK_VERIFY(this));
+}
+
+// Checks that 'type' (the value being returned at 'bci') is assignable to
+// the method's declared return type.  bogus_type as the declared return type
+// marks a void method, for which a value-returning instruction is an error.
+void ClassVerifier::verify_return_value(
+    VerificationType return_type, VerificationType type, u2 bci, TRAPS) {
+  if (return_type == VerificationType::bogus_type()) {
+    verify_error(bci, "Method expects a return value");
+    return;
+  }
+  bool match = return_type.is_assignable_from(type, _klass, CHECK_VERIFY(this));
+  if (!match) {
+    verify_error(bci, "Bad return type");
+    return;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,242 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)verifier.hpp	1.41 07/05/05 17:07:02 JVM"
+#endif
+/*
+ * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// The verifier class: static entry points for bytecode verification.
+class Verifier : AllStatic {
+ public:
+  // Class files of this major version or above carry StackMapTable
+  // attributes and use the split (type-checking) verifier.
+  enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 };
+  typedef enum { ThrowException, NoException } Mode;
+
+  /**
+   * Verify the bytecodes for a class.  If mode == ThrowException, the
+   * appropriate VerifyError or ClassFormatError is thrown on failure.
+   * Otherwise no exception is thrown and the boolean return value
+   * indicates the error.
+   */
+  static bool verify(instanceKlassHandle klass, Mode mode, TRAPS);
+
+  // Return false if the class is loaded by the bootstrap loader.
+  static bool should_verify_for(oop class_loader);
+
+  // Relax certain verifier checks to enable some broken 1.1 apps to run on 1.2.
+  static bool relax_verify_for(oop class_loader);
+
+ private:
+  static bool is_eligible_for_verification(instanceKlassHandle klass);
+  // Fall back to the (older) type-inference verifier; returns the
+  // exception name symbol on failure, filling 'msg' with the message.
+  static symbolHandle inference_verify(
+    instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS);
+};
+
+class RawBytecodeStream;
+class StackMapFrame;
+class StackMapTable;
+
+// Summary of verifier's memory usage:
+// StackMapTable is stack allocated.
+// StackMapFrame are resource allocated. There is one ResourceMark
+// for each method.
+// There is one mutable StackMapFrame (current_frame) which is updated
+// by abstract bytecode interpretation. frame_in_exception_handler() returns
+// a frame that has a mutable one-item stack (ready for pushing the
+// catch type exception object). All the other StackMapFrame's
+// are immutable (including their locals and stack arrays) after
+// their constructions.
+// locals/stack arrays in StackMapFrame are resource allocated.
+// locals/stack arrays can be shared between StackMapFrame's, except
+// the mutable StackMapFrame (current_frame).
+// Care needs to be taken to make sure resource objects don't outlive
+// the lifetime of their ResourceMark.
+
+// These macros are used similarly to CHECK macros but also check 
+// the status of the verifier and return if that has an error.
+// NOTE: like the TRAPS/CHECK macros, these deliberately contain
+// unbalanced parentheses.  Substituted as the last "argument" of a
+// call, the expansion closes the call with CHECK/CHECK_, tests the
+// verifier's error flag, and returns early on error; the trailing
+// "(0" is re-balanced by the call site's own closing ")".
+#define CHECK_VERIFY(verifier) \
+  CHECK); if ((verifier)->has_error()) return; (0
+#define CHECK_VERIFY_(verifier, result) \
+  CHECK_(result)); if ((verifier)->has_error()) return (result); (0
+
+// A new instance of this class is created for each class being verified
+class ClassVerifier : public StackObj {
+ private:
+  Thread* _thread;
+  symbolHandle _exception_type;  // non-null iff an error has been recorded
+  char* _message;                // caller-supplied error message buffer
+  size_t _message_buffer_len;
+
+  void verify_method(methodHandle method, TRAPS);
+  char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
+  void verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS);
+  void verify_local_variable_table(u4 code_length, char* code_data, TRAPS);
+
+  // Resolve a field/method ref constant-pool entry to the verification
+  // type of its declaring class.
+  VerificationType cp_ref_index_to_type(
+      int index, constantPoolHandle cp, TRAPS) {
+    return cp_index_to_type(cp->klass_ref_index_at(index), cp, THREAD);
+  }
+
+  bool is_protected_access(
+    instanceKlassHandle this_class, klassOop target_class,
+    symbolOop field_name, symbolOop field_sig, bool is_method);
+
+  // Constant-pool well-formedness checks.
+  void verify_cp_index(constantPoolHandle cp, int index, TRAPS);
+  void verify_cp_type(
+    int index, constantPoolHandle cp, unsigned int types, TRAPS);
+  void verify_cp_class_type(int index, constantPoolHandle cp, TRAPS);
+
+  u2 verify_stackmap_table(
+    u2 stackmap_index, u2 bci, StackMapFrame* current_frame, 
+    StackMapTable* stackmap_table, bool no_control_flow, TRAPS);
+
+  void verify_exception_handler_targets(
+    u2 bci, bool this_uninit, StackMapFrame* current_frame, 
+    StackMapTable* stackmap_table, TRAPS);
+
+  void verify_ldc(
+    int opcode, u2 index, StackMapFrame *current_frame, 
+    constantPoolHandle cp, u2 bci, TRAPS);
+
+  void verify_switch(
+    RawBytecodeStream* bcs, u4 code_length, char* code_data, 
+    StackMapFrame* current_frame, StackMapTable* stackmap_table, TRAPS);
+
+  void verify_field_instructions(
+    RawBytecodeStream* bcs, StackMapFrame* current_frame, 
+    constantPoolHandle cp, TRAPS);
+
+  void verify_invoke_init(
+    RawBytecodeStream* bcs, VerificationType ref_class_type, 
+    StackMapFrame* current_frame, u4 code_length, bool* this_uninit, 
+    constantPoolHandle cp, TRAPS);
+
+  void verify_invoke_instructions(
+    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, 
+    bool* this_uninit, VerificationType return_type, 
+    constantPoolHandle cp, TRAPS);
+
+  VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
+  void verify_anewarray(
+    u2 index, constantPoolHandle cp, StackMapFrame* current_frame, TRAPS);
+  void verify_return_value(
+    VerificationType return_type, VerificationType type, u2 offset, TRAPS);
+
+  // Per-opcode-family load/store/iinc verifiers (see verifier.cpp).
+  void verify_iload (u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_lload (u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_fload (u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_dload (u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_aload (u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_istore(u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_lstore(u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_fstore(u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_dstore(u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_astore(u2 index, StackMapFrame* current_frame, TRAPS);
+  void verify_iinc  (u2 index, StackMapFrame* current_frame, TRAPS);
+
+  bool name_in_supers(symbolOop ref_name, instanceKlassHandle current);
+
+  instanceKlassHandle _klass;  // the class being verified
+  methodHandle        _method; // current method being verified
+  VerificationType    _this_type; // the verification type of the current class
+
+ public:
+  enum {
+    BYTECODE_OFFSET = 1,
+    NEW_OFFSET = 2
+  };
+
+  // constructor
+  ClassVerifier(instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS);
+
+  // destructor
+  ~ClassVerifier();
+
+  Thread* thread()             { return _thread; }
+  methodHandle method()        { return _method; }
+  instanceKlassHandle current_class() const { return _klass; }
+  VerificationType current_type() const { return _this_type; }
+
+  // Verifies the class.  If a verify or class file format error occurs, 
+  // '_exception_type' is set to the exception name and the message
+  // buffer is filled in with the exception message.
+  void verify_class(TRAPS);
+
+  // Return status modes
+  symbolHandle result() const { return _exception_type; }
+  bool has_error() const { return !(result().is_null()); }
+
+  // Called when verify or class format errors are encountered.  
+  // May throw an exception based upon the mode.
+  void verify_error(u2 offset, const char* fmt, ...);
+  void verify_error(const char* fmt, ...);
+  void class_format_error(const char* fmt, ...);
+  void format_error_message(const char* fmt, int offset, va_list args);
+
+  klassOop load_class(symbolHandle name, TRAPS);
+
+  // Translate one signature element into 1 or 2 VerificationType slots;
+  // returns the number of slots written (see inline body below).
+  int change_sig_to_verificationType(
+    SignatureStream* sig_type, VerificationType* inference_type, TRAPS);
+
+  VerificationType cp_index_to_type(int index, constantPoolHandle cp, TRAPS) {
+    return VerificationType::reference_type(
+      symbolHandle(THREAD, cp->klass_name_at(index)));
+  }
+
+  static bool _verify_verbose;  // for debugging
+};
+
+// Convert the current element of a SignatureStream into verification
+// types, writing into 'inference_type' (and the following slot for the
+// two-slot long/double cases).  Returns the number of slots written.
+inline int ClassVerifier::change_sig_to_verificationType(
+    SignatureStream* sig_type, VerificationType* inference_type, TRAPS) {
+  BasicType bt = sig_type->type();
+  switch (bt) {
+    case T_OBJECT:
+    case T_ARRAY:
+      { 
+        symbolOop name = sig_type->as_symbol(CHECK_0); 
+        *inference_type = 
+          VerificationType::reference_type(symbolHandle(THREAD, name));
+        return 1;
+      }
+    case T_LONG: 
+      *inference_type = VerificationType::long_type();
+      *++inference_type = VerificationType::long2_type();
+      return 2;
+    case T_DOUBLE:
+      *inference_type = VerificationType::double_type();
+      *++inference_type = VerificationType::double2_type();
+      return 2;
+    // All sub-int integral types verify as plain int.
+    case T_INT:
+    case T_BOOLEAN:
+    case T_BYTE:
+    case T_CHAR:
+    case T_SHORT:
+      *inference_type = VerificationType::integer_type();
+      return 1;
+    case T_FLOAT:
+      *inference_type = VerificationType::float_type();
+      return 1;
+    default:
+      ShouldNotReachHere();
+      return 1;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,494 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)vmSymbols.cpp	1.28 07/05/17 15:50:36 JVM"
+#endif
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vmSymbols.cpp.incl"
+
+
+// Backing store for all VM symbols, indexed by SID.
+symbolOop vmSymbols::_symbols[vmSymbols::SID_LIMIT];
+
+// Signature symbols for the primitive BasicTypes (T_OBJECT/T_ARRAY stay NULL).
+symbolOop vmSymbols::_type_signatures[T_VOID+1] = { NULL /*, NULL...*/ };
+
+// Order symbols by address; any total order works for the find_sid index.
+inline int compare_symbol(symbolOop a, symbolOop b) {
+  if (a == b)  return 0;
+  return (intptr_t)a > (intptr_t)b ? +1 : -1;
+}
+
+// SIDs sorted by their symbol's address; built in vmSymbols::initialize.
+static vmSymbols::SID vm_symbol_index[vmSymbols::SID_LIMIT];
+// qsort comparator; extern "C" so its type matches what qsort expects.
+extern "C" {
+  static int compare_vmsymbol_sid(const void* void_a, const void* void_b) {
+    symbolOop a = vmSymbols::symbol_at(*((vmSymbols::SID*) void_a));
+    symbolOop b = vmSymbols::symbol_at(*((vmSymbols::SID*) void_b));
+    return compare_symbol(a, b);
+  }
+}
+
+#ifndef PRODUCT
+// All enum constant names packed into one NUL-separated string, ending
+// with an extra NUL as a terminator.
+#define VM_SYMBOL_ENUM_NAME_BODY(name, string) #name "\0"
+static const char* vm_symbol_enum_names =
+  VM_SYMBOLS_DO(VM_SYMBOL_ENUM_NAME_BODY, VM_ALIAS_IGNORE)
+  "\0";
+// Debug helper: map a SID back to its enum constant name by walking the
+// packed string, skipping (sid - FIRST_SID) entries.
+static const char* vm_symbol_enum_name(vmSymbols::SID sid) {
+  const char* string = &vm_symbol_enum_names[0];
+  int skip = (int)sid - (int)vmSymbols::FIRST_SID;
+  for (; skip != 0; skip--) {
+    size_t skiplen = strlen(string);
+    if (skiplen == 0)  return "<unknown>";  // overflow
+    string += skiplen+1;
+  }
+  return string;
+}
+#endif //PRODUCT
+
+// Put all the VM symbol strings in one place.
+// Makes for a more compact libjvm.
+// (One NUL-separated blob; initialize() walks it entry by entry.)
+#define VM_SYMBOL_BODY(name, string) string "\0"
+static const char* vm_symbol_bodies = VM_SYMBOLS_DO(VM_SYMBOL_BODY, VM_ALIAS_IGNORE);
+
+// One-time setup: materialize every vmSymbol as a symbolOop (unless
+// loaded from a shared archive), record the primitive type signatures,
+// build the sorted index used by find_sid, and (debug only) self-check
+// for duplicates and index consistency.
+void vmSymbols::initialize(TRAPS) {
+  assert((int)SID_LIMIT <= (1<<log2_SID_LIMIT), "must fit in this bitfield");
+  assert((int)SID_LIMIT*5 > (1<<log2_SID_LIMIT), "make the bitfield smaller, please");
+
+  if (!UseSharedSpaces) {
+    // Walk the packed NUL-separated blob, creating one symbol per entry.
+    const char* string = &vm_symbol_bodies[0];
+    for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+      symbolOop sym = oopFactory::new_symbol(string, CHECK);
+      _symbols[index] = sym;
+      string += strlen(string); // skip string body
+      string += 1;              // skip trailing null
+    }
+
+    _type_signatures[T_BYTE]    = byte_signature();
+    _type_signatures[T_CHAR]    = char_signature();
+    _type_signatures[T_DOUBLE]  = double_signature();
+    _type_signatures[T_FLOAT]   = float_signature();
+    _type_signatures[T_INT]     = int_signature();
+    _type_signatures[T_LONG]    = long_signature();
+    _type_signatures[T_SHORT]   = short_signature();
+    _type_signatures[T_BOOLEAN] = bool_signature();
+    _type_signatures[T_VOID]    = void_signature();
+    // no single signatures for T_OBJECT or T_ARRAY
+  }
+
+#ifdef ASSERT
+  // Check for duplicates:
+  for (int i1 = (int)FIRST_SID; i1 < (int)SID_LIMIT; i1++) {
+    symbolOop sym = symbol_at((SID)i1);
+    for (int i2 = (int)FIRST_SID; i2 < i1; i2++) {
+      if (symbol_at((SID)i2) == sym) {
+        tty->print("*** Duplicate VM symbol SIDs %s(%d) and %s(%d): \"",
+                   vm_symbol_enum_name((SID)i2), i2,
+                   vm_symbol_enum_name((SID)i1), i1);
+        sym->print_symbol_on(tty);
+        tty->print_cr("\"");
+      }
+    }
+  }
+#endif //ASSERT
+
+  // Create an index for find_id:
+  {
+    for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+      vm_symbol_index[index] = (SID)index;
+    }
+    int num_sids = SID_LIMIT-FIRST_SID;
+    qsort(&vm_symbol_index[FIRST_SID], num_sids, sizeof(vm_symbol_index[0]),
+          compare_vmsymbol_sid);
+  }
+
+#ifdef ASSERT
+  {
+    // Spot-check correspondence between strings, symbols, and enums:
+    assert(_symbols[NO_SID] == NULL, "must be");
+    const char* str = "java/lang/Object";
+    symbolOop sym = oopFactory::new_symbol(str, CHECK);
+    assert(strcmp(str, (char*)sym->base()) == 0, "");
+    assert(sym == java_lang_Object(), "");
+    SID sid = VM_SYMBOL_ENUM_NAME(java_lang_Object);
+    assert(find_sid(sym) == sid, "");
+    assert(symbol_at(sid) == sym, "");
+
+    // Make sure find_sid produces the right answer in each case.
+    for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+      sym = symbol_at((SID)index);
+      sid = find_sid(sym);
+      assert(sid == (SID)index, "symbol index works");
+      // Note:  If there are duplicates, this assert will fail.
+      // A "Duplicate VM symbol" message will have already been printed.
+    }
+
+    // The string "format" happens (at the moment) not to be a vmSymbol,
+    // though it is a method name in java.lang.String.
+    str = "format";
+    sym = oopFactory::new_symbol(str, CHECK);
+    sid = find_sid(sym);
+    assert(sid == NO_SID, "symbol index works (negative test)");
+  }
+#endif
+}
+
+
+#ifndef PRODUCT
+// Debug helper: return the string body for a SID by linear walk of the
+// packed blob (slow; debug/printing use only).
+const char* vmSymbols::name_for(vmSymbols::SID sid) {
+  if (sid == NO_SID)
+    return "NO_SID";
+  const char* string = &vm_symbol_bodies[0];
+  for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+    if (index == (int)sid)
+      return string;
+    string += strlen(string); // skip string body
+    string += 1;              // skip trailing null
+  }
+  return "BAD_SID";
+}
+#endif
+
+
+
+// GC support: apply 'f' to every symbol root.  Type-signature slots
+// are normally visited only when non-NULL; 'do_all' forces visiting
+// the NULL slots too.
+void vmSymbols::oops_do(OopClosure* f, bool do_all) {
+  for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+    f->do_oop((oop*) &_symbols[index]);
+  }
+  for (int i = 0; i < T_VOID+1; i++) {
+    if (_type_signatures[i] != NULL) {
+      assert(i >= T_BOOLEAN, "checking");
+      f->do_oop((oop*)&_type_signatures[i]);
+    } else if (do_all) {
+      f->do_oop((oop*)&_type_signatures[i]);
+    }
+  }
+}
+
+
+// Map a signature symbol to its BasicType; anything that is not one of
+// the cached primitive signatures is treated as T_OBJECT.
+BasicType vmSymbols::signature_type(symbolOop s) {
+  assert(s != NULL, "checking");
+  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
+    if (s == _type_signatures[i]) {
+      return (BasicType)i;
+    }
+  }
+  return T_OBJECT;
+}
+
+
+// Cache of the last successful binary-search midpoint; a pure
+// optimization hint, safe even if racy.
+static int mid_hint = (int)vmSymbols::FIRST_SID+1;
+
+#ifndef PRODUCT
+static int find_sid_calls, find_sid_probes;
+// (Typical counts are calls=7000 and probes=17000.)
+#endif
+
+// Look up the SID of 'symbol', or NO_SID if it is not a VM symbol.
+vmSymbols::SID vmSymbols::find_sid(symbolOop symbol) {
+  // Handle the majority of misses by a bounds check.
+  // Then, use a binary search over the index.
+  // Expected trip count is less than log2_SID_LIMIT, about eight.
+  // This is slow but acceptable, given that calls are not
+  // dynamically common.  (methodOop::intrinsic_id has a cache.)
+  NOT_PRODUCT(find_sid_calls++);
+  int min = (int)FIRST_SID, max = (int)SID_LIMIT - 1;
+  SID sid = NO_SID, sid1;
+  int cmp1;
+  sid1 = vm_symbol_index[min];
+  cmp1 = compare_symbol(symbol, symbol_at(sid1));
+  if (cmp1 <= 0) {              // before the first
+    if (cmp1 == 0)  sid = sid1;
+  } else {
+    sid1 = vm_symbol_index[max];
+    cmp1 = compare_symbol(symbol, symbol_at(sid1));
+    if (cmp1 >= 0) {            // after the last
+      if (cmp1 == 0)  sid = sid1;
+    } else {
+      // After checking the extremes, do a binary search.
+      ++min; --max;             // endpoints are done
+      int mid = mid_hint;       // start at previous success
+      while (max >= min) {
+        assert(mid >= min && mid <= max, "");
+        NOT_PRODUCT(find_sid_probes++);
+        sid1 = vm_symbol_index[mid];
+        cmp1 = compare_symbol(symbol, symbol_at(sid1));
+        if (cmp1 == 0) {
+          mid_hint = mid;
+          sid = sid1;
+          break;
+        }
+        if (cmp1 < 0)
+          max = mid - 1;        // symbol < symbol_at(sid)
+        else
+          min = mid + 1;
+
+        // Pick a new probe point:
+        mid = (max + min) / 2;
+      }
+    }
+  }
+
+#ifdef ASSERT
+  // Perform the exhaustive self-check the first 1000 calls,
+  // and every 100 calls thereafter.
+  static int find_sid_check_count = -2000;
+  if ((uint)++find_sid_check_count > (uint)100) {
+    if (find_sid_check_count > 0)  find_sid_check_count = 0;
+
+    // Make sure this is the right answer, using linear search.
+    // (We have already proven that there are no duplicates in the list.)
+    SID sid2 = NO_SID;
+    for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
+      symbolOop sym2 = symbol_at((SID)index);
+      if (sym2 == symbol) {
+        sid2 = (SID)index;
+        break;
+      }
+    }
+    // Unless it's a duplicate, assert that the sids are the same.
+    if (_symbols[sid] != _symbols[sid2]) {
+      assert(sid == sid2, "binary same as linear search");
+    }
+  }
+#endif //ASSERT
+
+  return sid;
+}
+
+
+// Intrinsic ID names packed into one NUL-separated blob.
+#define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0"
+static const char* vm_intrinsic_name_bodies = 
+  VM_INTRINSICS_DO(VM_INTRINSIC_INITIALIZE,
+                   VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+
+static const char* vm_intrinsic_name_table[vmIntrinsics::ID_LIMIT];
+
+// Return the name of an intrinsic ID, lazily building the lookup table
+// from the packed blob on first use (nt[_none] == NULL marks "not yet
+// built").
+const char* vmIntrinsics::name_at(vmIntrinsics::ID id) {
+  const char** nt = &vm_intrinsic_name_table[0];
+  if (nt[_none] == NULL) {
+    char* string = (char*) &vm_intrinsic_name_bodies[0];
+    for (int index = FIRST_ID; index < ID_LIMIT; index++) {
+      nt[index] = string;
+      string += strlen(string); // skip string body
+      string += 1;              // skip trailing null
+    }
+    assert(!strcmp(nt[_hashCode], "_hashCode"), "lined up");
+    nt[_none] = "_none";
+  }
+  if ((uint)id < (uint)ID_LIMIT)
+    return vm_intrinsic_name_table[(uint)id];
+  else
+    return "(unknown intrinsic)";
+}
+
+// These are flag-matching functions:
+// Each checks that the required ('req') access flags are set and the
+// disallowed ('neg') flags are clear.  R = regular, S = static,
+// N = native; synchronized methods are never intrinsified.
+inline bool match_F_R(jshort flags) {
+  const int req = 0;
+  const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
+  return (flags & (req | neg)) == req;
+}
+inline bool match_F_RN(jshort flags) {
+  const int req = JVM_ACC_NATIVE;
+  const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
+  return (flags & (req | neg)) == req;
+}
+inline bool match_F_S(jshort flags) {
+  const int req = JVM_ACC_STATIC;
+  const int neg = JVM_ACC_SYNCHRONIZED;
+  return (flags & (req | neg)) == req;
+}
+inline bool match_F_SN(jshort flags) {
+  const int req = JVM_ACC_STATIC | JVM_ACC_NATIVE;
+  const int neg = JVM_ACC_SYNCHRONIZED;
+  return (flags & (req | neg)) == req;
+}
+
+// These are for forming case labels:
+// Pack three SIDs (holder, name, signature) into one jint so a single
+// switch can dispatch on the triple; each SID fits in log2_SID_LIMIT
+// bits (asserted in find_id below).
+#define ID3(x, y, z) (( jint)(z) +                                  \
+                      ((jint)(y) <<    vmSymbols::log2_SID_LIMIT) + \
+                      ((jint)(x) << (2*vmSymbols::log2_SID_LIMIT))  )
+#define SID_ENUM(n) vmSymbols::VM_SYMBOL_ENUM_NAME(n)
+
+// Map a (holder, name, signature, flags) tuple to an intrinsic ID, or
+// _none if no declared intrinsic matches.
+vmIntrinsics::ID vmIntrinsics::find_id(vmSymbols::SID holder,
+                                       vmSymbols::SID name,
+                                       vmSymbols::SID sig,
+                                       jshort flags) {
+  assert((int)vmSymbols::SID_LIMIT <= (1<<vmSymbols::log2_SID_LIMIT), "must fit");
+
+  // Let the C compiler build the decision tree.
+
+#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
+  case ID3(SID_ENUM(klass), SID_ENUM(name), SID_ENUM(sig)): \
+    if (!match_##fcode(flags))  break; \
+    return id;
+
+  switch (ID3(holder, name, sig)) {
+    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+  }
+  return vmIntrinsics::_none;
+
+#undef VM_INTRINSIC_CASE
+}
+
+
+// Format a human-readable description of an intrinsic into 'buf'
+// ("id: [modifiers] Klass.name(sig)").  In PRODUCT builds only the raw
+// ID name is available; falls back to it if 'buf' is too small.
+const char* vmIntrinsics::short_name_as_C_string(vmIntrinsics::ID id, char* buf, int buflen) {
+  const char* str = name_at(id);
+#ifndef PRODUCT
+  const char* kname = vmSymbols::name_for(class_for(id));
+  const char* mname = vmSymbols::name_for(name_for(id));
+  const char* sname = vmSymbols::name_for(signature_for(id));
+  const char* fname = "";
+  switch (flags_for(id)) {
+  case F_RN: fname = "native ";        break;
+  case F_SN: fname = "native static "; break;
+  case F_S:  fname = "static ";        break;
+  }
+  // Keep only the last component of the class name.
+  const char* kptr = strrchr(kname, '/');
+  if (kptr != NULL)  kname = kptr + 1;
+  int len = jio_snprintf(buf, buflen, "%s: %s%s.%s%s",
+                         str, fname, kname, mname, sname);
+  if (len < buflen)
+    str = buf;
+#endif //PRODUCT
+  return str;
+}
+
+
+// These are for friendly printouts of intrinsics:
+
+// Debug-only reverse mapping: intrinsic ID -> SID of its holder class
+// (NO_SID in PRODUCT builds or for unknown IDs).
+vmSymbols::SID vmIntrinsics::class_for(vmIntrinsics::ID id) {
+#ifndef PRODUCT
+#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
+  case id: return SID_ENUM(klass);
+
+  switch (id) {
+    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+  }
+#undef VM_INTRINSIC_CASE
+#endif //PRODUCT
+  return vmSymbols::NO_SID;
+}
+
+// Debug-only reverse mapping: intrinsic ID -> SID of its method name
+// (NO_SID in PRODUCT builds or for unknown IDs).
+vmSymbols::SID vmIntrinsics::name_for(vmIntrinsics::ID id) {
+#ifndef PRODUCT
+#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
+  case id: return SID_ENUM(name);
+
+  switch (id) {
+    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+  }
+#undef VM_INTRINSIC_CASE
+#endif //PRODUCT
+  return vmSymbols::NO_SID;
+}
+
+// Debug-only reverse mapping: intrinsic ID -> SID of its signature
+// (NO_SID in PRODUCT builds or for unknown IDs).
+vmSymbols::SID vmIntrinsics::signature_for(vmIntrinsics::ID id) {
+#ifndef PRODUCT
+#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
+  case id: return SID_ENUM(sig);
+
+  switch (id) {
+    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+  }
+#undef VM_INTRINSIC_CASE
+#endif //PRODUCT
+  return vmSymbols::NO_SID;
+}
+
+// Debug-only reverse mapping: intrinsic ID -> its flag-matching code
+// (F_none in PRODUCT builds or for unknown IDs).
+vmIntrinsics::Flags vmIntrinsics::flags_for(vmIntrinsics::ID id) {
+#ifndef PRODUCT
+#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
+  case id: return fcode;
+
+  switch (id) {
+    VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+  }
+#undef VM_INTRINSIC_CASE
+#endif //PRODUCT
+  return F_none;
+}
+
+
+#ifndef PRODUCT
+// verify_method performs an extra check on a matched intrinsic method
+
+// True if method 'm' has name 'n' and signature 's'.
+static bool match_method(methodOop m, symbolOop n, symbolOop s) {
+  return (m->name() == n &&
+          m->signature() == s);
+}
+
+// Linear scan of all declared intrinsics: return the ID whose
+// (klass, name, signature) triple matches method 'm' in holder 'mk',
+// or _none.  Debug-only; used to cross-check find_id.
+static vmIntrinsics::ID match_method_with_klass(methodOop m, symbolOop mk) {
+#define VM_INTRINSIC_MATCH(id, klassname, namepart, sigpart, flags) \
+  { symbolOop k = vmSymbols::klassname(); \
+    if (mk == k) { \
+      symbolOop n = vmSymbols::namepart(); \
+      symbolOop s = vmSymbols::sigpart(); \
+      if (match_method(m, n, s)) \
+        return vmIntrinsics::id; \
+    } }
+  VM_INTRINSICS_DO(VM_INTRINSIC_MATCH,
+                   VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
+  return vmIntrinsics::_none;
+#undef VM_INTRINSIC_MATCH
+}
+
+// Cross-check that the intrinsic ID assigned to 'm' (actual_id) agrees
+// with the ID a fresh table scan would declare for it; report any
+// mismatch via xtty and/or tty.  A few StrictMath methods are allowed
+// to alias their java.lang.Math declarations.
+void vmIntrinsics::verify_method(ID actual_id, methodOop m) {
+  symbolOop mk = Klass::cast(m->method_holder())->name();
+  ID declared_id = match_method_with_klass(m, mk);
+
+  if (declared_id == actual_id)  return; // success
+
+  if (declared_id == _none && actual_id != _none && mk == vmSymbols::java_lang_StrictMath()) {
+    // Here are a few special cases in StrictMath not declared in vmSymbols.hpp.
+    switch (actual_id) {
+    case _min:
+    case _max:
+    case _dsqrt:
+      declared_id = match_method_with_klass(m, vmSymbols::java_lang_Math());
+      if (declared_id == actual_id)  return; // acceptable alias
+      break;
+    }
+  }
+
+  const char* declared_name = name_at(declared_id);
+  const char* actual_name   = name_at(actual_id);
+  // Hold the method in a handle and null the raw oop so it cannot be
+  // used across a possible safepoint below.
+  methodHandle mh = m;
+  m = NULL;
+  ttyLocker ttyl;
+  if (xtty != NULL) {
+    xtty->begin_elem("intrinsic_misdeclared actual='%s' declared='%s'",
+                     actual_name, declared_name);
+    xtty->method(mh);
+    xtty->end_elem("");
+  }
+  if (PrintMiscellaneous && (WizardMode || Verbose)) {
+    tty->print_cr("*** misidentified method; %s(%d) should be %s(%d):",
+                  declared_name, declared_id, actual_name, actual_id);
+    // Bug fix: was 'm->print_short_name(tty)', but 'm' was nulled out
+    // above, so that branch dereferenced NULL.  Use the handle instead.
+    mh->print_short_name(tty);
+    tty->cr();
+  }
+}
+#endif //PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,894 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)vmSymbols.hpp	1.162 07/05/17 15:50:40 JVM"
+#endif
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// The classes vmSymbols and vmSymbolHandles are name spaces for fast lookup of
+// symbols commonly used in the VM. The first class returns a symbolOop, while the
+// second class returns a SymbolHandle. The underlying data structure is shared
+// between the two classes.
+//
+// Sample usage:
+//
+//   symbolOop obj       = vmSymbols::java_lang_Object()();
+//   SymbolHandle handle = vmSymbolHandles::java_lang_Object();
+
+
+// Useful sub-macros exported by this header file:
+
+#define VM_SYMBOL_ENUM_NAME(name)    name##_enum  /* enum constant naming scheme for symbol `name` */
+#define VM_INTRINSIC_IGNORE(id, class, name, sig, flags) /*ignored*/
+#define VM_SYMBOL_IGNORE(id, name)                       /*ignored*/
+#define VM_ALIAS_IGNORE(id, id2)                         /*ignored*/
+
+
+// Mapping function names to values. New entries should be added below.
+
+#define VM_SYMBOLS_DO(template, do_alias)                                                         \
+  /* commonly used class names */                                                                 \
+  template(java_lang_System,                          "java/lang/System")                         \
+  template(java_lang_Object,                          "java/lang/Object")                         \
+  template(java_lang_Class,                           "java/lang/Class")                          \
+  template(java_lang_String,                          "java/lang/String")                         \
+  template(java_lang_Thread,                          "java/lang/Thread")                         \
+  template(java_lang_ThreadGroup,                     "java/lang/ThreadGroup")                    \
+  template(java_lang_Cloneable,                       "java/lang/Cloneable")                      \
+  template(java_lang_Throwable,                       "java/lang/Throwable")                      \
+  template(java_lang_ClassLoader,                     "java/lang/ClassLoader")                    \
+  template(java_lang_ClassLoader_NativeLibrary,       "java/lang/ClassLoader\x024NativeLibrary")  \
+  template(java_lang_ThreadDeath,                     "java/lang/ThreadDeath")                    \
+  template(java_lang_Boolean,                         "java/lang/Boolean")                        \
+  template(java_lang_Character,                       "java/lang/Character")                      \
+  template(java_lang_Float,                           "java/lang/Float")                          \
+  template(java_lang_Double,                          "java/lang/Double")                         \
+  template(java_lang_Byte,                            "java/lang/Byte")                           \
+  template(java_lang_Short,                           "java/lang/Short")                          \
+  template(java_lang_Integer,                         "java/lang/Integer")                        \
+  template(java_lang_Long,                            "java/lang/Long")                           \
+  template(java_lang_Shutdown,                        "java/lang/Shutdown")                       \
+  template(java_lang_ref_Reference,                   "java/lang/ref/Reference")                  \
+  template(java_lang_ref_SoftReference,               "java/lang/ref/SoftReference")              \
+  template(java_lang_ref_WeakReference,               "java/lang/ref/WeakReference")              \
+  template(java_lang_ref_FinalReference,              "java/lang/ref/FinalReference")             \
+  template(java_lang_ref_PhantomReference,            "java/lang/ref/PhantomReference")           \
+  template(java_lang_ref_Finalizer,                   "java/lang/ref/Finalizer")                  \
+  template(java_lang_reflect_AccessibleObject,        "java/lang/reflect/AccessibleObject")       \
+  template(java_lang_reflect_Method,                  "java/lang/reflect/Method")                 \
+  template(java_lang_reflect_Constructor,             "java/lang/reflect/Constructor")            \
+  template(java_lang_reflect_Field,                   "java/lang/reflect/Field")                  \
+  template(java_lang_reflect_Array,                   "java/lang/reflect/Array")                  \
+  template(java_lang_StringBuffer,                    "java/lang/StringBuffer")                   \
+  template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
+  template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
+  template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
+  template(java_io_OutputStream,                      "java/io/OutputStream")                     \
+  template(java_io_Reader,                            "java/io/Reader")                           \
+  template(java_io_BufferedReader,                    "java/io/BufferedReader")                   \
+  template(java_io_FileInputStream,                   "java/io/FileInputStream")                  \
+  template(java_io_ByteArrayInputStream,              "java/io/ByteArrayInputStream")             \
+  template(java_io_Serializable,                      "java/io/Serializable")                     \
+  template(java_util_Arrays,                          "java/util/Arrays")                         \
+  template(java_util_Properties,                      "java/util/Properties")                     \
+  template(java_util_Vector,                          "java/util/Vector")                         \
+  template(java_util_AbstractList,                    "java/util/AbstractList")                   \
+  template(java_util_Hashtable,                       "java/util/Hashtable")                      \
+  template(java_lang_Compiler,                        "java/lang/Compiler")                       \
+  template(sun_misc_Signal,                           "sun/misc/Signal")                          \
+  template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
+                                                                                                  \
+  /* class file format tags */                                                                    \
+  template(tag_source_file,                           "SourceFile")                               \
+  template(tag_inner_classes,                         "InnerClasses")                             \
+  template(tag_constant_value,                        "ConstantValue")                            \
+  template(tag_code,                                  "Code")                                     \
+  template(tag_exceptions,                            "Exceptions")                               \
+  template(tag_line_number_table,                     "LineNumberTable")                          \
+  template(tag_local_variable_table,                  "LocalVariableTable")                       \
+  template(tag_local_variable_type_table,             "LocalVariableTypeTable")                   \
+  template(tag_stack_map_table,                       "StackMapTable")                            \
+  template(tag_synthetic,                             "Synthetic")                                \
+  template(tag_deprecated,                            "Deprecated")                               \
+  template(tag_source_debug_extension,                "SourceDebugExtension")                     \
+  template(tag_signature,                             "Signature")                                \
+  template(tag_runtime_visible_annotations,           "RuntimeVisibleAnnotations")                \
+  template(tag_runtime_invisible_annotations,         "RuntimeInvisibleAnnotations")              \
+  template(tag_runtime_visible_parameter_annotations, "RuntimeVisibleParameterAnnotations")       \
+  template(tag_runtime_invisible_parameter_annotations,"RuntimeInvisibleParameterAnnotations")    \
+  template(tag_annotation_default,                    "AnnotationDefault")                        \
+  template(tag_enclosing_method,                      "EnclosingMethod")                          \
+                                                                                                  \
+  /* exception klasses: at least all exceptions thrown by the VM have entries here */             \
+  template(java_lang_ArithmeticException,             "java/lang/ArithmeticException")            \
+  template(java_lang_ArrayIndexOutOfBoundsException,  "java/lang/ArrayIndexOutOfBoundsException") \
+  template(java_lang_ArrayStoreException,             "java/lang/ArrayStoreException")            \
+  template(java_lang_ClassCastException,              "java/lang/ClassCastException")             \
+  template(java_lang_ClassNotFoundException,          "java/lang/ClassNotFoundException")         \
+  template(java_lang_CloneNotSupportedException,      "java/lang/CloneNotSupportedException")     \
+  template(java_lang_IllegalAccessException,          "java/lang/IllegalAccessException")         \
+  template(java_lang_IllegalArgumentException,        "java/lang/IllegalArgumentException")       \
+  template(java_lang_IllegalMonitorStateException,    "java/lang/IllegalMonitorStateException")   \
+  template(java_lang_IllegalThreadStateException,     "java/lang/IllegalThreadStateException")    \
+  template(java_lang_IndexOutOfBoundsException,       "java/lang/IndexOutOfBoundsException")      \
+  template(java_lang_InstantiationException,          "java/lang/InstantiationException")         \
+  template(java_lang_InstantiationError,              "java/lang/InstantiationError")             \
+  template(java_lang_InterruptedException,            "java/lang/InterruptedException")           \
+  template(java_lang_LinkageError,                    "java/lang/LinkageError")                   \
+  template(java_lang_NegativeArraySizeException,      "java/lang/NegativeArraySizeException")     \
+  template(java_lang_NoSuchFieldException,            "java/lang/NoSuchFieldException")           \
+  template(java_lang_NoSuchMethodException,           "java/lang/NoSuchMethodException")          \
+  template(java_lang_NullPointerException,            "java/lang/NullPointerException")           \
+  template(java_lang_StringIndexOutOfBoundsException, "java/lang/StringIndexOutOfBoundsException")\
+  template(java_lang_InvalidClassException,           "java/lang/InvalidClassException")          \
+  template(java_lang_reflect_InvocationTargetException, "java/lang/reflect/InvocationTargetException") \
+  template(java_lang_Exception,                       "java/lang/Exception")                      \
+  template(java_lang_RuntimeException,                "java/lang/RuntimeException")               \
+  template(java_io_IOException,                       "java/io/IOException")                      \
+  template(java_security_PrivilegedActionException,   "java/security/PrivilegedActionException")  \
+                                                                                                  \
+  /* error klasses: at least all errors thrown by the VM have entries here */                     \
+  template(java_lang_AbstractMethodError,             "java/lang/AbstractMethodError")            \
+  template(java_lang_ClassCircularityError,           "java/lang/ClassCircularityError")          \
+  template(java_lang_ClassFormatError,                "java/lang/ClassFormatError")               \
+  template(java_lang_UnsupportedClassVersionError,    "java/lang/UnsupportedClassVersionError")   \
+  template(java_lang_Error,                           "java/lang/Error")                          \
+  template(java_lang_ExceptionInInitializerError,     "java/lang/ExceptionInInitializerError")    \
+  template(java_lang_IllegalAccessError,              "java/lang/IllegalAccessError")             \
+  template(java_lang_IncompatibleClassChangeError,    "java/lang/IncompatibleClassChangeError")   \
+  template(java_lang_InternalError,                   "java/lang/InternalError")                  \
+  template(java_lang_NoClassDefFoundError,            "java/lang/NoClassDefFoundError")           \
+  template(java_lang_NoSuchFieldError,                "java/lang/NoSuchFieldError")               \
+  template(java_lang_NoSuchMethodError,               "java/lang/NoSuchMethodError")              \
+  template(java_lang_OutOfMemoryError,                "java/lang/OutOfMemoryError")               \
+  template(java_lang_UnsatisfiedLinkError,            "java/lang/UnsatisfiedLinkError")           \
+  template(java_lang_VerifyError,                     "java/lang/VerifyError")                    \
+  template(java_lang_SecurityException,               "java/lang/SecurityException")              \
+  template(java_lang_VirtualMachineError,             "java/lang/VirtualMachineError")            \
+  template(java_lang_StackOverflowError,              "java/lang/StackOverflowError")             \
+  template(java_lang_StackTraceElement,               "java/lang/StackTraceElement")              \
+  template(java_util_concurrent_locks_AbstractOwnableSynchronizer,   "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
+                                                                                                  \
+  /* class symbols needed by intrinsics */                                                        \
+  VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
+                                                                                                  \
+  /* Support for reflection based on dynamic bytecode generation (JDK 1.4 and above) */           \
+                                                                                                  \
+  template(sun_reflect_FieldInfo,                     "sun/reflect/FieldInfo")                    \
+  template(sun_reflect_MethodInfo,                    "sun/reflect/MethodInfo")                   \
+  template(sun_reflect_MagicAccessorImpl,             "sun/reflect/MagicAccessorImpl")            \
+  template(sun_reflect_MethodAccessorImpl,            "sun/reflect/MethodAccessorImpl")           \
+  template(sun_reflect_ConstructorAccessorImpl,       "sun/reflect/ConstructorAccessorImpl")      \
+  template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \
+  template(sun_reflect_DelegatingClassLoader,         "sun/reflect/DelegatingClassLoader")        \
+  template(sun_reflect_Reflection,                    "sun/reflect/Reflection")                   \
+  template(checkedExceptions_name,                    "checkedExceptions")                        \
+  template(clazz_name,                                "clazz")                                    \
+  template(exceptionTypes_name,                       "exceptionTypes")                           \
+  template(modifiers_name,                            "modifiers")                                \
+  template(newConstructor_name,                       "newConstructor")                           \
+  template(newConstructor_signature,                  "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Constructor;") \
+  template(newField_name,                             "newField")                                 \
+  template(newField_signature,                        "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \
+  template(newMethod_name,                            "newMethod")                                \
+  template(newMethod_signature,                       "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \
+  template(invoke_name,                               "invoke")                                   \
+  template(override_name,                             "override")                                 \
+  template(parameterTypes_name,                       "parameterTypes")                           \
+  template(returnType_name,                           "returnType")                               \
+  template(signature_name,                            "signature")                                \
+  template(slot_name,                                 "slot")                                     \
+                                                                                                  \
+  /* Support for annotations (JDK 1.5 and above) */                                               \
+                                                                                                  \
+  template(annotations_name,                          "annotations")                              \
+  template(parameter_annotations_name,                "parameterAnnotations")                     \
+  template(annotation_default_name,                   "annotationDefault")                        \
+  template(sun_reflect_ConstantPool,                  "sun/reflect/ConstantPool")                 \
+  template(constantPoolOop_name,                      "constantPoolOop")                          \
+  template(sun_reflect_UnsafeStaticFieldAccessorImpl, "sun/reflect/UnsafeStaticFieldAccessorImpl")\
+  template(base_name,                                 "base")                                     \
+                                                                                                  \
+  /* common method names */                                                                       \
+  template(object_initializer_name,                   "<init>")                                   \
+  template(class_initializer_name,                    "<clinit>")                                 \
+  template(println_name,                              "println")                                  \
+  template(printStackTrace_name,                      "printStackTrace")                          \
+  template(main_name,                                 "main")                                     \
+  template(name_name,                                 "name")                                     \
+  template(priority_name,                             "priority")                                 \
+  template(stillborn_name,                            "stillborn")                                \
+  template(group_name,                                "group")                                    \
+  template(daemon_name,                               "daemon")                                   \
+  template(eetop_name,                                "eetop")                                    \
+  template(thread_status_name,                        "threadStatus")                             \
+  template(run_method_name,                           "run")                                      \
+  template(exit_method_name,                          "exit")                                     \
+  template(add_method_name,                           "add")                                      \
+  template(parent_name,                               "parent")                                   \
+  template(threads_name,                              "threads")                                  \
+  template(groups_name,                               "groups")                                   \
+  template(maxPriority_name,                          "maxPriority")                              \
+  template(destroyed_name,                            "destroyed")                                \
+  template(vmAllowSuspension_name,                    "vmAllowSuspension")                        \
+  template(nthreads_name,                             "nthreads")                                 \
+  template(ngroups_name,                              "ngroups")                                  \
+  template(shutdown_method_name,                      "shutdown")                                 \
+  template(finalize_method_name,                      "finalize")                                 \
+  template(reference_lock_name,                       "lock")                                     \
+  template(reference_discovered_name,                 "discovered")                               \
+  template(run_finalizers_on_exit_name,               "runFinalizersOnExit")                      \
+  template(uncaughtException_name,                    "uncaughtException")                        \
+  template(dispatchUncaughtException_name,            "dispatchUncaughtException")                \
+  template(initializeSystemClass_name,                "initializeSystemClass")                    \
+  template(loadClass_name,                            "loadClass")                                \
+  template(loadClassInternal_name,                    "loadClassInternal")                        \
+  template(get_name,                                  "get")                                      \
+  template(put_name,                                  "put")                                      \
+  template(type_name,                                 "type")                                     \
+  template(findNative_name,                           "findNative")                               \
+  template(deadChild_name,                            "deadChild")                                \
+  template(addClass_name,                             "addClass")                                 \
+  template(getFromClass_name,                         "getFromClass")                             \
+  template(dispatch_name,                             "dispatch")                                 \
+  template(getSystemClassLoader_name,                 "getSystemClassLoader")                     \
+  template(fillInStackTrace_name,                     "fillInStackTrace")                         \
+  template(getCause_name,                             "getCause")                                 \
+  template(initCause_name,                            "initCause")                                \
+  template(setProperty_name,                          "setProperty")                              \
+  template(getProperty_name,                          "getProperty")                              \
+  template(context_name,                              "context")                                  \
+  template(privilegedContext_name,                    "privilegedContext")                        \
+  template(contextClassLoader_name,                   "contextClassLoader")                       \
+  template(inheritedAccessControlContext_name,        "inheritedAccessControlContext")            \
+  template(isPrivileged_name,                         "isPrivileged")                             \
+  template(wait_name,                                 "wait")                                     \
+  template(checkPackageAccess_name,                   "checkPackageAccess")                       \
+  template(stackSize_name,                            "stackSize")                                \
+  template(thread_id_name,                            "tid")                                      \
+  template(newInstance0_name,                         "newInstance0")                             \
+  template(limit_name,                                "limit")                                    \
+  template(forName_name,                              "forName")                                  \
+  template(forName0_name,                             "forName0")                                 \
+  template(isJavaIdentifierStart_name,                "isJavaIdentifierStart")                    \
+  template(isJavaIdentifierPart_name,                 "isJavaIdentifierPart")                     \
+  template(exclusive_owner_thread_name,               "exclusiveOwnerThread")                     \
+  template(park_blocker_name,                         "parkBlocker")                              \
+  template(park_event_name,                           "nativeParkEventPointer")                   \
+  template(value_name,                                "value")                                    \
+                                                                                                  \
+  /* non-intrinsic name/signature pairs: */                                                       \
+  template(register_method_name,                      "register")                                 \
+  do_alias(register_method_signature,         object_void_signature)                              \
+                                                                                                  \
+  /* name symbols needed by intrinsics */                                                         \
+  VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
+                                                                                                  \
+  /* common signatures names */                                                                   \
+  template(void_method_signature,                     "()V")                                      \
+  template(void_int_signature,                        "()I")                                      \
+  template(void_long_signature,                       "()J")                                      \
+  template(void_boolean_signature,                    "()Z")                                      \
+  template(int_void_signature,                        "(I)V")                                     \
+  template(int_int_signature,                         "(I)I")                                     \
+  template(int_bool_signature,                        "(I)Z")                                     \
+  template(float_int_signature,                       "(F)I")                                     \
+  template(double_long_signature,                     "(D)J")                                     \
+  template(double_double_signature,                   "(D)D")                                     \
+  template(int_float_signature,                       "(I)F")                                     \
+  template(long_long_signature,                       "(J)J")                                     \
+  template(long_double_signature,                     "(J)D")                                     \
+  template(byte_signature,                            "B")                                        \
+  template(char_signature,                            "C")                                        \
+  template(double_signature,                          "D")                                        \
+  template(float_signature,                           "F")                                        \
+  template(int_signature,                             "I")                                        \
+  template(long_signature,                            "J")                                        \
+  template(short_signature,                           "S")                                        \
+  template(bool_signature,                            "Z")                                        \
+  template(void_signature,                            "V")                                        \
+  template(byte_array_signature,                      "[B")                                       \
+  template(char_array_signature,                      "[C")                                       \
+  template(object_void_signature,                     "(Ljava/lang/Object;)V")                    \
+  template(object_int_signature,                      "(Ljava/lang/Object;)I")                    \
+  template(object_boolean_signature,                  "(Ljava/lang/Object;)Z")                    \
+  template(string_void_signature,                     "(Ljava/lang/String;)V")                    \
+  template(string_int_signature,                      "(Ljava/lang/String;)I")                    \
+  template(throwable_void_signature,                  "(Ljava/lang/Throwable;)V")                 \
+  template(void_throwable_signature,                  "()Ljava/lang/Throwable;")                  \
+  template(throwable_throwable_signature,             "(Ljava/lang/Throwable;)Ljava/lang/Throwable;")             \
+  template(class_void_signature,                      "(Ljava/lang/Class;)V")                     \
+  template(class_int_signature,                       "(Ljava/lang/Class;)I")                     \
+  template(class_boolean_signature,                   "(Ljava/lang/Class;)Z")                     \
+  template(throwable_string_void_signature,           "(Ljava/lang/Throwable;Ljava/lang/String;)V")               \
+  template(string_array_void_signature,               "([Ljava/lang/String;)V")                                   \
+  template(string_array_string_array_void_signature,  "([Ljava/lang/String;[Ljava/lang/String;)V")                \
+  template(thread_throwable_void_signature,           "(Ljava/lang/Thread;Ljava/lang/Throwable;)V")               \
+  template(thread_void_signature,                     "(Ljava/lang/Thread;)V")                                    \
+  template(threadgroup_runnable_void_signature,       "(Ljava/lang/ThreadGroup;Ljava/lang/Runnable;)V")           \
+  template(threadgroup_string_void_signature,         "(Ljava/lang/ThreadGroup;Ljava/lang/String;)V")             \
+  template(string_class_signature,                    "(Ljava/lang/String;)Ljava/lang/Class;")                    \
+  template(object_object_object_signature,            "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
+  template(string_string_string_signature,            "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;") \
+  template(string_string_signature,                   "(Ljava/lang/String;)Ljava/lang/String;")                   \
+  template(classloader_string_long_signature,         "(Ljava/lang/ClassLoader;Ljava/lang/String;)J")             \
+  template(byte_array_void_signature,                 "([B)V")                                                    \
+  template(char_array_void_signature,                 "([C)V")                                                    \
+  template(int_int_void_signature,                    "(II)V")                                                    \
+  template(long_long_void_signature,                  "(JJ)V")                                                    \
+  template(void_classloader_signature,                "()Ljava/lang/ClassLoader;")                                \
+  template(void_object_signature,                     "()Ljava/lang/Object;")                                     \
+  template(void_class_signature,                      "()Ljava/lang/Class;")                                      \
+  template(object_array_object_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
+  template(exception_void_signature,                  "(Ljava/lang/Exception;)V")                                 \
+  template(protectiondomain_signature,                "[Ljava/security/ProtectionDomain;")                        \
+  template(accesscontrolcontext_signature,            "Ljava/security/AccessControlContext;")                     \
+  template(class_protectiondomain_signature,          "(Ljava/lang/Class;Ljava/security/ProtectionDomain;)V")     \
+  template(thread_signature,                          "Ljava/lang/Thread;")                                       \
+  template(thread_array_signature,                    "[Ljava/lang/Thread;")                                      \
+  template(threadgroup_signature,                     "Ljava/lang/ThreadGroup;")                                  \
+  template(threadgroup_array_signature,               "[Ljava/lang/ThreadGroup;")                                 \
+  template(class_array_signature,                     "[Ljava/lang/Class;")                                       \
+  template(classloader_signature,                     "Ljava/lang/ClassLoader;")                                  \
+  template(object_signature,                          "Ljava/lang/Object;")                                       \
+  template(class_signature,                           "Ljava/lang/Class;")                                        \
+  template(string_signature,                          "Ljava/lang/String;")                                       \
+  template(reference_signature,                       "Ljava/lang/ref/Reference;")                                \
+  /* signature symbols needed by intrinsics */                                                                    \
+  VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE)            \
+                                                                                                                  \
+  /* symbol aliases needed by intrinsics */                                                                       \
+  VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, do_alias)           \
+                                                                                                                  \
+  /* returned by the C1 compiler in case there's not enough memory to allocate a new symbol */                    \
+  template(dummy_symbol_oop,                          "illegal symbol")                                           \
+                                                                                                                  \
+  /* used by ClassFormatError when class name is not known yet */                                                 \
+  template(unknown_class_name,                        "<Unknown>")                                                \
+                                                                                                                  \
+  /* JVM monitoring and management support */                                                                     \
+  template(java_lang_StackTraceElement_array,          "[Ljava/lang/StackTraceElement;")                          \
+  template(java_lang_management_ThreadState,           "java/lang/management/ThreadState")                        \
+  template(java_lang_management_MemoryUsage,           "java/lang/management/MemoryUsage")                        \
+  template(java_lang_management_ThreadInfo,            "java/lang/management/ThreadInfo")                         \
+  template(sun_management_ManagementFactory,           "sun/management/ManagementFactory")                        \
+  template(sun_management_Sensor,                      "sun/management/Sensor")                                   \
+  template(sun_management_Agent,                       "sun/management/Agent")                                    \
+  template(createMemoryPoolMBean_name,                 "createMemoryPoolMBean")                                   \
+  template(createMemoryManagerMBean_name,              "createMemoryManagerMBean")                                \
+  template(createGarbageCollectorMBean_name,           "createGarbageCollectorMBean")                             \
+  template(createMemoryPoolMBean_signature,            "(Ljava/lang/String;ZJJ)Ljava/lang/management/MemoryPoolMBean;") \
+  template(createMemoryManagerMBean_signature,         "(Ljava/lang/String;)Ljava/lang/management/MemoryManagerMBean;") \
+  template(createGarbageCollectorMBean_signature,      "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/management/GarbageCollectorMBean;") \
+  template(trigger_name,                               "trigger")                                                 \
+  template(clear_name,                                 "clear")                                                   \
+  template(trigger_method_signature,                   "(ILjava/lang/management/MemoryUsage;)V")                                                 \
+  template(startAgent_name,                            "startAgent")                                              \
+  template(java_lang_management_ThreadInfo_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;)V") \
+  template(java_lang_management_ThreadInfo_with_locks_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;[Ljava/lang/Object;[I[Ljava/lang/Object;)V") \
+  template(long_long_long_long_void_signature,         "(JJJJ)V")                                                 \
+                                                                                                                  \
+  template(java_lang_management_MemoryPoolMXBean,      "java/lang/management/MemoryPoolMXBean")                   \
+  template(java_lang_management_MemoryManagerMXBean,   "java/lang/management/MemoryManagerMXBean")                \
+  template(java_lang_management_GarbageCollectorMXBean,"java/lang/management/GarbageCollectorMXBean")             \
+  template(createMemoryPool_name,                      "createMemoryPool")                                        \
+  template(createMemoryManager_name,                   "createMemoryManager")                                     \
+  template(createGarbageCollector_name,                "createGarbageCollector")                                  \
+  template(createMemoryPool_signature,                 "(Ljava/lang/String;ZJJ)Ljava/lang/management/MemoryPoolMXBean;") \
+  template(createMemoryManager_signature,              "(Ljava/lang/String;)Ljava/lang/management/MemoryManagerMXBean;") \
+  template(createGarbageCollector_signature,           "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/management/GarbageCollectorMXBean;") \
+  template(addThreadDumpForMonitors_name,              "addThreadDumpForMonitors")                                \
+  template(addThreadDumpForSynchronizers_name,         "addThreadDumpForSynchronizers")                           \
+  template(addThreadDumpForMonitors_signature,         "(Ljava/lang/management/ThreadInfo;[Ljava/lang/Object;[I)V") \
+  template(addThreadDumpForSynchronizers_signature,    "(Ljava/lang/management/ThreadInfo;[Ljava/lang/Object;)V")   \
+                                                                                                                  \
+  /* JVMTI/java.lang.instrument support and VM Attach mechanism */                                                \
+  template(sun_misc_VMSupport,                         "sun/misc/VMSupport")                                      \
+  template(appendToClassPathForInstrumentation_name,   "appendToClassPathForInstrumentation")                     \
+  do_alias(appendToClassPathForInstrumentation_signature, string_void_signature)                                  \
+  template(serializePropertiesToByteArray_name,        "serializePropertiesToByteArray")                          \
+  template(serializePropertiesToByteArray_signature,   "()[B")                                                    \
+  template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
+  template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
+  /*end*/
+ 
+                
+
+// Here are all the intrinsics known to the runtime and the CI.
+// Each intrinsic consists of a public enum name (like _hashCode),
+// followed by a specification of its klass, name, and signature:
+//    do_intrinsic(<id>,  <klass>,  <name>, <sig>, <FCODE>)
+//
+// If you add an intrinsic here, you must also define its name
+// and signature as members of the VM symbols.  The VM symbols for
+// the intrinsic name and signature may be defined above.
+//
+// Because the VM_SYMBOLS_DO macro makes reference to VM_INTRINSICS_DO,
+// you can also define an intrinsic's name and/or signature locally to the
+// intrinsic, if this makes sense.  (It often does make sense.)
+//
+// For example:
+//    do_intrinsic(_foo,  java_lang_Object,  foo_name, foo_signature, F_xx)
+//     do_name(     foo_name, "foo")
+//     do_signature(foo_signature, "()F")
+// klass      = vmSymbols::java_lang_Object()
+// name       = vmSymbols::foo_name()
+// signature  = vmSymbols::foo_signature()
+//
+// The name and/or signature might be a "well known" symbol
+// like "equals" or "()I", in which case there will be no local
+// re-definition of the symbol.
+//
+// The do_class, do_name, and do_signature calls are all used for the
+// same purpose:  Define yet another VM symbol.  They could all be merged
+// into a common 'do_symbol' call, but it seems useful to record our
+// intentions here about kinds of symbols (class vs. name vs. signature).
+//
+// The F_xx is one of the Flags enum; see below.
+//
+// for Emacs: (let ((c-backslash-column 120) (c-backslash-max-column 120)) (c-backslash-region (point) (point-max) nil t))
+#define VM_INTRINSICS_DO(do_intrinsic, do_class, do_name, do_signature, do_alias)                                       \
+  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,      F_R)   \
+  /*    (symbol object_initializer_name defined above) */                                                               \
+                                                                                                                        \
+  do_intrinsic(_hashCode,                 java_lang_Object,       hashCode_name, void_int_signature,             F_R)   \
+   do_name(     hashCode_name,                                   "hashCode")                                            \
+  do_intrinsic(_getClass,                 java_lang_Object,       getClass_name, void_class_signature,           F_R)   \
+   do_name(     getClass_name,                                   "getClass")                                            \
+  do_intrinsic(_clone,                    java_lang_Object,       clone_name, void_object_signature,             F_R)   \
+   do_name(     clone_name,                                      "clone")                                               \
+                                                                                                                        \
+  /* Math & StrictMath intrinsics are defined in terms of just a few signatures: */                                     \
+  do_class(java_lang_Math,                "java/lang/Math")                                                             \
+  do_class(java_lang_StrictMath,          "java/lang/StrictMath")                                                       \
+  do_signature(double2_double_signature,  "(DD)D")                                                                      \
+  do_signature(int2_int_signature,        "(II)I")                                                                      \
+                                                                                                                        \
+  /* here are the math names, all together: */                                                                          \
+  do_name(abs_name,"abs")       do_name(sin_name,"sin")         do_name(cos_name,"cos")                                 \
+  do_name(tan_name,"tan")       do_name(atan2_name,"atan2")     do_name(sqrt_name,"sqrt")                               \
+  do_name(log_name,"log")       do_name(log10_name,"log10")     do_name(pow_name,"pow")                                 \
+  do_name(exp_name,"exp")       do_name(min_name,"min")         do_name(max_name,"max")                                 \
+                                                                                                                        \
+  do_intrinsic(_dabs,                     java_lang_Math,         abs_name,   double_double_signature,           F_S)   \
+  do_intrinsic(_dsin,                     java_lang_Math,         sin_name,   double_double_signature,           F_S)   \
+  do_intrinsic(_dcos,                     java_lang_Math,         cos_name,   double_double_signature,           F_S)   \
+  do_intrinsic(_dtan,                     java_lang_Math,         tan_name,   double_double_signature,           F_S)   \
+  do_intrinsic(_datan2,                   java_lang_Math,         atan2_name, double2_double_signature,          F_S)   \
+  do_intrinsic(_dsqrt,                    java_lang_Math,         sqrt_name,  double_double_signature,           F_S)   \
+  do_intrinsic(_dlog,                     java_lang_Math,         log_name,   double_double_signature,           F_S)   \
+  do_intrinsic(_dlog10,                   java_lang_Math,         log10_name, double_double_signature,           F_S)   \
+  do_intrinsic(_dpow,                     java_lang_Math,         pow_name,   double2_double_signature,          F_S)   \
+  do_intrinsic(_dexp,                     java_lang_Math,         exp_name,   double_double_signature,           F_S)   \
+  do_intrinsic(_min,                      java_lang_Math,         min_name,   int2_int_signature,                F_S)   \
+  do_intrinsic(_max,                      java_lang_Math,         max_name,   int2_int_signature,                F_S)   \
+                                                                                                                        \
+  do_intrinsic(_floatToRawIntBits,        java_lang_Float,        floatToRawIntBits_name,   float_int_signature, F_S)   \
+   do_name(     floatToRawIntBits_name,                          "floatToRawIntBits")                                   \
+  do_intrinsic(_floatToIntBits,           java_lang_Float,        floatToIntBits_name,      float_int_signature, F_S)   \
+   do_name(     floatToIntBits_name,                             "floatToIntBits")                                      \
+  do_intrinsic(_intBitsToFloat,           java_lang_Float,        intBitsToFloat_name,      int_float_signature, F_S)   \
+   do_name(     intBitsToFloat_name,                             "intBitsToFloat")                                      \
+  do_intrinsic(_doubleToRawLongBits,      java_lang_Double,       doubleToRawLongBits_name, double_long_signature, F_S) \
+   do_name(     doubleToRawLongBits_name,                        "doubleToRawLongBits")                                 \
+  do_intrinsic(_doubleToLongBits,         java_lang_Double,       doubleToLongBits_name,    double_long_signature, F_S) \
+   do_name(     doubleToLongBits_name,                           "doubleToLongBits")                                    \
+  do_intrinsic(_longBitsToDouble,         java_lang_Double,       longBitsToDouble_name,    long_double_signature, F_S) \
+   do_name(     longBitsToDouble_name,                           "longBitsToDouble")                                    \
+  do_intrinsic(_reverseBytes_i,           java_lang_Integer,      reverseBytes_name,        int_int_signature,   F_S)   \
+   do_name(     reverseBytes_name,                               "reverseBytes")                                        \
+  do_intrinsic(_reverseBytes_l,           java_lang_Long,         reverseBytes_name,        long_long_signature, F_S)   \
+    /*  (symbol reverseBytes_name defined above) */                                                                     \
+                                                                                                                        \
+  do_intrinsic(_identityHashCode,         java_lang_System,       identityHashCode_name, object_int_signature,   F_S)   \
+   do_name(     identityHashCode_name,                           "identityHashCode")                                    \
+  do_intrinsic(_currentTimeMillis,        java_lang_System,       currentTimeMillis_name, void_long_signature,   F_S)   \
+                                                                                                                        \
+   do_name(     currentTimeMillis_name,                          "currentTimeMillis")                                   \
+  do_intrinsic(_nanoTime,                 java_lang_System,       nanoTime_name,          void_long_signature,   F_S)   \
+   do_name(     nanoTime_name,                                   "nanoTime")                                            \
+                                                                                                                        \
+  do_intrinsic(_arraycopy,                java_lang_System,       arraycopy_name, arraycopy_signature,           F_S)   \
+   do_name(     arraycopy_name,                                  "arraycopy")                                           \
+   do_signature(arraycopy_signature,                             "(Ljava/lang/Object;ILjava/lang/Object;II)V")          \
+  do_intrinsic(_isInterrupted,            java_lang_Thread,       isInterrupted_name, isInterrupted_signature,   F_R)   \
+   do_name(     isInterrupted_name,                              "isInterrupted")                                       \
+   do_signature(isInterrupted_signature,                         "(Z)Z")                                                \
+  do_intrinsic(_currentThread,            java_lang_Thread,       currentThread_name, currentThread_signature,   F_S)   \
+   do_name(     currentThread_name,                              "currentThread")                                       \
+   do_signature(currentThread_signature,                         "()Ljava/lang/Thread;")                                \
+                                                                                                                        \
+  /* reflective intrinsics, for java/lang/Class, etc. */                                                                \
+  do_intrinsic(_isAssignableFrom,         java_lang_Class,        isAssignableFrom_name, class_boolean_signature, F_RN) \
+   do_name(     isAssignableFrom_name,                           "isAssignableFrom")                                    \
+  do_intrinsic(_isInstance,               java_lang_Class,        isInstance_name, object_boolean_signature,     F_RN)  \
+   do_name(     isInstance_name,                                 "isInstance")                                          \
+  do_intrinsic(_getModifiers,             java_lang_Class,        getModifiers_name, void_int_signature,         F_RN)  \
+   do_name(     getModifiers_name,                               "getModifiers")                                        \
+  do_intrinsic(_isInterface,              java_lang_Class,        isInterface_name, void_boolean_signature,      F_RN)  \
+   do_name(     isInterface_name,                                "isInterface")                                         \
+  do_intrinsic(_isArray,                  java_lang_Class,        isArray_name, void_boolean_signature,          F_RN)  \
+   do_name(     isArray_name,                                    "isArray")                                             \
+  do_intrinsic(_isPrimitive,              java_lang_Class,        isPrimitive_name, void_boolean_signature,      F_RN)  \
+   do_name(     isPrimitive_name,                                "isPrimitive")                                         \
+  do_intrinsic(_getSuperclass,            java_lang_Class,        getSuperclass_name, void_class_signature,      F_RN)  \
+   do_name(     getSuperclass_name,                              "getSuperclass")                                       \
+  do_intrinsic(_getComponentType,         java_lang_Class,        getComponentType_name, void_class_signature,   F_RN)  \
+   do_name(     getComponentType_name,                           "getComponentType")                                    \
+                                                                                                                        \
+  do_intrinsic(_getClassAccessFlags,      sun_reflect_Reflection, getClassAccessFlags_name, class_int_signature, F_SN)  \
+   do_name(     getClassAccessFlags_name,                        "getClassAccessFlags")                                 \
+  do_intrinsic(_getLength,                java_lang_reflect_Array, getLength_name, object_int_signature,         F_SN)  \
+   do_name(     getLength_name,                                   "getLength")                                          \
+                                                                                                                        \
+  do_intrinsic(_getCallerClass,           sun_reflect_Reflection, getCallerClass_name, getCallerClass_signature, F_SN)  \
+   do_name(     getCallerClass_name,                             "getCallerClass")                                      \
+   do_signature(getCallerClass_signature,                        "(I)Ljava/lang/Class;")                                \
+                                                                                                                        \
+  do_intrinsic(_newArray,                 java_lang_reflect_Array, newArray_name, newArray_signature,            F_SN)  \
+   do_name(     newArray_name,                                    "newArray")                                           \
+   do_signature(newArray_signature,                               "(Ljava/lang/Class;I)Ljava/lang/Object;")             \
+                                                                                                                        \
+  do_intrinsic(_copyOf,                   java_util_Arrays,       copyOf_name, copyOf_signature,                 F_S)   \
+   do_name(     copyOf_name,                                     "copyOf")                                              \
+   do_signature(copyOf_signature,             "([Ljava/lang/Object;ILjava/lang/Class;)[Ljava/lang/Object;")             \
+                                                                                                                        \
+  do_intrinsic(_copyOfRange,              java_util_Arrays,       copyOfRange_name, copyOfRange_signature,       F_S)   \
+   do_name(     copyOfRange_name,                                "copyOfRange")                                         \
+   do_signature(copyOfRange_signature,        "([Ljava/lang/Object;IILjava/lang/Class;)[Ljava/lang/Object;")            \
+                                                                                                                        \
+  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
+  /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
+                                                                                                                        \
+  do_intrinsic(_compareTo,                java_lang_String,       compareTo_name, string_int_signature,          F_R)   \
+   do_name(     compareTo_name,                                  "compareTo")                                           \
+  do_intrinsic(_indexOf,                  java_lang_String,       indexOf_name, string_int_signature,            F_R)   \
+   do_name(     indexOf_name,                                    "indexOf")                                             \
+                                                                                                                        \
+  do_class(java_nio_Buffer,               "java/nio/Buffer")                                                            \
+  do_intrinsic(_checkIndex,               java_nio_Buffer,        checkIndex_name, int_int_signature,            F_R)   \
+   do_name(     checkIndex_name,                                 "checkIndex")                                          \
+                                                                                                                        \
+  do_class(sun_misc_AtomicLongCSImpl,     "sun/misc/AtomicLongCSImpl")                                                  \
+  do_intrinsic(_get_AtomicLong,           sun_misc_AtomicLongCSImpl, get_name, void_long_signature,              F_R)   \
+  /*   (symbols get_name and void_long_signature defined above) */                                                      \
+                                                                                                                        \
+  do_intrinsic(_attemptUpdate,            sun_misc_AtomicLongCSImpl, attemptUpdate_name, attemptUpdate_signature, F_R)  \
+   do_name(     attemptUpdate_name,                                 "attemptUpdate")                                    \
+   do_signature(attemptUpdate_signature,                            "(JJ)Z")                                            \
+                                                                                                                        \
+  /* support for sun.misc.Unsafe */                                                                                     \
+  do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
+                                                                                                                        \
+  do_intrinsic(_allocateInstance,         sun_misc_Unsafe,        allocateInstance_name, allocateInstance_signature, F_RN) \
+   do_name(     allocateInstance_name,                           "allocateInstance")                                    \
+   do_signature(allocateInstance_signature,   "(Ljava/lang/Class;)Ljava/lang/Object;")                                  \
+  do_intrinsic(_copyMemory,               sun_misc_Unsafe,        copyMemory_name, copyMemory_signature,         F_RN)  \
+   do_name(     copyMemory_name,                                 "copyMemory")                                          \
+   do_signature(copyMemory_signature,         "(Ljava/lang/Object;JLjava/lang/Object;JJ)V")                             \
+  do_intrinsic(_park,                     sun_misc_Unsafe,        park_name, park_signature,                     F_RN)  \
+   do_name(     park_name,                                       "park")                                                \
+   do_signature(park_signature,                                  "(ZJ)V")                                               \
+  do_intrinsic(_unpark,                   sun_misc_Unsafe,        unpark_name, unpark_signature,                 F_RN)  \
+   do_name(     unpark_name,                                     "unpark")                                              \
+   do_alias(    unpark_signature,                               /*(LObject;)V*/ object_void_signature)                  \
+                                                                                                                        \
+  /* unsafe memory references (there are a lot of them...) */                                                           \
+  do_signature(getObject_signature,       "(Ljava/lang/Object;J)Ljava/lang/Object;")                                    \
+  do_signature(putObject_signature,       "(Ljava/lang/Object;JLjava/lang/Object;)V")                                   \
+  do_signature(getBoolean_signature,      "(Ljava/lang/Object;J)Z")                                                     \
+  do_signature(putBoolean_signature,      "(Ljava/lang/Object;JZ)V")                                                    \
+  do_signature(getByte_signature,         "(Ljava/lang/Object;J)B")                                                     \
+  do_signature(putByte_signature,         "(Ljava/lang/Object;JB)V")                                                    \
+  do_signature(getShort_signature,        "(Ljava/lang/Object;J)S")                                                     \
+  do_signature(putShort_signature,        "(Ljava/lang/Object;JS)V")                                                    \
+  do_signature(getChar_signature,         "(Ljava/lang/Object;J)C")                                                     \
+  do_signature(putChar_signature,         "(Ljava/lang/Object;JC)V")                                                    \
+  do_signature(getInt_signature,          "(Ljava/lang/Object;J)I")                                                     \
+  do_signature(putInt_signature,          "(Ljava/lang/Object;JI)V")                                                    \
+  do_signature(getLong_signature,         "(Ljava/lang/Object;J)J")                                                     \
+  do_signature(putLong_signature,         "(Ljava/lang/Object;JJ)V")                                                    \
+  do_signature(getFloat_signature,        "(Ljava/lang/Object;J)F")                                                     \
+  do_signature(putFloat_signature,        "(Ljava/lang/Object;JF)V")                                                    \
+  do_signature(getDouble_signature,       "(Ljava/lang/Object;J)D")                                                     \
+  do_signature(putDouble_signature,       "(Ljava/lang/Object;JD)V")                                                    \
+                                                                                                                        \
+  do_name(getObject_name,"getObject")           do_name(putObject_name,"putObject")                                     \
+  do_name(getBoolean_name,"getBoolean")         do_name(putBoolean_name,"putBoolean")                                   \
+  do_name(getByte_name,"getByte")               do_name(putByte_name,"putByte")                                         \
+  do_name(getShort_name,"getShort")             do_name(putShort_name,"putShort")                                       \
+  do_name(getChar_name,"getChar")               do_name(putChar_name,"putChar")                                         \
+  do_name(getInt_name,"getInt")                 do_name(putInt_name,"putInt")                                           \
+  do_name(getLong_name,"getLong")               do_name(putLong_name,"putLong")                                         \
+  do_name(getFloat_name,"getFloat")             do_name(putFloat_name,"putFloat")                                       \
+  do_name(getDouble_name,"getDouble")           do_name(putDouble_name,"putDouble")                                     \
+                                                                                                                        \
+  do_intrinsic(_getObject,                sun_misc_Unsafe,        getObject_name, getObject_signature,           F_RN)  \
+  do_intrinsic(_getBoolean,               sun_misc_Unsafe,        getBoolean_name, getBoolean_signature,         F_RN)  \
+  do_intrinsic(_getByte,                  sun_misc_Unsafe,        getByte_name, getByte_signature,               F_RN)  \
+  do_intrinsic(_getShort,                 sun_misc_Unsafe,        getShort_name, getShort_signature,             F_RN)  \
+  do_intrinsic(_getChar,                  sun_misc_Unsafe,        getChar_name, getChar_signature,               F_RN)  \
+  do_intrinsic(_getInt,                   sun_misc_Unsafe,        getInt_name, getInt_signature,                 F_RN)  \
+  do_intrinsic(_getLong,                  sun_misc_Unsafe,        getLong_name, getLong_signature,               F_RN)  \
+  do_intrinsic(_getFloat,                 sun_misc_Unsafe,        getFloat_name, getFloat_signature,             F_RN)  \
+  do_intrinsic(_getDouble,                sun_misc_Unsafe,        getDouble_name, getDouble_signature,           F_RN)  \
+  do_intrinsic(_putObject,                sun_misc_Unsafe,        putObject_name, putObject_signature,           F_RN)  \
+  do_intrinsic(_putBoolean,               sun_misc_Unsafe,        putBoolean_name, putBoolean_signature,         F_RN)  \
+  do_intrinsic(_putByte,                  sun_misc_Unsafe,        putByte_name, putByte_signature,               F_RN)  \
+  do_intrinsic(_putShort,                 sun_misc_Unsafe,        putShort_name, putShort_signature,             F_RN)  \
+  do_intrinsic(_putChar,                  sun_misc_Unsafe,        putChar_name, putChar_signature,               F_RN)  \
+  do_intrinsic(_putInt,                   sun_misc_Unsafe,        putInt_name, putInt_signature,                 F_RN)  \
+  do_intrinsic(_putLong,                  sun_misc_Unsafe,        putLong_name, putLong_signature,               F_RN)  \
+  do_intrinsic(_putFloat,                 sun_misc_Unsafe,        putFloat_name, putFloat_signature,             F_RN)  \
+  do_intrinsic(_putDouble,                sun_misc_Unsafe,        putDouble_name, putDouble_signature,           F_RN)  \
+                                                                                                                        \
+  do_name(getObjectVolatile_name,"getObjectVolatile")   do_name(putObjectVolatile_name,"putObjectVolatile")             \
+  do_name(getBooleanVolatile_name,"getBooleanVolatile") do_name(putBooleanVolatile_name,"putBooleanVolatile")           \
+  do_name(getByteVolatile_name,"getByteVolatile")       do_name(putByteVolatile_name,"putByteVolatile")                 \
+  do_name(getShortVolatile_name,"getShortVolatile")     do_name(putShortVolatile_name,"putShortVolatile")               \
+  do_name(getCharVolatile_name,"getCharVolatile")       do_name(putCharVolatile_name,"putCharVolatile")                 \
+  do_name(getIntVolatile_name,"getIntVolatile")         do_name(putIntVolatile_name,"putIntVolatile")                   \
+  do_name(getLongVolatile_name,"getLongVolatile")       do_name(putLongVolatile_name,"putLongVolatile")                 \
+  do_name(getFloatVolatile_name,"getFloatVolatile")     do_name(putFloatVolatile_name,"putFloatVolatile")               \
+  do_name(getDoubleVolatile_name,"getDoubleVolatile")   do_name(putDoubleVolatile_name,"putDoubleVolatile")             \
+                                                                                                                        \
+  do_intrinsic(_getObjectVolatile,        sun_misc_Unsafe,        getObjectVolatile_name, getObject_signature,   F_RN)  \
+  do_intrinsic(_getBooleanVolatile,       sun_misc_Unsafe,        getBooleanVolatile_name, getBoolean_signature, F_RN)  \
+  do_intrinsic(_getByteVolatile,          sun_misc_Unsafe,        getByteVolatile_name, getByte_signature,       F_RN)  \
+  do_intrinsic(_getShortVolatile,         sun_misc_Unsafe,        getShortVolatile_name, getShort_signature,     F_RN)  \
+  do_intrinsic(_getCharVolatile,          sun_misc_Unsafe,        getCharVolatile_name, getChar_signature,       F_RN)  \
+  do_intrinsic(_getIntVolatile,           sun_misc_Unsafe,        getIntVolatile_name, getInt_signature,         F_RN)  \
+  do_intrinsic(_getLongVolatile,          sun_misc_Unsafe,        getLongVolatile_name, getLong_signature,       F_RN)  \
+  do_intrinsic(_getFloatVolatile,         sun_misc_Unsafe,        getFloatVolatile_name, getFloat_signature,     F_RN)  \
+  do_intrinsic(_getDoubleVolatile,        sun_misc_Unsafe,        getDoubleVolatile_name, getDouble_signature,   F_RN)  \
+  do_intrinsic(_putObjectVolatile,        sun_misc_Unsafe,        putObjectVolatile_name, putObject_signature,   F_RN)  \
+  do_intrinsic(_putBooleanVolatile,       sun_misc_Unsafe,        putBooleanVolatile_name, putBoolean_signature, F_RN)  \
+  do_intrinsic(_putByteVolatile,          sun_misc_Unsafe,        putByteVolatile_name, putByte_signature,       F_RN)  \
+  do_intrinsic(_putShortVolatile,         sun_misc_Unsafe,        putShortVolatile_name, putShort_signature,     F_RN)  \
+  do_intrinsic(_putCharVolatile,          sun_misc_Unsafe,        putCharVolatile_name, putChar_signature,       F_RN)  \
+  do_intrinsic(_putIntVolatile,           sun_misc_Unsafe,        putIntVolatile_name, putInt_signature,         F_RN)  \
+  do_intrinsic(_putLongVolatile,          sun_misc_Unsafe,        putLongVolatile_name, putLong_signature,       F_RN)  \
+  do_intrinsic(_putFloatVolatile,         sun_misc_Unsafe,        putFloatVolatile_name, putFloat_signature,     F_RN)  \
+  do_intrinsic(_putDoubleVolatile,        sun_misc_Unsafe,        putDoubleVolatile_name, putDouble_signature,   F_RN)  \
+                                                                                                                        \
+  /* %%% these are redundant except perhaps for getAddress, but Unsafe has native methods for them */                   \
+  do_signature(getByte_raw_signature,     "(J)B")                                                                       \
+  do_signature(putByte_raw_signature,     "(JB)V")                                                                      \
+  do_signature(getShort_raw_signature,    "(J)S")                                                                       \
+  do_signature(putShort_raw_signature,    "(JS)V")                                                                      \
+  do_signature(getChar_raw_signature,     "(J)C")                                                                       \
+  do_signature(putChar_raw_signature,     "(JC)V")                                                                      \
+  do_signature(getInt_raw_signature,      "(J)I")                                                                       \
+  do_signature(putInt_raw_signature,      "(JI)V")                                                                      \
+      do_alias(getLong_raw_signature,    /*(J)J*/ long_long_signature)                                                  \
+      do_alias(putLong_raw_signature,    /*(JJ)V*/ long_long_void_signature)                                            \
+  do_signature(getFloat_raw_signature,    "(J)F")                                                                       \
+  do_signature(putFloat_raw_signature,    "(JF)V")                                                                      \
+      do_alias(getDouble_raw_signature,  /*(J)D*/ long_double_signature)                                                \
+  do_signature(putDouble_raw_signature,   "(JD)V")                                                                      \
+      do_alias(getAddress_raw_signature, /*(J)J*/ long_long_signature)                                                  \
+      do_alias(putAddress_raw_signature, /*(JJ)V*/ long_long_void_signature)                                            \
+                                                                                                                        \
+   do_name(    getAddress_name,           "getAddress")                                                                 \
+   do_name(    putAddress_name,           "putAddress")                                                                 \
+                                                                                                                        \
+  do_intrinsic(_getByte_raw,              sun_misc_Unsafe,        getByte_name, getByte_raw_signature,           F_RN)  \
+  do_intrinsic(_getShort_raw,             sun_misc_Unsafe,        getShort_name, getShort_raw_signature,         F_RN)  \
+  do_intrinsic(_getChar_raw,              sun_misc_Unsafe,        getChar_name, getChar_raw_signature,           F_RN)  \
+  do_intrinsic(_getInt_raw,               sun_misc_Unsafe,        getInt_name, getInt_raw_signature,             F_RN)  \
+  do_intrinsic(_getLong_raw,              sun_misc_Unsafe,        getLong_name, getLong_raw_signature,           F_RN)  \
+  do_intrinsic(_getFloat_raw,             sun_misc_Unsafe,        getFloat_name, getFloat_raw_signature,         F_RN)  \
+  do_intrinsic(_getDouble_raw,            sun_misc_Unsafe,        getDouble_name, getDouble_raw_signature,       F_RN)  \
+  do_intrinsic(_getAddress_raw,           sun_misc_Unsafe,        getAddress_name, getAddress_raw_signature,     F_RN)  \
+  do_intrinsic(_putByte_raw,              sun_misc_Unsafe,        putByte_name, putByte_raw_signature,           F_RN)  \
+  do_intrinsic(_putShort_raw,             sun_misc_Unsafe,        putShort_name, putShort_raw_signature,         F_RN)  \
+  do_intrinsic(_putChar_raw,              sun_misc_Unsafe,        putChar_name, putChar_raw_signature,           F_RN)  \
+  do_intrinsic(_putInt_raw,               sun_misc_Unsafe,        putInt_name, putInt_raw_signature,             F_RN)  \
+  do_intrinsic(_putLong_raw,              sun_misc_Unsafe,        putLong_name, putLong_raw_signature,           F_RN)  \
+  do_intrinsic(_putFloat_raw,             sun_misc_Unsafe,        putFloat_name, putFloat_raw_signature,         F_RN)  \
+  do_intrinsic(_putDouble_raw,            sun_misc_Unsafe,        putDouble_name, putDouble_raw_signature,       F_RN)  \
+  do_intrinsic(_putAddress_raw,           sun_misc_Unsafe,        putAddress_name, putAddress_raw_signature,     F_RN)  \
+                                                                                                                        \
+  do_intrinsic(_compareAndSwapObject,     sun_misc_Unsafe,        compareAndSwapObject_name, compareAndSwapObject_signature, F_RN) \
+   do_name(     compareAndSwapObject_name,                       "compareAndSwapObject")                                \
+   do_signature(compareAndSwapObject_signature,  "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z")          \
+  do_intrinsic(_compareAndSwapLong,       sun_misc_Unsafe,        compareAndSwapLong_name, compareAndSwapLong_signature, F_RN) \
+   do_name(     compareAndSwapLong_name,                         "compareAndSwapLong")                                  \
+   do_signature(compareAndSwapLong_signature,                    "(Ljava/lang/Object;JJJ)Z")                            \
+  do_intrinsic(_compareAndSwapInt,        sun_misc_Unsafe,        compareAndSwapInt_name, compareAndSwapInt_signature, F_RN) \
+   do_name(     compareAndSwapInt_name,                          "compareAndSwapInt")                                   \
+   do_signature(compareAndSwapInt_signature,                     "(Ljava/lang/Object;JII)Z")                            \
+  do_intrinsic(_putOrderedObject,         sun_misc_Unsafe,        putOrderedObject_name, putOrderedObject_signature, F_RN) \
+   do_name(     putOrderedObject_name,                           "putOrderedObject")                                    \
+   do_alias(    putOrderedObject_signature,                     /*(LObject;JLObject;)V*/ putObject_signature)           \
+  do_intrinsic(_putOrderedLong,           sun_misc_Unsafe,        putOrderedLong_name, putOrderedLong_signature, F_RN)  \
+   do_name(     putOrderedLong_name,                             "putOrderedLong")                                      \
+   do_alias(    putOrderedLong_signature,                       /*(Ljava/lang/Object;JJ)V*/ putLong_signature)          \
+  do_intrinsic(_putOrderedInt,            sun_misc_Unsafe,        putOrderedInt_name, putOrderedInt_signature,   F_RN)  \
+   do_name(     putOrderedInt_name,                              "putOrderedInt")                                       \
+   do_alias(    putOrderedInt_signature,                        /*(Ljava/lang/Object;JI)V*/ putInt_signature)           \
+                                                                                                                        \
+  /* prefetch_signature is shared by all prefetch variants */                                                           \
+  do_signature( prefetch_signature,        "(Ljava/lang/Object;J)V")                                                    \
+                                                                                                                        \
+  do_intrinsic(_prefetchRead,             sun_misc_Unsafe,        prefetchRead_name, prefetch_signature,         F_RN)  \
+   do_name(     prefetchRead_name,                               "prefetchRead")                                        \
+  do_intrinsic(_prefetchWrite,            sun_misc_Unsafe,        prefetchWrite_name, prefetch_signature,        F_RN)  \
+   do_name(     prefetchWrite_name,                              "prefetchWrite")                                       \
+  do_intrinsic(_prefetchReadStatic,       sun_misc_Unsafe,        prefetchReadStatic_name, prefetch_signature,   F_SN)  \
+   do_name(     prefetchReadStatic_name,                         "prefetchReadStatic")                                  \
+  do_intrinsic(_prefetchWriteStatic,      sun_misc_Unsafe,        prefetchWriteStatic_name, prefetch_signature,  F_SN)  \
+   do_name(     prefetchWriteStatic_name,                        "prefetchWriteStatic")                                 \
+    /*end*/
+
+
+
+// Class vmSymbols
+
+class vmSymbols: AllStatic {
+ friend class vmSymbolHandles;
+ friend class vmIntrinsics;
+ public:
+  // enum for figuring positions and size of array holding symbolOops
+  enum SID {
+    NO_SID = 0,
+
+    #define VM_SYMBOL_ENUM(name, string) VM_SYMBOL_ENUM_NAME(name),
+    VM_SYMBOLS_DO(VM_SYMBOL_ENUM, VM_ALIAS_IGNORE)
+    #undef VM_SYMBOL_ENUM
+
+    SID_LIMIT,
+
+    #define VM_ALIAS_ENUM(name, def) VM_SYMBOL_ENUM_NAME(name) = VM_SYMBOL_ENUM_NAME(def),
+    VM_SYMBOLS_DO(VM_SYMBOL_IGNORE, VM_ALIAS_ENUM)
+    #undef VM_ALIAS_ENUM
+
+    FIRST_SID = NO_SID + 1
+  };
+  enum {
+    log2_SID_LIMIT = 10         // checked by an assert at start-up
+  };
+
+ private:
+  // The symbol array
+  static symbolOop _symbols[];
+
+  // Field signatures indexed by BasicType.
+  static symbolOop _type_signatures[T_VOID+1];
+
+ public:
+  // Initialization
+  static void initialize(TRAPS);
+  // Accessing
+  #define VM_SYMBOL_DECLARE(name, ignore) \
+    static symbolOop name() { return _symbols[VM_SYMBOL_ENUM_NAME(name)]; }
+  VM_SYMBOLS_DO(VM_SYMBOL_DECLARE, VM_SYMBOL_DECLARE)
+  #undef VM_SYMBOL_DECLARE
+
+  // GC support
+  static void oops_do(OopClosure* f, bool do_all = false);
+
+  static symbolOop type_signature(BasicType t) {
+    assert((uint)t < T_VOID+1, "range check");
+    assert(_type_signatures[t] != NULL, "domain check");
+    return _type_signatures[t];
+  }
+  // inverse of type_signature; returns T_OBJECT if s is not recognized
+  static BasicType signature_type(symbolOop s);
+
+  static symbolOop symbol_at(SID id) {
+    assert(id >= FIRST_SID && id < SID_LIMIT, "oob");
+    assert(_symbols[id] != NULL, "init");
+    return _symbols[id];
+  }
+
+  // Returns symbol's SID if one is assigned, else NO_SID.
+  static SID find_sid(symbolOop symbol);
+
+#ifndef PRODUCT
+  // No need for this in the product:
+  static const char* name_for(SID sid);
+#endif //PRODUCT
+};
+
+
+// Class vmSymbolHandles
+
+class vmSymbolHandles: AllStatic {
+  friend class vmIntrinsics;
+  friend class ciObjectFactory;
+
+ public:
+  // Accessing
+  #define VM_SYMBOL_HANDLE_DECLARE(name, ignore) \
+    static symbolHandle name() { return symbol_handle_at(vmSymbols::VM_SYMBOL_ENUM_NAME(name)); }
+  VM_SYMBOLS_DO(VM_SYMBOL_HANDLE_DECLARE, VM_SYMBOL_HANDLE_DECLARE)
+  #undef VM_SYMBOL_HANDLE_DECLARE
+
+  static symbolHandle symbol_handle_at(vmSymbols::SID id) {
+    return symbolHandle(&vmSymbols::_symbols[(int)id], false);
+  }
+
+  static symbolHandle type_signature(BasicType t) {
+    assert(vmSymbols::type_signature(t) != NULL, "domain check");
+    return symbolHandle(&vmSymbols::_type_signatures[t], false);
+  }
+  // inverse of type_signature; returns T_OBJECT if s is not recognized
+  static BasicType signature_type(symbolHandle s) {
+    return vmSymbols::signature_type(s());
+  }
+};
+
+// VM Intrinsic ID's uniquely identify some very special methods
+class vmIntrinsics: AllStatic {
+  friend class vmSymbols;
+  friend class ciObjectFactory;
+
+ public:
+  // Accessing
+  enum ID {
+    _none = 0,                      // not an intrinsic (default answer)
+
+    #define VM_INTRINSIC_ENUM(id, klass, name, sig, flags)  id,
+    VM_INTRINSICS_DO(VM_INTRINSIC_ENUM,
+                     VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE)
+    #undef VM_INTRINSIC_ENUM
+
+    ID_LIMIT,
+    FIRST_ID = _none + 1
+  };
+
+  enum Flags {
+    // AccessFlags syndromes relevant to intrinsics.
+    F_none = 0,
+    F_R,                        // !static        !synchronized (R="regular")
+    F_S,                        //  static        !synchronized
+    F_RN,                       // !static native !synchronized
+    F_SN                        //  static native !synchronized
+  };
+
+public:
+  static ID ID_from(int raw_id) {
+    assert(raw_id >= (int)_none && raw_id < (int)ID_LIMIT,
+           "must be a valid intrinsic ID");
+    return (ID)raw_id;
+  }
+
+  static const char* name_at(ID id);
+
+  // Given a method's class, name, signature, and access flags, report its ID.
+  static ID find_id(vmSymbols::SID holder,
+                    vmSymbols::SID name,
+                    vmSymbols::SID sig,
+                    jshort flags);
+
+  static void verify_method(ID actual_id, methodOop m) PRODUCT_RETURN;
+
+  // No need for these in the product:
+  static vmSymbols::SID     class_for(ID id);
+  static vmSymbols::SID      name_for(ID id);
+  static vmSymbols::SID signature_for(ID id);
+  static Flags              flags_for(ID id);
+
+  static const char* short_name_as_C_string(ID id, char* buf, int size);
+};
--- a/hotspot/src/share/vm/code/nmethod.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)nmethod.cpp	1.364 07/05/05 17:05:19 JVM"
+#pragma ident "@(#)nmethod.cpp	1.365 07/05/17 15:50:44 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1223,107 +1223,10 @@
   return true;
 }
 
-#ifdef JVMPI_SUPPORT
-// JVMPI - support
-class OffsetBciPair: public ResourceObj {
- private:
-  jint _offset;
-  jint _bci;
- public:
-  OffsetBciPair(): _offset(-1), _bci(-1) {}
-  void set(jint offset, jint bci) { _offset = offset; _bci = bci; }
-  jint bci() const  { return _bci; }
-  jint offset() const { return _offset; }
-};
-
-
-// ------------------------------------------------------------------
-// Helper function for post_compiled_method_load_event
-void nmethod::build_jvmpi_line_number_mapping(compiled_method_t *cm) {
-  // Generate line numbers using PcDesc and ScopeDesc info
-  methodHandle mh(method());
-  if( mh->is_native() || !mh->has_linenumber_table() ) {  // No bci info for safepoints in "native methods"
-    cm->lineno_table_len = 0;
-    cm->lineno_table     = NULL;
-  } else {
-    enum { unused = -1 };
-    int size = mh->code_size();   // byte code length
-
-    // Build mapping from BCIs to source lines
-    GrowableArray<jint> *bci_to_line  = new GrowableArray<jint>(size, size, unused);
-    int index = 0;
-    CompressedLineNumberReadStream stream(mh->compressed_linenumber_table());
-    while( stream.read_pair() ) {
-      bci_to_line->at_put_grow(stream.bci(), stream.line(), unused);
-    }
-    // Define a line for remaining BCIs given the assumption that 
-    // the line mapping provided for a BCI is valid for all following 
-    // BCIs until another line mapping is provided.
-    int previous_line = unused;
-    for( index = 0; index < size; ++index ) {
-      int current_line = bci_to_line->at(index);
-      if( current_line == unused ) {
-        bci_to_line->at_put_grow(index, previous_line);
-      } else {
-        previous_line = current_line;
-      }
-    }
-
-    // Collect information for pairs <offset, bci> from PcDesc and ScopeDesc.
-    // Count PcDescs
-    PcDesc  *pcd = NULL;
-    int pcds_in_method = 0;
-    for( pcd = scopes_pcs_begin(); pcd < scopes_pcs_end(); ++pcd ) {
-      ++pcds_in_method;
-    }
-    // Fill array with OffsetBciPairs
-    int offset_to_bci_length = 0;
-    GrowableArray<OffsetBciPair*>* offset_to_bci = new GrowableArray<OffsetBciPair*>(pcds_in_method, pcds_in_method, NULL);
-    address scopes_data = scopes_data_begin();
-    for( pcd = scopes_pcs_begin(); pcd < scopes_pcs_end(); ++pcd ) {
-      ScopeDesc sc0(this, pcd->scope_decode_offset());
-      ScopeDesc *sd  = &sc0;
-      while( !sd->is_top() ) { sd = sd->sender(); }
-
-      int bci = sd->bci() ;
-      if( bci != InvocationEntryBci ) {
-        int offset = pcd->pc_offset();
-        OffsetBciPair* offset_bci_pair = new OffsetBciPair();
-        offset_bci_pair->set( offset, bci );
-        offset_to_bci->at_put_grow(offset_to_bci_length, offset_bci_pair);
-        ++offset_to_bci_length;
-      }
-    }
-
-    // Combine the mappings, <bci, line> and <offset, bci> into <line, offset>
-    JVMPI_Lineno* line_to_pcOffset = NEW_RESOURCE_ARRAY(JVMPI_Lineno, offset_to_bci_length);
-    index = 0;
-    for (int i = 0; i < offset_to_bci_length; i++) {
-      int offset = offset_to_bci->at(i)->offset();
-      int bci    = offset_to_bci->at(i)->bci();
-      int line   = bci_to_line->at(bci);
-      if( line != unused ) {
-        line_to_pcOffset[index].offset = offset;
-        line_to_pcOffset[index].lineno = line;
-        ++index;
-      }
-    }
-
-    cm->lineno_table_len = offset_to_bci_length;
-    cm->lineno_table     = line_to_pcOffset;
-  }
-  assert( true, "debug breakpoint");
-}
-#endif // JVMPI_SUPPORT
-
 // ------------------------------------------------------------------
 // post_compiled_method_load_event
 // new method for install_code() path
-#ifdef JVMPI_SUPPORT
-// Transfer information from compilation to jvmpi/jvmti
-#else // !JVMPI_SUPPORT
 // Transfer information from compilation to jvmti
-#endif // JVMPI_SUPPORT
 void nmethod::post_compiled_method_load_event() {
 
   methodOop moop = method();
@@ -1335,18 +1238,6 @@
       moop->signature()->bytes(), 
       moop->signature()->utf8_length(),
       code_begin(), code_size());
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_COMPILED_METHOD_LOAD)) {
-    ResourceMark rm;
-    compiled_method_t compiled_method;
-    compiled_method.method    = method();
-    compiled_method.code_addr = code_begin();
-    compiled_method.code_size = code_size();
-    build_jvmpi_line_number_mapping(&compiled_method);
-
-    jvmpi::post_compiled_method_load_event(&compiled_method);
-  }
-#endif // JVMPI_SUPPORT
 
   if (JvmtiExport::should_post_compiled_method_load()) {
     JvmtiExport::post_compiled_method_load(this);
@@ -1357,12 +1248,6 @@
   assert(_method != NULL && !is_unloaded(), "just checking");
   DTRACE_METHOD_UNLOAD_PROBE(method());
     
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_COMPILED_METHOD_UNLOAD)) {
-    jvmpi::post_compiled_method_unload_event(_method);
-  }
-#endif // JVMPI_SUPPORT
-
   // If a JVMTI agent has enabled the CompiledMethodUnload event then 
   // post the event. Sometime later this nmethod will be made a zombie by
   // the sweeper but the methodOop will not be valid at that point.
--- a/hotspot/src/share/vm/code/nmethod.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)nmethod.hpp	1.169 07/05/05 17:05:21 JVM"
+#pragma ident "@(#)nmethod.hpp	1.170 07/05/17 15:50:48 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -479,15 +479,8 @@
   address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
   void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi/jvmti support:
-#else // !JVMPI_SUPPORT
   // jvmti support:
-#endif // JVMPI_SUPPORT
   void post_compiled_method_load_event();
-#ifdef JVMPI_SUPPORT
-  void build_jvmpi_line_number_mapping(compiled_method_t *cm);
-#endif // JVMPI_SUPPORT
 
   // verify operations
   void verify();
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)compileBroker.cpp	1.146 07/05/05 17:05:23 JVM"
+#pragma ident "@(#)compileBroker.cpp	1.147 07/05/17 15:50:51 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -183,11 +183,7 @@
   assert(!_lock->is_locked(), "bad locking");
 
   _compile_id = compile_id;
-#ifdef JVMPI_SUPPORT
-  _method = JNIHandles::make_global(method, false);
-#else // !JVMPI_SUPPORT
   _method = JNIHandles::make_global(method);
-#endif // JVMPI_SUPPORT
   _osr_bci = osr_bci;
   _is_blocking = is_blocking;
   _comp_level = comp_level;
@@ -208,11 +204,7 @@
       if (hot_method == method) {
         _hot_method = _method;
       } else {
-#ifdef JVMPI_SUPPORT
-        _hot_method = JNIHandles::make_global(hot_method, false);
-#else // !JVMPI_SUPPORT
         _hot_method = JNIHandles::make_global(hot_method);
-#endif // JVMPI_SUPPORT
       }
     }
   }
@@ -239,15 +231,9 @@
   set_code(NULL);
   assert(!_lock->is_locked(), "Should not be locked when freed");
   if (_hot_method != NULL && _hot_method != _method) {
-#ifdef JVMPI_SUPPORT
-    JNIHandles::destroy_global(_hot_method, false);
-  }
-  JNIHandles::destroy_global(_method, false);
-#else // !JVMPI_SUPPORT
     JNIHandles::destroy_global(_hot_method);
   }
   JNIHandles::destroy_global(_method);
-#endif // JVMPI_SUPPORT
 }
 
 
@@ -1048,17 +1034,9 @@
     return NULL;
   }
 
-#ifdef JVMPI_SUPPORT
-  // JVMTI and JVMPI -- post_compile_event requires jmethod_id() that may require
-#else // !JVMPI_SUPPORT
   // JVMTI -- post_compile_event requires jmethod_id() that may require
-#endif // JVMPI_SUPPORT
   // a lock the compiling thread can not acquire. Prefetch it here.
-#ifdef JVMPI_SUPPORT
-  if (JvmtiExport::should_post_compiled_method_load() || jvmpi::enabled()) { 
-#else // !JVMPI_SUPPORT
   if (JvmtiExport::should_post_compiled_method_load()) { 
-#endif // JVMPI_SUPPORT
     method->jmethod_id(); 
   }
 
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)compilerOracle.cpp	1.33 07/05/05 17:05:24 JVM"
+#pragma ident "@(#)compilerOracle.cpp	1.34 07/05/17 15:50:53 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -110,13 +110,8 @@
 };
 
 MethodMatcher::MethodMatcher(symbolHandle class_name, symbolHandle method_name, MethodMatcher* next) {
-#ifdef JVMPI_SUPPORT
-  _class_name  = JNIHandles::make_global(class_name,  false);
-  _method_name = JNIHandles::make_global(method_name, false);
-#else // !JVMPI_SUPPORT
   _class_name  = JNIHandles::make_global(class_name);
   _method_name = JNIHandles::make_global(method_name);
-#endif // JVMPI_SUPPORT
   _next        = next;
   _class_mode  = MethodMatcher::Exact;
   _method_mode = MethodMatcher::Exact;
@@ -130,15 +125,9 @@
     _class_mode(class_mode)
   , _method_mode(method_mode)
   , _next(next)
-#ifdef JVMPI_SUPPORT
-  , _class_name(JNIHandles::make_global(class_name(),  false))
-  , _method_name(JNIHandles::make_global(method_name(), false))
-  , _signature(JNIHandles::make_global(signature(), false)) {
-#else // !JVMPI_SUPPORT
   , _class_name(JNIHandles::make_global(class_name()))
   , _method_name(JNIHandles::make_global(method_name()))
   , _signature(JNIHandles::make_global(signature())) {
-#endif // JVMPI_SUPPORT
 }
 
 bool MethodMatcher::match(symbolHandle candidate, symbolHandle match, Mode match_mode) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/asParNewGeneration.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,633 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)asParNewGeneration.cpp	1.11 07/05/05 17:05:25 JVM"
-#endif
-/*
- * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_asParNewGeneration.cpp.incl"
-
-ASParNewGeneration::ASParNewGeneration(ReservedSpace rs, 
-				       size_t initial_byte_size, 
-				       size_t min_byte_size,
-				       int level) :
-  ParNewGeneration(rs, initial_byte_size, level), 
-  _min_gen_size(min_byte_size) {}
-
-const char* ASParNewGeneration::name() const {
-  return "adaptive size par new generation";
-}
-
-void ASParNewGeneration::adjust_desired_tenuring_threshold() {
-  assert(UseAdaptiveSizePolicy, 
-    "Should only be used with UseAdaptiveSizePolicy");
-}
-
-void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
-  // Resize the generation if needed. If the generation resize
-  // reports false, do not attempt to resize the spaces.
-  if (resize_generation(eden_size, survivor_size)) {
-    // Then we lay out the spaces inside the generation
-    resize_spaces(eden_size, survivor_size);
-
-    space_invariants();
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("Young generation size: "
-        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
-        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
-        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
-        eden_size, survivor_size, used(), capacity(),
-        max_gen_size(), min_gen_size());
-    }
-  }
-}
-
-size_t ASParNewGeneration::available_to_min_gen() {
-  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
-  return virtual_space()->committed_size() - min_gen_size();
-}
-
-// This method assumes that from-space has live data and that
-// any shrinkage of the young gen is limited by location of
-// from-space.
-size_t ASParNewGeneration::available_to_live() const {
-#undef SHRINKS_AT_END_OF_EDEN
-#ifdef SHRINKS_AT_END_OF_EDEN
-  size_t delta_in_survivor = 0;
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t space_alignment = heap->intra_generation_alignment();
-  const size_t gen_alignment = heap->generation_alignment();
-
-  MutableSpace* space_shrinking = NULL;
-  if (from_space()->end() > to_space()->end()) {
-    space_shrinking = from_space();
-  } else {
-    space_shrinking = to_space();
-  }
-
-  // Include any space that is committed but not included in
-  // the survivor spaces.
-  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
-    "Survivor space beyond high end");
-  size_t unused_committed = pointer_delta(virtual_space()->high(),
-    space_shrinking->end(), sizeof(char));   
-
-  if (space_shrinking->is_empty()) {
-    // Don't let the space shrink to 0
-    assert(space_shrinking->capacity_in_bytes() >= space_alignment, 
-      "Space is too small");
-    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
-  } else {
-    delta_in_survivor = pointer_delta(space_shrinking->end(), 
-				      space_shrinking->top(),
-				      sizeof(char));
-  }
-
-  size_t delta_in_bytes = unused_committed + delta_in_survivor;
-  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
-  return delta_in_bytes;
-#else
-  // The only space available for shrinking is in to-space if it
-  // is above from-space.
-  if (to()->bottom() > from()->bottom()) {
-    const size_t alignment = os::vm_page_size();
-    if (to()->capacity() < alignment) {
-      return 0;
-    } else {
-      return to()->capacity() - alignment;
-    }
-  } else {
-    return 0;
-  }
-#endif
-}
-
-// Return the number of bytes available for resizing down the young
-// generation.  This is the minimum of
-// 	input "bytes"
-//	bytes to the minimum young gen size
-//	bytes to the size currently being used + some small extra
-size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
-  // Allow shrinkage into the current eden but keep eden large enough
-  // to maintain the minimum young gen size
-  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
-  return align_size_down(bytes, os::vm_page_size());
-}
-
-// Note that the the alignment used is the OS page size as
-// opposed to an alignment associated with the virtual space
-// (as is done in the ASPSYoungGen/ASPSOldGen)
-bool ASParNewGeneration::resize_generation(size_t eden_size, 
-					   size_t survivor_size) {
-  const size_t alignment = os::vm_page_size();
-  size_t orig_size = virtual_space()->committed_size();
-  bool size_changed = false;
-
-  // There used to be this guarantee there.
-  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
-  // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
-  // exceed the total generation size.
-
-  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(), 
-    "just checking");
-
-  // Adjust new generation size
-  const size_t eden_plus_survivors =
-	  align_size_up(eden_size + 2 * survivor_size, alignment);
-  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()), 
-			     min_gen_size());
-  assert(desired_size <= max_gen_size(), "just checking");
-
-  if (desired_size > orig_size) {
-    // Grow the generation
-    size_t change = desired_size - orig_size;
-    assert(change % alignment == 0, "just checking");
-    if (!virtual_space()->expand_by(change)) {
-      return false; // Error if we fail to resize!
-    }
-
-    size_changed = true;
-  } else if (desired_size < orig_size) {
-    size_t desired_change = orig_size - desired_size;
-    assert(desired_change % alignment == 0, "just checking");
-
-    desired_change = limit_gen_shrink(desired_change);
-
-    if (desired_change > 0) {
-      virtual_space()->shrink_by(desired_change);
-      reset_survivors_after_shrink();
-
-      size_changed = true;
-    }
-  } else {
-    if (Verbose && PrintGC) {
-      if (orig_size == max_gen_size()) {
-        gclog_or_tty->print_cr("ASParNew generation size at maximum: "
-          SIZE_FORMAT "K", orig_size/K);
-      } else if (orig_size == min_gen_size()) {
-        gclog_or_tty->print_cr("ASParNew generation size at minium: "
-          SIZE_FORMAT "K", orig_size/K);
-      }
-    }
-  }
-
-  if (size_changed) {
-    MemRegion cmr((HeapWord*)virtual_space()->low(),
-                  (HeapWord*)virtual_space()->high());
-    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);
-
-    if (Verbose && PrintGC) {
-      size_t current_size  = virtual_space()->committed_size();
-      gclog_or_tty->print_cr("ASParNew generation size changed: "
-			     SIZE_FORMAT "K->" SIZE_FORMAT "K",
-			     orig_size/K, current_size/K);
-    }
-  }
-
-  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
-	    virtual_space()->committed_size() == max_gen_size(), "Sanity");
-
-  return true;
-}
-
-void ASParNewGeneration::reset_survivors_after_shrink() {
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  HeapWord* new_end = (HeapWord*)virtual_space()->high();
-  
-  if (from()->end() > to()->end()) {
-    assert(new_end >= from()->end(), "Shrinking past from-space");
-  } else {
-    assert(new_end >= to()->bottom(), "Shrink was too large");
-    // Was there a shrink of the survivor space?
-    if (new_end < to()->end()) {
-      MemRegion mr(to()->bottom(), new_end);
-      to()->initialize(mr, false /* clear */);
-    }
-  }
-}
-void ASParNewGeneration::resize_spaces(size_t requested_eden_size, 
-			               size_t requested_survivor_size) {
-  assert(UseAdaptiveSizePolicy, "sanity check");
-  assert(requested_eden_size > 0  && requested_survivor_size > 0, 
-	 "just checking");
-  CollectedHeap* heap = Universe::heap();
-  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
-      
-
-  // We require eden and to space to be empty
-  if ((!eden()->is_empty()) || (!to()->is_empty())) {
-    return;
-  }
-
-  size_t cur_eden_size = eden()->capacity();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: " 
-                  SIZE_FORMAT 
-                  ", requested_survivor_size: " SIZE_FORMAT ")",
-                  requested_eden_size, requested_survivor_size);
-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " 
-                  SIZE_FORMAT, 
-                  eden()->bottom(), 
-                  eden()->end(), 
-                  pointer_delta(eden()->end(),
-                                eden()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " 
-		  SIZE_FORMAT, 
-                  from()->bottom(), 
-                  from()->end(), 
-                  pointer_delta(from()->end(),
-                                from()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " 
-		  SIZE_FORMAT, 
-                  to()->bottom(),   
-                  to()->end(), 
-                  pointer_delta(  to()->end(),
-                                  to()->bottom(),
-                                  sizeof(char)));
-  }
-
-  // There's nothing to do if the new sizes are the same as the current
-  if (requested_survivor_size == to()->capacity() && 
-      requested_survivor_size == from()->capacity() &&
-      requested_eden_size == eden()->capacity()) {
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
-    }
-    return;
-  }
-  
-  char* eden_start = (char*)eden()->bottom();
-  char* eden_end   = (char*)eden()->end();   
-  char* from_start = (char*)from()->bottom();
-  char* from_end   = (char*)from()->end();
-  char* to_start   = (char*)to()->bottom();
-  char* to_end     = (char*)to()->end();
-
-  const size_t alignment = os::vm_page_size();
-  const bool maintain_minimum = 
-    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
-
-  // Check whether from space is below to space
-  if (from_start < to_start) {
-    // Eden, from, to
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, from, to:");
-    }
-
-    // Set eden
-    // "requested_eden_size" is a goal for the size of eden
-    // and may not be attainable.  "eden_size" below is
-    // calculated based on the location of from-space and
-    // the goal for the size of eden.  from-space is
-    // fixed in place because it contains live data.
-    // The calculation is done this way to avoid 32bit
-    // overflow (i.e., eden_start + requested_eden_size
-    // may too large for representation in 32bits).
-    size_t eden_size;
-    if (maintain_minimum) {
-      // Only make eden larger than the requested size if
-      // the minimum size of the generation has to be maintained.
-      // This could be done in general but policy at a higher
-      // level is determining a requested size for eden and that
-      // should be honored unless there is a fundamental reason.
-      eden_size = pointer_delta(from_start, 
-				eden_start, 
-				sizeof(char));
-    } else {
-      eden_size = MIN2(requested_eden_size,
-                       pointer_delta(from_start, eden_start, sizeof(char)));
-    }
-
-// tty->print_cr("eden_size before: " SIZE_FORMAT, eden_size);
-    eden_size = align_size_down(eden_size, alignment);
-// tty->print_cr("eden_size after: " SIZE_FORMAT, eden_size);
-    eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
-
-    // To may resize into from space as long as it is clear of live data.
-    // From space must remain page aligned, though, so we need to do some
-    // extra calculations.
-
-    // First calculate an optimal to-space
-    to_end   = (char*)virtual_space()->high();
-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, 
-				    sizeof(char));
-
-    // Does the optimal to-space overlap from-space?
-    if (to_start < (char*)from()->end()) {
-      // Calculate the minimum offset possible for from_end
-      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));
-
-      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
-      if (from_size == 0) {
-        from_size = alignment;
-      } else {
-        from_size = align_size_up(from_size, alignment);
-      }
-
-      from_end = from_start + from_size;
-      assert(from_end > from_start, "addition overflow or from_size problem");
-
-      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");
-
-      // Now update to_start with the new from_end
-      to_start = MAX2(from_end, to_start);
-    } else {
-      // If shrinking, move to-space down to abut the end of from-space
-      // so that shrinking will move to-space down.  If not shrinking
-      // to-space is moving up to allow for growth on the next expansion.
-      if (requested_eden_size <= cur_eden_size) {
-        to_start = from_end;
-        if (to_start + requested_survivor_size > to_start) {
-	  to_end = to_start + requested_survivor_size;
-        }
-      }
-      // else leave to_end pointing to the high end of the virtual space.
-    }
-
-    guarantee(to_start != to_end, "to space is zero sized");
-      
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
-                    eden_start, 
-                    eden_end, 
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
-                    from_start, 
-                    from_end, 
-                    pointer_delta(from_end, from_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
-                    to_start,   
-                    to_end, 
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-    }
-  } else {
-    // Eden, to, from
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, to, from:");
-    }
-
-    // Calculate the to-space boundaries based on
-    // the start of from-space.
-    to_end = from_start;
-    to_start = (char*)pointer_delta(from_start, 
-                                    (char*)requested_survivor_size, 
-				    sizeof(char));
-    // Calculate the ideal eden boundaries.
-    // eden_end is already at the bottom of the generation
-    assert(eden_start == virtual_space()->low(), 
-      "Eden is not starting at the low end of the virtual space");
-    if (eden_start + requested_eden_size >= eden_start) {
-      eden_end = eden_start + requested_eden_size;
-    } else {
-      eden_end = to_start;
-    }
-
-    // Does eden intrude into to-space?  to-space
-    // gets priority but eden is not allowed to shrink
-    // to 0.
-    if (eden_end > to_start) {
-      eden_end = to_start;
-    }
-
-    // Don't let eden shrink down to 0 or less.
-    eden_end = MAX2(eden_end, eden_start + alignment);
-    assert(eden_start + alignment >= eden_start, "Overflow");
-
-    size_t eden_size;
-    if (maintain_minimum) {
-      // Use all the space available.
-      eden_end = MAX2(eden_end, to_start);
-      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
-      eden_size = MIN2(eden_size, cur_eden_size);
-    } else {
-      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
-    }
-    eden_size = align_size_down(eden_size, alignment);
-    assert(maintain_minimum || eden_size <= requested_eden_size, 
-      "Eden size is too large");
-    assert(eden_size >= alignment, "Eden size is too small");
-    eden_end = eden_start + eden_size;
-
-    // Move to-space down to eden.
-    if (requested_eden_size < cur_eden_size) {
-      to_start = eden_end;
-      if (to_start + requested_survivor_size > to_start) {
-        to_end = MIN2(from_start, to_start + requested_survivor_size);
-      } else {
-        to_end = from_start;
-      }
-    }
-
-    // eden_end may have moved so again make sure
-    // the to-space and eden don't overlap.
-    to_start = MAX2(eden_end, to_start);
-
-    // from-space
-    size_t from_used = from()->used();
-    if (requested_survivor_size > from_used) {
-      if (from_start + requested_survivor_size >= from_start) {
-        from_end = from_start + requested_survivor_size;
-      }
-      if (from_end > virtual_space()->high()) {
-	from_end = virtual_space()->high();
-      }
-    }
-
-    assert(to_start >= eden_end, "to-space should be above eden");
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
-                    eden_start, 
-                    eden_end, 
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): " 
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
-                    to_start,   
-                    to_end, 
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): " 
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
-                    from_start, 
-                    from_end, 
-                    pointer_delta(from_end, from_start, sizeof(char)));
-    }
-  }
-  
-
-  guarantee((HeapWord*)from_start <= from()->bottom(), 
-            "from start moved to the right");
-  guarantee((HeapWord*)from_end >= from()->top(),
-            "from end moved into live data");
-  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
-  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
-  assert(is_object_aligned((intptr_t)to_start), "checking alignment");
-
-  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
-  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
-  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
-
-  // Let's make sure the call to initialize doesn't reset "top"!
-  HeapWord* old_from_top = from()->top();
-
-  // For PrintAdaptiveSizePolicy block  below
-  size_t old_from = from()->capacity();
-  size_t old_to   = to()->capacity();
-
-  // The call to initialize NULL's the next compaction space
-  eden()->initialize(edenMR, true);
-  eden()->set_next_compaction_space(from());
-    to()->initialize(toMR  , true);
-  from()->initialize(fromMR, false);     // Note, not cleared!
-
-  assert(from()->top() == old_from_top, "from top changed!");
-
-  if (PrintAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
-
-    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
-                  "collection: %d "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
-                  gch->total_collections(),
-                  old_from, old_to,
-                  from()->capacity(),
-                  to()->capacity());
-    gclog_or_tty->cr();
-  }
-}
-
-void ASParNewGeneration::compute_new_size() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "not a CMS generational heap");
-
-
-  CMSAdaptiveSizePolicy* size_policy = 
-    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-
-  size_t survived = from()->used();
-  if (!survivor_overflow()) {
-    // Keep running averages on how much survived
-    size_policy->avg_survived()->sample(survived);
-  } else {
-    size_t promoted = 
-      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
-    assert(promoted < gch->capacity(), "Conversion problem?");
-    size_t survived_guess = survived + promoted;
-    size_policy->avg_survived()->sample(survived_guess);
-  }
-
-  size_t survivor_limit = max_survivor_size();
-  _tenuring_threshold =
-    size_policy->compute_survivor_space_size_and_threshold(
-                                                     _survivor_overflow,
-                                                     _tenuring_threshold,
-                                                     survivor_limit);
-  size_policy->avg_young_live()->sample(used());
-  size_policy->avg_eden_live()->sample(eden()->used());
-
-  size_policy->compute_young_generation_free_space(eden()->capacity(),
-                                                   max_gen_size());
-
-  resize(size_policy->calculated_eden_size_in_bytes(), 
-	 size_policy->calculated_survivor_size_in_bytes());
-
-  if (UsePerfData) {
-    CMSGCAdaptivePolicyCounters* counters = 
-      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-    assert(counters->kind() == 
-	   GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-      "Wrong kind of counters");
-    counters->update_tenuring_threshold(_tenuring_threshold);
-    counters->update_survivor_overflowed(_survivor_overflow);
-    counters->update_young_capacity(capacity());
-  }
-}
-
-
-#ifndef PRODUCT
-// Changes from PSYoungGen version
-//	value of "alignment"
-void ASParNewGeneration::space_invariants() {
-  const size_t alignment = os::vm_page_size();
-
-  // Currently, our eden size cannot shrink to zero
-  guarantee(eden()->capacity() >= alignment, "eden too small");
-  guarantee(from()->capacity() >= alignment, "from too small");
-  guarantee(to()->capacity() >= alignment, "to too small");
-
-  // Relationship of spaces to each other
-  char* eden_start = (char*)eden()->bottom();
-  char* eden_end   = (char*)eden()->end();   
-  char* from_start = (char*)from()->bottom();
-  char* from_end   = (char*)from()->end();
-  char* to_start   = (char*)to()->bottom();
-  char* to_end     = (char*)to()->end();
-
-  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
-  guarantee(eden_start < eden_end, "eden space consistency");
-  guarantee(from_start < from_end, "from space consistency");
-  guarantee(to_start < to_end, "to space consistency");
-
-  // Check whether from space is below to space
-  if (from_start < to_start) {
-    // Eden, from, to
-    guarantee(eden_end <= from_start, "eden/from boundary");
-    guarantee(from_end <= to_start,   "from/to boundary");
-    guarantee(to_end <= virtual_space()->high(), "to end");
-  } else {
-    // Eden, to, from
-    guarantee(eden_end <= to_start, "eden/to boundary");
-    guarantee(to_end <= from_start, "to/from boundary");
-    guarantee(from_end <= virtual_space()->high(), "from end");
-  }
-
-  // More checks that the virtual space is consistent with the spaces
-  assert(virtual_space()->committed_size() >=
-    (eden()->capacity() +
-     to()->capacity() +
-     from()->capacity()), "Committed size is inconsistent");
-  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
-    "Space invariant");
-  char* eden_top = (char*)eden()->top();
-  char* from_top = (char*)from()->top();
-  char* to_top = (char*)to()->top();
-  assert(eden_top <= virtual_space()->high(), "eden top");
-  assert(from_top <= virtual_space()->high(), "from top");
-  assert(to_top <= virtual_space()->high(), "to top");
-}
-#endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/asParNewGeneration.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)asParNewGeneration.hpp	1.8 07/05/05 17:05:25 JVM"
-#endif
-/*
- * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// A Generation that does parallel young-gen collection extended
-// for adaptive size policy.
-
-// Division of generation into spaces
-// done by DefNewGeneration::compute_space_boundaries()
-//	+---------------+
-//	| uncommitted	|
-//	|---------------|
-//	| ss0		|
-//	|---------------|
-//	| ss1		|
-//	|---------------|
-//	|		|
-//	| eden		|
-//	|		|
-//	+---------------+	<-- low end of VirtualSpace
-//
-class ASParNewGeneration: public ParNewGeneration {
-
-  size_t _min_gen_size;
-
-  // Resize the generation based on the desired sizes of 
-  // the constituent spaces.
-  bool resize_generation(size_t eden_size, size_t survivor_size);
-  // Resize the spaces based on their desired sizes but
-  // respecting the maximum size of the generation.
-  void resize_spaces(size_t eden_size, size_t survivor_size);
-  // Return the byte size remaining to the minimum generation size.
-  size_t available_to_min_gen();
-  // Return the byte size remaining to the live data in the generation.
-  size_t available_to_live() const;
-  // Return the byte size that the generation is allowed to shrink.
-  size_t limit_gen_shrink(size_t bytes);
-  // Reset the size of the spaces after a shrink of the generation.
-  void reset_survivors_after_shrink();
-
-  // Accessor
-  VirtualSpace* virtual_space() { return &_virtual_space; }
-
-  virtual void adjust_desired_tenuring_threshold();
-
- public:
-
-  ASParNewGeneration(ReservedSpace rs, 
-		     size_t initial_byte_size, 
-		     size_t min_byte_size,
-		     int level);
-
-  virtual const char* short_name() const { return "ASParNew"; }
-  virtual const char* name() const;
-  virtual Generation::Name kind() { return ASParNew; }
-
-  // Change the sizes of eden and the survivor spaces in
-  // the generation.  The parameters are desired sizes
-  // and are not guaranteed to be met.  For example, if
-  // the total is larger than the generation. 
-  void resize(size_t eden_size, size_t survivor_size);
-
-  virtual void compute_new_size();
-
-  size_t max_gen_size()                 { return _reserved.byte_size(); }
-  size_t min_gen_size() const		{ return _min_gen_size; }
-
-  // Space boundary invariant checker
-  void space_invariants() PRODUCT_RETURN;
-};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,1213 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)binaryTreeDictionary.cpp	1.37 07/05/05 17:05:43 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_binaryTreeDictionary.cpp.incl"
+
+////////////////////////////////////////////////////////////////////////////////
+// A binary tree based search structure for free blocks.
+// This is currently used in the Concurrent Mark&Sweep implementation.
+////////////////////////////////////////////////////////////////////////////////
+
+TreeChunk* TreeChunk::as_TreeChunk(FreeChunk* fc) {
+  // Do some assertion checking here.
+  return (TreeChunk*) fc;
+}
+
+void TreeChunk::verifyTreeChunkList() const {
+  TreeChunk* nextTC = (TreeChunk*)next();
+  if (prev() != NULL) { // interior list node shouldn't have tree fields
+    guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
+              embedded_list()->right()  == NULL, "should be clear");
+  }
+  if (nextTC != NULL) {
+    guarantee(as_TreeChunk(nextTC->prev()) == this, "broken chain");
+    guarantee(nextTC->size() == size(), "wrong size");
+    nextTC->verifyTreeChunkList();
+  }
+}
+
+
+TreeList* TreeList::as_TreeList(TreeChunk* tc) {
+  // This first free chunk in the list will be the tree list.
+  assert(tc->size() >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
+  TreeList* tl = tc->embedded_list();
+  tc->set_list(tl);
+#ifdef ASSERT
+  tl->set_protecting_lock(NULL);
+#endif
+  tl->set_hint(0);
+  tl->set_size(tc->size());
+  tl->link_head(tc);
+  tl->link_tail(tc);
+  tl->set_count(1);
+  tl->init_statistics();
+  tl->setParent(NULL);
+  tl->setLeft(NULL);
+  tl->setRight(NULL);
+  return tl;
+}
+TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
+  TreeChunk* tc = (TreeChunk*) addr;
+  assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
+  assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL, 
+    "Space should be clear");
+  tc->setSize(size);
+  tc->linkPrev(NULL);
+  tc->linkNext(NULL);
+  TreeList* tl = TreeList::as_TreeList(tc);
+  return tl;
+}
+
+TreeList* TreeList::removeChunkReplaceIfNeeded(TreeChunk* tc) {
+
+  TreeList* retTL = this;
+  FreeChunk* list = head();
+  assert(!list || list != list->next(), "Chunk on list twice");
+  assert(tc != NULL, "Chunk being removed is NULL");
+  assert(parent() == NULL || this == parent()->left() || 
+    this == parent()->right(), "list is inconsistent");
+  assert(tc->isFree(), "Header is not marked correctly");
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+
+  FreeChunk* prevFC = tc->prev();
+  TreeChunk* nextTC = TreeChunk::as_TreeChunk(tc->next());
+  assert(list != NULL, "should have at least the target chunk");
+
+  // Is this the first item on the list?
+  if (tc == list) {
+    // The "getChunk..." functions for a TreeList will not return the
+    // first chunk in the list unless it is the last chunk in the list
+    // because the first chunk is also acting as the tree node.
+    // When coalescing happens, however, the first chunk in a tree
+    // list can be the start of a free range.  Free ranges are removed
+    // from the free lists so that they are not available to be 
+    // allocated when the sweeper yields (giving up the free list lock)
+    // to allow mutator activity.  If this chunk is the first in the
+    // list and is not the last in the list, do the work to copy the
+    // TreeList from the first chunk to the next chunk and update all
+    // the TreeList pointers in the chunks in the list.
+    if (nextTC == NULL) {
+      assert(prevFC == NULL, "Not last chunk in the list")
+      set_tail(NULL);
+      set_head(NULL);
+    } else {
+      // copy embedded list.
+      nextTC->set_embedded_list(tc->embedded_list());
+      retTL = nextTC->embedded_list();
+      // Fix the pointer to the list in each chunk in the list.
+      // This can be slow for a long list.  Consider having
+      // an option that does not allow the first chunk on the
+      // list to be coalesced.
+      for (TreeChunk* curTC = nextTC; curTC != NULL; 
+	  curTC = TreeChunk::as_TreeChunk(curTC->next())) {
+        curTC->set_list(retTL);
+      }
+      // Fix the parent to point to the new TreeList.
+      if (retTL->parent() != NULL) {
+	if (this == retTL->parent()->left()) {
+	  retTL->parent()->setLeft(retTL);
+	} else {
+	  assert(this == retTL->parent()->right(), "Parent is incorrect");
+	  retTL->parent()->setRight(retTL);
+	}
+      }
+      // Fix the children's parent pointers to point to the
+      // new list.
+      assert(right() == retTL->right(), "Should have been copied");
+      if (retTL->right() != NULL) {
+	retTL->right()->setParent(retTL);
+      }
+      assert(left() == retTL->left(), "Should have been copied");
+      if (retTL->left() != NULL) {
+	retTL->left()->setParent(retTL);
+      }
+      retTL->link_head(nextTC);
+      assert(nextTC->isFree(), "Should be a free chunk");
+    }
+  } else {
+    if (nextTC == NULL) {
+      // Removing chunk at tail of list
+      link_tail(prevFC);
+    }
+    // Chunk is interior to the list
+    prevFC->linkAfter(nextTC);
+  }
+
+  // Below this point the embedded TreeList being used for the
+  // tree node may have changed. Don't use "this" 
+  // TreeList*.
+  // chunk should still be a free chunk (bit set in _prev)
+  assert(!retTL->head() || retTL->size() == retTL->head()->size(), 
+    "Wrong sized chunk in list");
+  debug_only(
+    tc->linkPrev(NULL);  
+    tc->linkNext(NULL);
+    tc->set_list(NULL);
+    bool prev_found = false;
+    bool next_found = false;
+    for (FreeChunk* curFC = retTL->head(); 
+	 curFC != NULL; curFC = curFC->next()) {
+      assert(curFC != tc, "Chunk is still in list");
+      if (curFC == prevFC) {
+	prev_found = true;
+      }
+      if (curFC == nextTC) {
+	next_found = true;
+      }
+    }
+    assert(prevFC == NULL || prev_found, "Chunk was lost from list");
+    assert(nextTC == NULL || next_found, "Chunk was lost from list");
+    assert(retTL->parent() == NULL ||
+	   retTL == retTL->parent()->left() || 
+	   retTL == retTL->parent()->right(),
+           "list is inconsistent");
+  )
+  retTL->decrement_count();
+
+  assert(tc->isFree(), "Should still be a free chunk");
+  assert(retTL->head() == NULL || retTL->head()->prev() == NULL, 
+    "list invariant");
+  assert(retTL->tail() == NULL || retTL->tail()->next() == NULL, 
+    "list invariant");
+  return retTL;
+}
+void TreeList::returnChunkAtTail(TreeChunk* chunk) {
+  assert(chunk != NULL, "returning NULL chunk");
+  assert(chunk->list() == this, "list should be set for chunk");
+  assert(tail() != NULL, "The tree list is embedded in the first chunk");
+  // which means that the list can never be empty.
+  assert(!verifyChunkInFreeLists(chunk), "Double entry");
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  
+  FreeChunk* fc = tail();
+  fc->linkAfter(chunk);
+  link_tail(chunk);
+
+  assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
+  increment_count();
+  debug_only(increment_returnedBytes_by(chunk->size()*sizeof(HeapWord));)
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+}
+
+// Add this chunk at the head of the list.  "At the head of the list"
+// is defined to be after the chunk pointed to by head().  This is 
+// because the TreeList is embedded in the first TreeChunk in the
+// list.  See the definition of TreeChunk.
+void TreeList::returnChunkAtHead(TreeChunk* chunk) {
+  assert(chunk->list() == this, "list should be set for chunk");
+  assert(head() != NULL, "The tree list is embedded in the first chunk");
+  assert(chunk != NULL, "returning NULL chunk");
+  assert(!verifyChunkInFreeLists(chunk), "Double entry");
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+
+  FreeChunk* fc = head()->next();
+  if (fc != NULL) {
+    chunk->linkAfter(fc);
+  } else {
+    assert(tail() == NULL, "List is inconsistent");
+    link_tail(chunk);
+  }
+  head()->linkAfter(chunk);
+  assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
+  increment_count();
+  debug_only(increment_returnedBytes_by(chunk->size()*sizeof(HeapWord));)
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+}
+
+TreeChunk* TreeList::head_as_TreeChunk() {
+  assert(head() == NULL || TreeChunk::as_TreeChunk(head())->list() == this,
+    "Wrong type of chunk?");
+  return TreeChunk::as_TreeChunk(head());
+}
+
+TreeChunk* TreeList::first_available() {
+  guarantee(head() != NULL, "The head of the list cannot be NULL");
+  FreeChunk* fc = head()->next();
+  TreeChunk* retTC;
+  if (fc == NULL) {
+    retTC = head_as_TreeChunk();
+  } else {
+    retTC = TreeChunk::as_TreeChunk(fc);
+  }
+  assert(retTC->list() == this, "Wrong type of chunk.");
+  return retTC;
+}
+
+BinaryTreeDictionary::BinaryTreeDictionary(MemRegion mr, bool splay):
+  _splay(splay)
+{
+  assert(mr.byte_size() > MIN_TREE_CHUNK_SIZE, "minimum chunk size");
+
+  reset(mr);
+  assert(root()->left() == NULL, "reset check failed");
+  assert(root()->right() == NULL, "reset check failed");
+  assert(root()->head()->next() == NULL, "reset check failed");
+  assert(root()->head()->prev() == NULL, "reset check failed");
+  assert(totalSize() == root()->size(), "reset check failed");
+  assert(totalFreeBlocks() == 1, "reset check failed");
+}
+
+void BinaryTreeDictionary::inc_totalSize(size_t inc) {
+  _totalSize = _totalSize + inc;
+}
+
+void BinaryTreeDictionary::dec_totalSize(size_t dec) {
+  _totalSize = _totalSize - dec;
+}
+
+void BinaryTreeDictionary::reset(MemRegion mr) {
+  assert(mr.byte_size() > MIN_TREE_CHUNK_SIZE, "minimum chunk size");
+  set_root(TreeList::as_TreeList(mr.start(), mr.word_size()));
+  set_totalSize(mr.word_size());
+  set_totalFreeBlocks(1);
+}
+
+void BinaryTreeDictionary::reset(HeapWord* addr, size_t byte_size) {
+  MemRegion mr(addr, heap_word_size(byte_size));
+  reset(mr);
+}
+
+void BinaryTreeDictionary::reset() {
+  set_root(NULL);
+  set_totalSize(0);
+  set_totalFreeBlocks(0);
+}
+
+// Get a free block of size at least size from tree, or NULL.
+// If a splay step is requested, the removal algorithm (only) incorporates
+// a splay step as follows:
+// . the search proceeds down the tree looking for a possible
+//   match. At the (closest) matching location, an appropriate splay step is applied
+//   (zig, zig-zig or zig-zag). A chunk of the appropriate size is then returned
+//   if available, and if it's the last chunk, the node is deleted. A deleted
+//   node is replaced in place by its tree successor.
+TreeChunk*
+BinaryTreeDictionary::getChunkFromTree(size_t size, Dither dither, bool splay)
+{
+  TreeList *curTL, *prevTL;
+  TreeChunk* retTC = NULL;
+  assert(size >= MIN_TREE_CHUNK_SIZE, "minimum chunk size");
+  if (FLSVerifyDictionary) {
+    verifyTree();
+  }
+  // starting at the root, work downwards trying to find match.
+  // Remember the last node of size too great or too small.
+  for (prevTL = curTL = root(); curTL != NULL;) {
+    if (curTL->size() == size) {        // exact match
+      break;
+    } 
+    prevTL = curTL;
+    if (curTL->size() < size) {        // proceed to right sub-tree
+      curTL = curTL->right();
+    } else {                           // proceed to left sub-tree
+      assert(curTL->size() > size, "size inconsistency");
+      curTL = curTL->left();
+    }
+  }
+  if (curTL == NULL) { // couldn't find exact match
+    // try and find the next larger size by walking back up the search path
+    for (curTL = prevTL; curTL != NULL;) {
+      if (curTL->size() >= size) break;
+      else curTL = curTL->parent();
+    }
+    assert(curTL == NULL || curTL->count() > 0,
+      "An empty list should not be in the tree");
+  }
+  if (curTL != NULL) {
+    assert(curTL->size() >= size, "size inconsistency");
+    if (UseCMSAdaptiveFreeLists) {
+  
+      // A candidate chunk has been found.  If it is already under
+      // populated, get a chunk associated with the hint for this
+      // chunk.
+      if (curTL->surplus() <= 0) {
+        /* Use the hint to find a size with a surplus, and reset the hint. */
+        TreeList* hintTL = curTL;
+        while (hintTL->hint() != 0) {
+  	  assert(hintTL->hint() == 0 || hintTL->hint() > hintTL->size(),
+	    "hint points in the wrong direction");
+          hintTL = findList(hintTL->hint());
+  	  assert(curTL != hintTL, "Infinite loop");
+          if (hintTL == NULL || 
+	      hintTL == curTL /* Should not happen but protect against it */ ) {
+  	    // No useful hint.  Set the hint to NULL and go on.
+            curTL->set_hint(0);
+            break;
+          }
+          assert(hintTL->size() > size, "hint is inconsistent");
+          if (hintTL->surplus() > 0) {
+  	    // The hint led to a list that has a surplus.  Use it.
+  	    // Set the hint for the candidate to an overpopulated
+  	    // size.  
+            curTL->set_hint(hintTL->size());
+            // Change the candidate.
+            curTL = hintTL;
+            break;
+          }
+  	  // The evm code reset the hint of the candidate as
+  	  // at an interim point.  Why?  Seems like this leaves
+  	  // the hint pointing to a list that didn't work.
+          // curTL->set_hint(hintTL->size());
+        }
+      }
+    }
+    // don't waste time splaying if chunk's singleton
+    if (splay && curTL->head()->next() != NULL) {
+      semiSplayStep(curTL);
+    }
+    retTC = curTL->first_available();
+    assert((retTC != NULL) && (curTL->count() > 0),
+      "A list in the binary tree should not be NULL");
+    assert(retTC->size() >= size, 
+      "A chunk of the wrong size was found");
+    removeChunkFromTree(retTC);
+    assert(retTC->isFree(), "Header is not marked correctly");
+  }
+
+  if (FLSVerifyDictionary) {
+    verify();
+  }
+  return retTC;
+}
+
+TreeList* BinaryTreeDictionary::findList(size_t size) const {
+  TreeList* curTL;
+  for (curTL = root(); curTL != NULL;) {
+    if (curTL->size() == size) {        // exact match
+      break;
+    } 
+    
+    if (curTL->size() < size) {        // proceed to right sub-tree
+      curTL = curTL->right();
+    } else {                           // proceed to left sub-tree
+      assert(curTL->size() > size, "size inconsistency");
+      curTL = curTL->left();
+    }
+  }
+  return curTL;
+}
+
+
+bool BinaryTreeDictionary::verifyChunkInFreeLists(FreeChunk* tc) const {
+  size_t size = tc->size();
+  TreeList* tl = findList(size);
+  if (tl == NULL) {
+    return false;
+  } else {
+    return tl->verifyChunkInFreeLists(tc);
+  }
+}
+
+FreeChunk* BinaryTreeDictionary::findLargestDict() const {
+  TreeList *curTL = root();
+  if (curTL != NULL) {
+    while(curTL->right() != NULL) curTL = curTL->right();
+    return curTL->first_available();
+  } else {
+    return NULL;
+  }
+}
+
+// Remove the current chunk from the tree.  If it is not the last 
+// chunk in a list on a tree node, just unlink it.
+// If it is the last chunk in the list (the next link is NULL),
+// remove the node and repair the tree.
+TreeChunk*
+BinaryTreeDictionary::removeChunkFromTree(TreeChunk* tc) {
+  assert(tc != NULL, "Should not call with a NULL chunk");
+  assert(tc->isFree(), "Header is not marked correctly");
+
+  TreeList *newTL, *parentTL;
+  TreeChunk* retTC;
+  TreeList* tl = tc->list();
+  debug_only(
+    bool removing_only_chunk = false;
+    if (tl == _root) {
+      if ((_root->left() == NULL) && (_root->right() == NULL)) {
+        if (_root->count() == 1) {
+	  assert(_root->head() == tc, "Should only be this one chunk");
+	  removing_only_chunk = true;
+        }
+      }
+    }
+  )
+  assert(tl != NULL, "List should be set");
+  assert(tl->parent() == NULL || tl == tl->parent()->left() || 
+	 tl == tl->parent()->right(), "list is inconsistent");
+
+  bool complicatedSplice = false;
+
+  retTC = tc;
+  // Removing this chunk can have the side effect of changing the node
+  // (TreeList*) in the tree.  If the node is the root, update it.
+  TreeList* replacementTL = tl->removeChunkReplaceIfNeeded(tc);
+  assert(tc->isFree(), "Chunk should still be free");
+  assert(replacementTL->parent() == NULL ||
+	 replacementTL == replacementTL->parent()->left() || 
+	 replacementTL == replacementTL->parent()->right(),
+         "list is inconsistent");
+  if (tl == root()) {
+    assert(replacementTL->parent() == NULL, "Incorrectly replacing root");
+    set_root(replacementTL);
+  }
+  debug_only(
+    if (tl != replacementTL) {
+      assert(replacementTL->head() != NULL, 
+        "If the tree list was replaced, it should not be a NULL list");
+      TreeList* rhl = replacementTL->head_as_TreeChunk()->list();
+      TreeList* rtl = TreeChunk::as_TreeChunk(replacementTL->tail())->list();
+      assert(rhl == replacementTL, "Broken head");
+      assert(rtl == replacementTL, "Broken tail");
+      assert(replacementTL->size() == tc->size(),  "Broken size");
+    }
+  )
+
+  // Does the tree need to be repaired?
+  if (replacementTL->count() == 0) {
+    assert(replacementTL->head() == NULL && 
+	   replacementTL->tail() == NULL, "list count is incorrect");
+    // Find the replacement node for the (soon to be empty) node being removed.
+    // if we have a single (or no) child, splice child in our stead
+    if (replacementTL->left() == NULL) {
+      // left is NULL so pick right.  right may also be NULL.
+      newTL = replacementTL->right();
+      debug_only(replacementTL->clearRight();)
+    } else if (replacementTL->right() == NULL) {
+      // right is NULL
+      newTL = replacementTL->left();
+      debug_only(replacementTL->clearLeft();)
+    } else {  // we have both children, so, by patriarchal convention,
+              // my replacement is least node in right sub-tree
+      complicatedSplice = true;
+      newTL = removeTreeMinimum(replacementTL->right());
+      assert(newTL != NULL && newTL->left() == NULL &&
+             newTL->right() == NULL, "sub-tree minimum exists");
+    }
+    // newTL is the replacement for the (soon to be empty) node.
+    // newTL may be NULL.
+    // should verify; we just cleanly excised our replacement
+    if (FLSVerifyDictionary) {
+      verifyTree();
+    }
+    // first make newTL my parent's child
+    if ((parentTL = replacementTL->parent()) == NULL) {  
+      // newTL should be root
+      assert(tl == root(), "Incorrectly replacing root");
+      set_root(newTL);
+      if (newTL != NULL) {
+        newTL->clearParent();
+      }
+    } else if (parentTL->right() == replacementTL) {   
+      // replacementTL is a right child
+      parentTL->setRight(newTL);
+    } else {                                // replacementTL is a left child
+      assert(parentTL->left() == replacementTL, "should be left child");
+      parentTL->setLeft(newTL);
+    }
+    debug_only(replacementTL->clearParent();)
+    if (complicatedSplice) {  // we need newTL to get replacementTL's 
+			      // two children
+      assert(newTL != NULL &&
+             newTL->left() == NULL && newTL->right() == NULL,
+            "newTL should not have encumbrances from the past");
+      // we'd like to assert as below:
+      // assert(replacementTL->left() != NULL && replacementTL->right() != NULL,
+      //       "else !complicatedSplice");
+      // ... however, the above assertion is too strong because we aren't
+      // guaranteed that replacementTL->right() is still NULL. 
+      // Recall that we removed
+      // the right sub-tree minimum from replacementTL. 
+      // That may well have been its right
+      // child! So we'll just assert half of the above:
+      assert(replacementTL->left() != NULL, "else !complicatedSplice");
+      newTL->setLeft(replacementTL->left());
+      newTL->setRight(replacementTL->right());
+      debug_only(
+        replacementTL->clearRight();
+        replacementTL->clearLeft();
+      )
+    }
+    assert(replacementTL->right() == NULL && 
+	   replacementTL->left() == NULL && 
+	   replacementTL->parent() == NULL,
+        "delete without encumbrances");
+  }
+
+  assert(totalSize() >= retTC->size(), "Incorrect total size");
+  dec_totalSize(retTC->size());     // size book-keeping
+  assert(totalFreeBlocks() > 0, "Incorrect total count");
+  set_totalFreeBlocks(totalFreeBlocks() - 1);
+
+  assert(retTC != NULL, "null chunk?");
+  assert(retTC->prev() == NULL && retTC->next() == NULL,
+         "should return without encumbrances");
+  if (FLSVerifyDictionary) {
+    verifyTree();
+  }
+  assert(!removing_only_chunk || _root == NULL, "root should be NULL");
+  return TreeChunk::as_TreeChunk(retTC);
+}
+
+// Remove the leftmost node (lm) in the tree and return it.
+// If lm has a right child, link it to the left node of
+// the parent of lm.
+TreeList* BinaryTreeDictionary::removeTreeMinimum(TreeList* tl) {
+  assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
+  // locate the subtree minimum by walking down left branches
+  TreeList* curTL = tl;
+  for (; curTL->left() != NULL; curTL = curTL->left());
+  // obviously curTL now has at most one child, a right child
+  if (curTL != root()) {  // Should this test just be removed?
+    TreeList* parentTL = curTL->parent();
+    if (parentTL->left() == curTL) { // curTL is a left child
+      parentTL->setLeft(curTL->right());
+    } else {
+      // If the list tl has no left child, then curTL may be
+      // the right child of parentTL.
+      assert(parentTL->right() == curTL, "should be a right child");
+      parentTL->setRight(curTL->right());
+    }
+  } else {
+    // The only use of this method would not pass the root of the
+    // tree (as indicated by the assertion above that the tree list
+    // has a parent) but the specification does not explicitly exclude the
+    // passing of the root so accommodate it.
+    set_root(NULL);
+  }
+  debug_only(
+    curTL->clearParent();  // Test if this needs to be cleared
+    curTL->clearRight();    // recall, above, left child is already null
+  )
+  // we just excised a (non-root) node, we should still verify all tree invariants
+  if (FLSVerifyDictionary) {
+    verifyTree();
+  }
+  return curTL;
+}
+
+// Based on a simplification of the algorithm by Sleator and Tarjan (JACM 1985).
+// The simplifications are the following:
+// . we splay only when we delete (not when we insert)
+// . we apply a single splay step per deletion/access
+// By doing such partial splaying, we reduce the amount of restructuring,
+// while getting a reasonably efficient search tree (we think).
+// [Measurements will be needed to (in)validate this expectation.]
+
+void BinaryTreeDictionary::semiSplayStep(TreeList* tc) {
+  // apply a semi-splay step at the given node:
+  // . if root, nothing needs to be done
+  // . if child of root, splay once
+  // . else zig-zig or zig-zag depending on path from grandparent
+  if (root() == tc) return;
+  warning("*** Splaying not yet implemented; "
+          "tree operations may be inefficient ***");
+}
+
+void BinaryTreeDictionary::insertChunkInTree(FreeChunk* fc) {
+  TreeList *curTL, *prevTL;
+  size_t size = fc->size();
+
+  assert(size >= MIN_TREE_CHUNK_SIZE, "too small to be a TreeList");
+  if (FLSVerifyDictionary) {
+    verifyTree();
+  }
+  // XXX: do i need to clear the FreeChunk fields, let me do it just in case
+  // Revisit this later
+  
+  fc->clearNext();
+  fc->linkPrev(NULL);
+  
+  // work down from the _root, looking for insertion point
+  for (prevTL = curTL = root(); curTL != NULL;) {
+    if (curTL->size() == size)  // exact match
+      break;
+    prevTL = curTL;
+    if (curTL->size() > size) { // follow left branch
+      curTL = curTL->left();
+    } else {                    // follow right branch
+      assert(curTL->size() < size, "size inconsistency");
+      curTL = curTL->right();
+    }
+  }
+  TreeChunk* tc = TreeChunk::as_TreeChunk(fc);
+  // This chunk is being returned to the binary tree.  Its embedded
+  // TreeList should be unused at this point.
+  tc->initialize();
+  if (curTL != NULL) {          // exact match
+    tc->set_list(curTL);
+    curTL->returnChunkAtTail(tc);
+  } else {                     // need a new node in tree
+    tc->clearNext();
+    tc->linkPrev(NULL);
+    TreeList* newTL = TreeList::as_TreeList(tc);
+    assert(((TreeChunk*)tc)->list() == newTL,
+      "List was not initialized correctly");
+    if (prevTL == NULL) {      // we are the only tree node
+      assert(root() == NULL, "control point invariant");
+      set_root(newTL);
+    } else {                   // insert under prevTL ...
+      if (prevTL->size() < size) {   // am right child
+        assert(prevTL->right() == NULL, "control point invariant");
+        prevTL->setRight(newTL);
+      } else {                       // am left child
+        assert(prevTL->size() > size && prevTL->left() == NULL, "cpt pt inv");
+        prevTL->setLeft(newTL);
+      }
+    }
+  }
+  assert(tc->list() != NULL, "Tree list should be set");
+
+  inc_totalSize(size);
+  // Method 'totalSizeInTree' walks through every block in the
+  // tree, so it can cause significant performance loss if there are
+  // many blocks in the tree
+  assert(!FLSVerifyDictionary || totalSizeInTree(root()) == totalSize(), "_totalSize inconsistency");
+  set_totalFreeBlocks(totalFreeBlocks() + 1);
+  if (FLSVerifyDictionary) {
+    verifyTree();
+  }
+}
+
+size_t BinaryTreeDictionary::maxChunkSize() const {
+  verify_par_locked();
+  TreeList* tc = root();
+  if (tc == NULL) return 0;
+  for (; tc->right() != NULL; tc = tc->right());
+  return tc->size();
+}
+
+size_t BinaryTreeDictionary::totalListLength(TreeList* tl) const {
+  size_t res;
+  res = tl->count();
+#ifdef ASSERT
+  size_t cnt;
+  FreeChunk* tc = tl->head();
+  for (cnt = 0; tc != NULL; tc = tc->next(), cnt++);
+  assert(res == cnt, "The count is not being maintained correctly");
+#endif
+  return res;
+}
+
+size_t BinaryTreeDictionary::totalSizeInTree(TreeList* tl) const {
+  if (tl == NULL)
+    return 0;
+  return (tl->size() * totalListLength(tl)) +
+         totalSizeInTree(tl->left())    +
+         totalSizeInTree(tl->right());
+}
+
+double BinaryTreeDictionary::sum_of_squared_block_sizes(TreeList* const tl) const {
+  if (tl == NULL) {
+    return 0.0;
+  }
+  double size = (double)(tl->size());
+  double curr = size * size * totalListLength(tl);
+  curr += sum_of_squared_block_sizes(tl->left());
+  curr += sum_of_squared_block_sizes(tl->right());
+  return curr;
+}
+
+size_t BinaryTreeDictionary::totalFreeBlocksInTree(TreeList* tl) const {
+  if (tl == NULL)
+    return 0;
+  return totalListLength(tl) +
+         totalFreeBlocksInTree(tl->left()) +
+         totalFreeBlocksInTree(tl->right());
+}
+
+size_t BinaryTreeDictionary::numFreeBlocks() const {
+  assert(totalFreeBlocksInTree(root()) == totalFreeBlocks(), 
+         "_totalFreeBlocks inconsistency");
+  return totalFreeBlocks();
+}
+
+size_t BinaryTreeDictionary::treeHeightHelper(TreeList* tl) const {
+  if (tl == NULL)
+    return 0;
+  return 1 + MAX2(treeHeightHelper(tl->left()),
+                  treeHeightHelper(tl->right()));
+}
+
+size_t BinaryTreeDictionary::treeHeight() const {
+  return treeHeightHelper(root());
+}
+
+size_t BinaryTreeDictionary::totalNodesHelper(TreeList* tl) const {
+  if (tl == NULL) {
+    return 0;
+  }
+  return 1 + totalNodesHelper(tl->left()) +
+    totalNodesHelper(tl->right());
+}
+
+size_t BinaryTreeDictionary::totalNodesInTree(TreeList* tl) const {
+  return totalNodesHelper(root());
+}
+
+void BinaryTreeDictionary::dictCensusUpdate(size_t size, bool split, bool birth){
+  TreeList* nd = findList(size);
+  if (nd) {
+    if (split) {
+      if (birth) {
+        nd->increment_splitBirths();
+        nd->increment_surplus();
+      }  else {
+        nd->increment_splitDeaths();
+        nd->decrement_surplus();
+      }
+    } else {
+      if (birth) {
+        nd->increment_coalBirths();
+        nd->increment_surplus();
+      } else {
+        nd->increment_coalDeaths();
+        nd->decrement_surplus();
+      }
+    }
+  }
+  // A list for this size may not be found (nd == 0) if
+  //   This is a death where the appropriate list is now
+  //     empty and has been removed from the list.
+  //   This is a birth associated with a LinAB.  The chunk
+  //     for the LinAB is not in the dictionary.
+}
+
+bool BinaryTreeDictionary::coalDictOverPopulated(size_t size) {
+  TreeList* list_of_size = findList(size);
+  // None of requested size implies overpopulated.
+  return list_of_size == NULL || list_of_size->coalDesired() <= 0 ||
+         list_of_size->count() > list_of_size->coalDesired();
+}
+
+// Closures for walking the binary tree.
+//   do_list() walks the free list in a node applying the closure
+//     to each free chunk in the list
+//   do_tree() walks the nodes in the binary tree applying do_list()
+//     to each list at each node.
+
+class TreeCensusClosure : public StackObj {
+ protected:
+  virtual void do_list(FreeList* fl) = 0;
+ public:
+  virtual void do_tree(TreeList* tl) = 0;
+};
+
+class AscendTreeCensusClosure : public TreeCensusClosure {
+ public:
+  void do_tree(TreeList* tl) {
+    if (tl != NULL) {
+      do_tree(tl->left());
+      do_list(tl);
+      do_tree(tl->right());
+    }
+  }
+};
+
+class DescendTreeCensusClosure : public TreeCensusClosure {
+ public:
+  void do_tree(TreeList* tl) {
+    if (tl != NULL) {
+      do_tree(tl->right());
+      do_list(tl);
+      do_tree(tl->left());
+    }
+  }
+};
+       
+// For each list in the tree, calculate the desired, desired
+// coalesce, count before sweep, and surplus before sweep.
+class BeginSweepClosure : public AscendTreeCensusClosure {
+  double _percentage;
+  float _inter_sweep_current;
+  float _inter_sweep_estimate;
+  
+ public:
+  BeginSweepClosure(double p, float inter_sweep_current,
+                              float inter_sweep_estimate) :
+   _percentage(p),
+   _inter_sweep_current(inter_sweep_current),
+   _inter_sweep_estimate(inter_sweep_estimate) { }
+
+  void do_list(FreeList* fl) {
+    double coalSurplusPercent = _percentage;
+    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate);
+    fl->set_coalDesired((ssize_t)((double)fl->desired() * coalSurplusPercent));
+    fl->set_beforeSweep(fl->count());
+    fl->set_bfrSurp(fl->surplus());
+  }
+};
+
+// Used to search the tree until a condition is met.
+// Similar to TreeCensusClosure but searches the
+// tree and returns promptly when found.
+
+// Abstract base for search walks; unlike TreeCensusClosure the
+// do_list()/do_tree() methods return true to stop the walk early.
+class TreeSearchClosure : public StackObj {
+ protected:
+  // Returns true if the search target was found in this node's list.
+  virtual bool do_list(FreeList* fl) = 0;
+ public:
+  // Returns true if the search target was found in the subtree at "tl".
+  virtual bool do_tree(TreeList* tl) = 0;
+};
+
+#if 0 //  Don't need this yet but here for symmetry.
+// NOTE: compiled out; kept only as the ascending counterpart of
+// DescendTreeSearchClosure below.
+class AscendTreeSearchClosure : public TreeSearchClosure {
+ public:
+  bool do_tree(TreeList* tl) {
+    if (tl != NULL) {
+      if (do_tree(tl->left())) return true;
+      if (do_list(tl)) return true;
+      if (do_tree(tl->right())) return true;
+    }
+    return false;
+  }
+};
+#endif
+
+// Search walk that visits lists in descending size order and stops
+// as soon as do_list() reports a hit.
+class DescendTreeSearchClosure : public TreeSearchClosure {
+ public:
+  bool do_tree(TreeList* tl) {
+    if (tl == NULL) return false;
+    // Short-circuits in the same right / node / left order as an
+    // explicit if-chain would.
+    return do_tree(tl->right()) || do_list(tl) || do_tree(tl->left());
+  }
+};
+
+// Searches the tree for a chunk that ends at the
+// specified address.
+class EndTreeSearchClosure : public DescendTreeSearchClosure {
+  HeapWord* _target;   // the end address we are looking for
+  FreeChunk* _found;   // matching chunk, or NULL while not yet found
+
+ public:
+  EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
+  // Linear scan of one node's free list for a chunk ending at _target.
+  bool do_list(FreeList* fl) {
+    for (FreeChunk* cur = fl->head(); cur != NULL; cur = cur->next()) {
+      if (cur->end() == _target) {
+        _found = cur;
+        return true;
+      }
+    }
+    return false;
+  }
+  FreeChunk* found() { return _found; }
+};
+
+// Return the free chunk in the dictionary that ends exactly at
+// "target", or NULL if there is none.
+FreeChunk* BinaryTreeDictionary::find_chunk_ends_at(HeapWord* target) const {
+  EndTreeSearchClosure etsc(target);
+  bool found_target = etsc.do_tree(root());
+  // The closure records a chunk if and only if it reported success.
+  assert(found_target == (etsc.found() != NULL), "Consistency check");
+  return etsc.found();
+}
+
+// Record pre-sweep statistics for every list in the tree; see
+// BeginSweepClosure for the per-list work.
+void BinaryTreeDictionary::beginSweepDictCensus(double coalSurplusPercent,
+  float inter_sweep_current, float inter_sweep_estimate) {
+  BeginSweepClosure closure(coalSurplusPercent, inter_sweep_current,
+                                                inter_sweep_estimate);
+  closure.do_tree(root());
+}
+
+// Closures and methods for calculating total bytes returned to the
+// free lists in the tree.
+// NOTE: the whole group below exists only in non-product builds
+// (NOT_PRODUCT); the header declares the methods PRODUCT_RETURN0 /
+// PRODUCT_RETURN accordingly.
+NOT_PRODUCT(
+  // Zeroes the returned-bytes counter of every list.
+  class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure {
+   public:
+    void do_list(FreeList* fl) {
+      fl->set_returnedBytes(0);
+    }
+  };
+  
+  void BinaryTreeDictionary::initializeDictReturnedBytes() {
+    InitializeDictReturnedBytesClosure idrb;
+    idrb.do_tree(root());
+  }
+  
+  // Sums the returned-bytes counters of all lists.
+  class ReturnedBytesClosure : public AscendTreeCensusClosure {
+    size_t _dictReturnedBytes;
+   public:
+    ReturnedBytesClosure() { _dictReturnedBytes = 0; }
+    void do_list(FreeList* fl) {
+      _dictReturnedBytes += fl->returnedBytes();
+    }
+    size_t dictReturnedBytes() { return _dictReturnedBytes; }
+  };
+  
+  size_t BinaryTreeDictionary::sumDictReturnedBytes() {
+    ReturnedBytesClosure rbc;
+    rbc.do_tree(root());
+  
+    return rbc.dictReturnedBytes();
+  }
+
+  // Count the number of entries in the tree.
+  // NOTE(review): counts tree NODES (one per size), not chunks,
+  // despite the header comment "total number of chunks" -- confirm
+  // intent with callers.
+  class treeCountClosure : public DescendTreeCensusClosure {
+   public:
+    uint count;
+    treeCountClosure(uint c) { count = c; }
+    void do_list(FreeList* fl) {
+      count++;
+    }
+  };
+
+  size_t BinaryTreeDictionary::totalCount() {
+    treeCountClosure ctc(0);
+    ctc.do_tree(root());
+    return ctc.count;
+  }
+)
+
+// Calculate surpluses for the lists in the tree.
+class setTreeSurplusClosure : public AscendTreeCensusClosure {
+  double percentage;   // the splitSurplusPercent
+ public:
+  setTreeSurplusClosure(double v) { percentage = v; }
+  // surplus = count - desired * splitSurplusPercent
+  void do_list(FreeList* fl) {
+    fl->set_surplus(fl->count() -
+                   (ssize_t)((double)fl->desired() * percentage));
+  }
+};
+
+void BinaryTreeDictionary::setTreeSurplus(double splitSurplusPercent) {
+  setTreeSurplusClosure closure(splitSurplusPercent);
+  closure.do_tree(root());
+}
+
+// Set hints for the lists in the tree.
+// Because this runs as a DescendTreeCensusClosure (largest sizes
+// first), "hint" always holds the size of the smallest list LARGER
+// than the current one that had a surplus -- i.e. the nearest place
+// a failed allocation of this size could be split from.
+class setTreeHintsClosure : public DescendTreeCensusClosure {
+  size_t hint;
+ public:
+  setTreeHintsClosure(size_t v) { hint = v; }
+  void do_list(FreeList* fl) {
+    fl->set_hint(hint);
+    assert(fl->hint() == 0 || fl->hint() > fl->size(), 
+      "Current hint is inconsistent");
+    // Only lists with a surplus become hints for smaller sizes.
+    if (fl->surplus() > 0) {
+      hint = fl->size();
+    }
+  }
+};
+
+void BinaryTreeDictionary::setTreeHints(void) {
+  // Start with hint 0, meaning "no larger list with a surplus yet".
+  setTreeHintsClosure sth(0);
+  sth.do_tree(root());
+}
+
+// Save count before previous sweep and splits and coalesces.
+class clearTreeCensusClosure : public AscendTreeCensusClosure {
+  // Snapshot the current count as the "previous sweep" count and
+  // zero the birth/death counters for the next census interval.
+  void do_list(FreeList* fl) {
+    fl->set_prevSweep(fl->count());
+    fl->set_coalBirths(0);
+    fl->set_coalDeaths(0);
+    fl->set_splitBirths(0);
+    fl->set_splitDeaths(0);
+  }
+};
+
+void BinaryTreeDictionary::clearTreeCensus(void) {
+  clearTreeCensusClosure ctc;
+  ctc.do_tree(root());
+}
+
+// Do reporting and post sweep clean up.
+void BinaryTreeDictionary::endSweepDictCensus(double splitSurplusPercent) {
+  // Does walking the tree 3 times hurt?
+  setTreeSurplus(splitSurplusPercent);
+  setTreeHints();
+  // Report before clearing, so the census columns still hold this
+  // sweep's statistics.
+  if (PrintGC && Verbose) {
+    reportStatistics();
+  }
+  clearTreeCensus();
+}
+    
+// Print summary statistics
+// Fix: the sizes printed here are size_t; "%d" truncates/misprints
+// them on LP64 platforms, so use the SIZE_FORMAT printf macro.
+void BinaryTreeDictionary::reportStatistics() const {
+  verify_par_locked();
+  gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
+         "------------------------------------\n");
+  size_t totalSize = totalChunkSize(debug_only(NULL));
+  size_t    freeBlocks = numFreeBlocks();
+  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
+  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSize());
+  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
+  if (freeBlocks > 0) {
+    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n",
+                        totalSize/freeBlocks);
+  }
+  gclog_or_tty->print("Tree      Height: " SIZE_FORMAT "\n", treeHeight());
+}
+
+// Print census information - counts, births, deaths, etc.
+// for each list in the tree.  Also print some summary
+// information.
+class printTreeCensusClosure : public AscendTreeCensusClosure {
+  size_t _totalFree;        // running total of free words seen
+  AllocationStats _totals;  // column-wise sums of the per-list stats
+  size_t _count;            // running total of chunks seen
+ 
+ public:
+  printTreeCensusClosure() {
+    _totalFree = 0;
+    _count = 0;
+    _totals.initialize();
+  }
+  AllocationStats* totals() { return &_totals; }
+  size_t count() { return _count; }
+  void increment_count_by(size_t v) { _count += v; }
+  size_t totalFree() { return _totalFree; }
+  void increment_totalFree_by(size_t v) { _totalFree += v; }
+  // Print one census row for "fl" and fold its statistics into the
+  // running totals.
+  void do_list(FreeList* fl) {
+    bool nl = false; // "maybe this is not needed" isNearLargestChunk(fl->head());
+
+    gclog_or_tty->print("%c %4d\t\t" "%7d\t" "%7d\t"
+               "%7d\t"      "%7d\t" "%7d\t" "%7d\t"
+               "%7d\t"      "%7d\t" "%7d\t"
+               "%7d\t" "\n",
+               " n"[nl], fl->size(), fl->bfrSurp(), fl->surplus(),
+               fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(),
+               fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(),
+               fl->splitDeaths());
+  
+    increment_totalFree_by(fl->count() * fl->size());
+    increment_count_by(fl->count());
+    totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp());
+    // Fix: the surplus total was accumulated onto splitDeaths() by
+    // mistake, corrupting the "surplus" summary line in the census
+    // printout.  It must build on the previous surplus total.
+    totals()->set_surplus(totals()->surplus()     + fl->surplus());
+    totals()->set_prevSweep(totals()->prevSweep()   + fl->prevSweep());
+    totals()->set_beforeSweep(totals()->beforeSweep() + fl->beforeSweep());
+    totals()->set_coalBirths(totals()->coalBirths()  + fl->coalBirths());
+    totals()->set_coalDeaths(totals()->coalDeaths()  + fl->coalDeaths());
+    totals()->set_splitBirths(totals()->splitBirths() + fl->splitBirths());
+    totals()->set_splitDeaths(totals()->splitDeaths() + fl->splitDeaths());
+  }
+};
+
+// Print the full census table (one row per list, ascending by size)
+// followed by a totals row and growth/deficit ratios.
+void BinaryTreeDictionary::printDictCensus(void) const {
+
+  gclog_or_tty->print("\nBinaryTree\n");
+  // Column headers for the per-list rows.
+  gclog_or_tty->print(
+             "%4s\t\t" "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
+             "%7s\t"   "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"     "\n",
+             "size",  "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
+             "count", "cBirths", "cDeaths", "sBirths", "sDeaths");
+
+  printTreeCensusClosure ptc;
+  ptc.do_tree(root());
+
+  // Repeat the headers (minus "size"/"desired") for the totals row.
+  gclog_or_tty->print(
+             "\t\t"    "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
+             "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"     "\n",
+                       "bfrsurp", "surplus", "prvSwep", "bfrSwep",
+             "count",  "cBirths", "cDeaths", "sBirths", "sDeaths");
+  gclog_or_tty->print(
+             "%s\t\t"  "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"
+             "%7d\t"   "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"    "\n",
+             "totl",
+             ptc.totals()->bfrSurp(),
+             ptc.totals()->surplus(),
+             ptc.totals()->prevSweep(),
+             ptc.totals()->beforeSweep(),
+             ptc.count(),
+             ptc.totals()->coalBirths(),
+             ptc.totals()->coalDeaths(),
+             ptc.totals()->splitBirths(),
+             ptc.totals()->splitDeaths());
+  // Growth and deficit are printed as ratios of prevSweep / desired,
+  // guarding against division by zero.
+  gclog_or_tty->print("totalFree(words): %7d growth: %8.5f  deficit: %8.5f\n",
+              ptc.totalFree(),
+              (double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths()
+                       -ptc.totals()->splitDeaths()-ptc.totals()->coalDeaths())
+              /(ptc.totals()->prevSweep() != 0 ?
+                (double)ptc.totals()->prevSweep() : 1.0),
+             (double)(ptc.totals()->desired() - ptc.count())
+             /(ptc.totals()->desired() != 0 ?
+               (double)ptc.totals()->desired() : 1.0));
+}
+
+// Verify the following tree invariants:
+// . _root has no parent
+// . parent and child point to each other
+// . each node's key correctly related to that of its child(ren)
+void BinaryTreeDictionary::verifyTree() const {
+  // Fix: guarantee message had a typo ("should't").
+  guarantee(root() == NULL || totalFreeBlocks() == 0 ||
+    totalSize() != 0, "_totalSize shouldn't be 0?");
+  guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
+  verifyTreeHelper(root());
+}
+
+// Walk the chunks on list "tl", asserting that every non-NULL prev
+// pointer refers to a free chunk; returns the number of chunks seen.
+size_t BinaryTreeDictionary::verifyPrevFreePtrs(TreeList* tl) {
+  size_t ct = 0;
+  FreeChunk* curFC = tl->head();
+  while (curFC != NULL) {
+    ct++;
+    assert(curFC->prev() == NULL || curFC->prev()->isFree(),
+      "Chunk should be free");
+    curFC = curFC->next();
+  }
+  return ct;
+}
+
+// Note: this helper is recursive rather than iterative, so use with
+// caution on very deep trees; and watch out for stack overflow errors;
+// In general, to be used only for debugging.
+void BinaryTreeDictionary::verifyTreeHelper(TreeList* tl) const {
+  if (tl == NULL)
+    return;
+  // Fixes: grammar in the first message, stray ";;" after the
+  // parent<-/->right check, and the fourth message said "left"
+  // where the check is about the right child.
+  guarantee(tl->size() != 0, "A list must have a size");
+  guarantee(tl->left()  == NULL || tl->left()->parent()  == tl,
+         "parent<-/->left");
+  guarantee(tl->right() == NULL || tl->right()->parent() == tl,
+         "parent<-/->right");
+  guarantee(tl->left() == NULL  || tl->left()->size()    <  tl->size(),
+         "parent !> left");
+  guarantee(tl->right() == NULL || tl->right()->size()   >  tl->size(), 
+         "parent !< right");
+  guarantee(tl->head() == NULL || tl->head()->isFree(), "!Free");
+  guarantee(tl->head() == NULL || tl->head_as_TreeChunk()->list() == tl, 
+    "list inconsistency");
+  guarantee(tl->count() > 0 || (tl->head() == NULL && tl->tail() == NULL),
+    "list count is inconsistent");
+  guarantee(tl->count() > 1 || tl->head() == tl->tail(),
+    "list is incorrectly constructed");
+  // Cross-check the node's count against a walk of its chunk list.
+  size_t count = verifyPrevFreePtrs(tl);
+  guarantee(count == (size_t)tl->count(), "Node count is incorrect");
+  if (tl->head() != NULL) {
+    tl->head_as_TreeChunk()->verifyTreeChunkList();
+  }
+  verifyTreeHelper(tl->left());
+  verifyTreeHelper(tl->right());
+}
+
+// Full dictionary verification: tree invariants plus a cross-check of
+// the cached _totalSize against a fresh walk of the tree.
+void BinaryTreeDictionary::verify() const {
+  verifyTree();
+  guarantee(totalSize() == totalSizeInTree(root()), "Total Size inconsistency");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,286 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)binaryTreeDictionary.hpp	1.26 07/05/05 17:05:41 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+/* 
+ * A binary tree based search structure for free blocks.
+ * This is currently used in the Concurrent Mark&Sweep implementation.
+ */
+
+// A TreeList is a FreeList which can be used to maintain a
+// binary tree of free lists.
+
+class TreeChunk;
+class BinaryTreeDictionary;
+class AscendTreeCensusClosure;
+class DescendTreeCensusClosure;
+class DescendTreeSearchClosure;
+
+class TreeList: public FreeList {
+  friend class TreeChunk;
+  friend class BinaryTreeDictionary;
+  friend class AscendTreeCensusClosure;
+  friend class DescendTreeCensusClosure;
+  friend class DescendTreeSearchClosure;
+  TreeList* _parent;   // NULL for the root node
+  TreeList* _left;     // subtree of strictly smaller sizes
+  TreeList* _right;    // subtree of strictly larger sizes
+
+ protected:
+  TreeList* parent() const { return _parent; }
+  TreeList* left()   const { return _left;   }
+  TreeList* right()  const { return _right;  }
+
+  // Accessors for links in tree.
+
+  // Setting a child also fixes up the child's parent pointer.
+  void setLeft(TreeList* tl) {
+    _left   = tl;
+    if (tl != NULL)
+      tl->setParent(this);
+  }
+  void setRight(TreeList* tl) {
+    _right  = tl;
+    if (tl != NULL)
+      tl->setParent(this);
+  }
+  void setParent(TreeList* tl)  { _parent = tl;   }
+
+  void clearLeft()               { _left = NULL;   }
+  void clearRight()              { _right = NULL;  }
+  void clearParent()             { _parent = NULL; }
+  // Fix: was "clearLeft(); clearRight(), clearParent();" -- the comma
+  // operator happened to have the same effect, but the intent is three
+  // separate statements.
+  void initialize()              { clearLeft(); clearRight(); clearParent(); }
+
+  // For constructing a TreeList from a Tree chunk or
+  // address and size.
+  static TreeList* as_TreeList(TreeChunk* tc);
+  static TreeList* as_TreeList(HeapWord* addr, size_t size);
+
+  // Returns the head of the free list as a pointer to a TreeChunk.
+  TreeChunk* head_as_TreeChunk();
+
+  // Returns the first available chunk in the free list as a pointer
+  // to a TreeChunk.
+  TreeChunk* first_available();
+
+  // removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
+  // If "tc" is the first chunk in the list, it is also the
+  // TreeList that is the node in the tree.  removeChunkReplaceIfNeeded()
+  // returns the possibly replaced TreeList* for the node in
+  // the tree.  It also updates the parent of the original
+  // node to point to the new node.
+  TreeList* removeChunkReplaceIfNeeded(TreeChunk* tc);
+  // See FreeList.
+  void returnChunkAtHead(TreeChunk* tc);
+  void returnChunkAtTail(TreeChunk* tc);
+};
+
+// A TreeChunk is a subclass of a FreeChunk that additionally
+// maintains a pointer to the free list on which it is currently
+// linked.  
+// A TreeChunk is also used as a node in the binary tree.  This
+// allows the binary tree to be maintained without any additional
+// storage (the free chunks are used).  In a binary tree the first
+// chunk in the free list is also the tree node.  Note that the
+// TreeChunk has an embedded TreeList for this purpose.  Because
+// the first chunk in the list is distinguished in this fashion
+// (also is the node in the tree), it is the last chunk to be found
+// on the free list for a node in the tree and is only removed if
+// it is the last chunk on the free list.
+
+class TreeChunk : public FreeChunk {
+  friend class TreeList;
+  TreeList* _list;          // if non-NULL, the list this chunk is on
+  TreeList _embedded_list;  // used as the tree node when this chunk
+                            // is the first chunk of its list
+ protected:
+  TreeList* embedded_list() const { return (TreeList*) &_embedded_list; }
+  void set_embedded_list(TreeList* v) { _embedded_list = *v; }
+ public:
+  TreeList* list() { return _list; }
+  void set_list(TreeList* v) { _list = v; }
+  static TreeChunk* as_TreeChunk(FreeChunk* fc);
+  // Initialize fields in a TreeChunk that should be
+  // initialized when the TreeChunk is being added to
+  // a free list in the tree.
+  void initialize() { embedded_list()->initialize(); }
+
+  // debugging
+  void verifyTreeChunkList() const;
+};
+
+// Smallest chunk (in heap words) that can carry an embedded TreeList,
+// and hence the minimum size the dictionary can manage.
+const size_t MIN_TREE_CHUNK_SIZE  = sizeof(TreeChunk)/HeapWordSize;
+
+// A FreeBlockDictionary implemented as a binary search tree keyed by
+// chunk size; each node is a TreeList of same-sized free chunks.
+class BinaryTreeDictionary: public FreeBlockDictionary {
+  bool       _splay;            // whether getChunk() semi-splays the tree
+  size_t     _totalSize;        // cached sum of all chunk sizes (words)
+  size_t     _totalFreeBlocks;  // cached number of chunks in the tree
+  TreeList* _root;
+
+  // private accessors
+  bool splay() const { return _splay; }
+  void set_splay(bool v) { _splay = v; }
+  size_t totalSize() const { return _totalSize; }
+  void set_totalSize(size_t v) { _totalSize = v; }
+  virtual void inc_totalSize(size_t v);
+  virtual void dec_totalSize(size_t v);
+  size_t totalFreeBlocks() const { return _totalFreeBlocks; }
+  void set_totalFreeBlocks(size_t v) { _totalFreeBlocks = v; }
+  TreeList* root() const { return _root; }
+  void set_root(TreeList* v) { _root = v; }
+
+  // Remove a chunk of size "size" or larger from the tree and
+  // return it.  If the chunk 
+  // is the last chunk of that size, remove the node for that size 
+  // from the tree.
+  TreeChunk* getChunkFromTree(size_t size, Dither dither, bool splay);
+  // Return a list of the specified size or NULL from the tree.
+  // The list is not removed from the tree.
+  TreeList* findList (size_t size) const;
+  // Remove this chunk from the tree.  If the removal results
+  // in an empty list in the tree, remove the empty list.
+  TreeChunk* removeChunkFromTree(TreeChunk* tc);
+  // Remove the node in the trees starting at tl that has the
+  // minimum value and return it.  Repair the tree as needed.
+  TreeList* removeTreeMinimum(TreeList* tl);
+  void       semiSplayStep(TreeList* tl);
+  // Add this free chunk to the tree.
+  void       insertChunkInTree(FreeChunk* freeChunk);
+ public:
+  void       verifyTree() const;
+  // verify that the given chunk is in the tree.
+  bool       verifyChunkInFreeLists(FreeChunk* tc) const;
+ private:
+  void          verifyTreeHelper(TreeList* tl) const;
+  static size_t verifyPrevFreePtrs(TreeList* tl);
+
+  // Returns the total number of chunks in the list.
+  size_t     totalListLength(TreeList* tl) const;
+  // Returns the total number of words in the chunks in the tree
+  // starting at "tl".
+  size_t     totalSizeInTree(TreeList* tl) const;
+  // Returns the sum of the square of the size of each block
+  // in the tree starting at "tl".
+  double     sum_of_squared_block_sizes(TreeList* const tl) const;
+  // Returns the total number of free blocks in the tree starting
+  // at "tl".
+  size_t     totalFreeBlocksInTree(TreeList* tl) const;
+  size_t     numFreeBlocks() const;
+  size_t     treeHeight() const;
+  size_t     treeHeightHelper(TreeList* tl) const;
+  size_t     totalNodesInTree(TreeList* tl) const;
+  size_t     totalNodesHelper(TreeList* tl) const;
+
+ public:
+  // Constructor
+  BinaryTreeDictionary(MemRegion mr, bool splay = false);
+
+  // Reset the dictionary to the initial conditions with
+  // a single free chunk.
+  void       reset(MemRegion mr);
+  void       reset(HeapWord* addr, size_t size);
+  // Reset the dictionary to be empty.
+  void       reset();
+
+  // Return a chunk of size "size" or greater from
+  // the tree.  
+  // want a better dynamic splay strategy for the future.
+  FreeChunk* getChunk(size_t size, Dither dither) {
+    verify_par_locked();
+    FreeChunk* res = getChunkFromTree(size, dither, splay());
+    assert(res == NULL || res->isFree(),
+           "Should be returning a free chunk");
+    return res;
+  }
+
+  void returnChunk(FreeChunk* chunk) {
+    verify_par_locked();
+    insertChunkInTree(chunk);
+  }
+
+  void removeChunk(FreeChunk* chunk) {
+    verify_par_locked();
+    removeChunkFromTree((TreeChunk*)chunk);
+    assert(chunk->isFree(), "Should still be a free chunk");
+  }
+
+  size_t     maxChunkSize() const;
+  // Returns the cached total size; in debug builds, when the caller
+  // holds "lock", cross-checks the cache against a tree walk.
+  size_t     totalChunkSize(debug_only(const Mutex* lock)) const {
+    debug_only(
+      if (lock != NULL && lock->owned_by_self()) {
+        assert(totalSizeInTree(root()) == totalSize(),
+               "_totalSize inconsistency");
+      }
+    )
+    return totalSize();
+  }
+
+  size_t     minSize() const {
+    return MIN_TREE_CHUNK_SIZE;
+  }
+
+  double     sum_of_squared_block_sizes() const {
+    return sum_of_squared_block_sizes(root());
+  }
+
+  FreeChunk* find_chunk_ends_at(HeapWord* target) const;
+
+  // Find the list with size "size" in the binary tree and update
+  // the statistics in the list according to "split" (chunk was
+  // split or coalesce) and "birth" (chunk was added or removed).
+  void       dictCensusUpdate(size_t size, bool split, bool birth);
+  // Return true if the dictionary is overpopulated (more chunks of
+  // this size than desired) for size "size".
+  bool       coalDictOverPopulated(size_t size);
+  // Methods called at the beginning of a sweep to prepare the
+  // statistics for the sweep.
+  void       beginSweepDictCensus(double coalSurplusPercent,
+                                  float sweep_current,
+                                  float sweep_estimate);
+  // Methods called after the end of a sweep to modify the
+  // statistics for the sweep.
+  void       endSweepDictCensus(double splitSurplusPercent);
+  // Return the largest free chunk in the tree.
+  FreeChunk* findLargestDict() const;
+  // Accessors for statistics
+  void       setTreeSurplus(double splitSurplusPercent);
+  void       setTreeHints(void);
+  // Reset statistics for all the lists in the tree.
+  void       clearTreeCensus(void);
+  // Print the statistics for all the lists in the tree.  Also may
+  // print out summaries.
+  void       printDictCensus(void) const;
+
+  // For debugging.  Returns the sum of the _returnedBytes for
+  // all lists in the tree.
+  size_t     sumDictReturnedBytes()	PRODUCT_RETURN0;
+  // Sets the _returnedBytes for all the lists in the tree to zero.
+  void	     initializeDictReturnedBytes()	PRODUCT_RETURN;
+  // For debugging.  Return the total number of chunks in the dictionary.
+  size_t     totalCount()	PRODUCT_RETURN0;
+
+  void       reportStatistics() const;
+
+  void       verify() const;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,99 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)cmsLockVerifier.cpp	1.14 07/05/05 17:05:44 JVM"
+#endif
+/*
+ * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_cmsLockVerifier.cpp.incl"
+
+///////////// Locking verification specific to CMS //////////////
+// Much like "assert_lock_strong()", except that it relaxes the
+// assertion somewhat for the parallel GC case, where VM thread
+// or the CMS thread might hold the lock on behalf of the parallel
+// threads. The second argument is in support of an extra locking
+// check for CFL spaces' free list locks.
+#ifndef PRODUCT
+// Relaxed lock assertion for CMS: depending on which thread we are
+// running on, either require the lock directly (VM/CMS/Java threads)
+// or accept that the VM or CMS thread holds it on our behalf
+// (parallel GC worker threads).  "lock == NULL" denotes a lock-free
+// structure protected by the CMS token instead of a mutex.
+void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
+  if (!Universe::is_fully_initialized()) {
+    // Too early in startup for any of the checks below to be meaningful.
+    return;
+  }
+
+  Thread* myThread = Thread::current();
+
+  if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
+    assert(p_lock == NULL, "Unexpected state");
+    if (myThread->is_ConcurrentGC_thread()) {
+      // This test might have to change in the future, if there can be
+      // multiple peer CMS threads.  But for now, if we're testing the CMS
+      assert(myThread == ConcurrentMarkSweepThread::cmst(),
+	     "In CMS, CMS thread is the only Conc GC thread.");
+      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+             "CMS thread should have CMS token");
+    } else if (myThread->is_VM_thread()) {
+      assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
+             "VM thread should have CMS token");
+    } else {
+      // Token should be held on our behalf by one of the other
+      // of CMS or VM thread; not enough easily testable
+      // state info to test which here.
+      assert(myThread->is_GC_task_thread(), "Unexpected thread type");
+    }
+    return;
+  } 
+
+  if (ParallelGCThreads == 0) {
+    // No parallel workers: the caller itself must hold the lock.
+    assert_lock_strong(lock);
+  } else {
+    if (myThread->is_VM_thread()
+        || myThread->is_ConcurrentGC_thread()
+        || myThread->is_Java_thread()) {
+      // Make sure that we are holding the associated lock.
+      assert_lock_strong(lock);
+      // The checking of p_lock is a spl case for CFLS' free list
+      // locks: we make sure that none of the parallel GC work gang
+      // threads are holding "sub-locks" of freeListLock(). We check only
+      // the parDictionaryAllocLock because the others are too numerous.
+      // This spl case code is somewhat ugly and any improvements
+      // are welcome XXX FIX ME!!
+      if (p_lock != NULL) {
+        assert(!p_lock->is_locked() || p_lock->owned_by_self(),
+               "Possible race between this and parallel GC threads");
+      }
+    } else if (myThread->is_GC_task_thread()) {
+      // Make sure that the VM or CMS thread holds lock on our behalf
+      // XXX If there were a concept of a gang_master for a (set of)
+      // gang_workers, we could have used the identity of that thread
+      // for checking ownership here; for now we just disjunct.
+      assert(lock->owner() == VMThread::vm_thread() ||
+             lock->owner() == ConcurrentMarkSweepThread::cmst(),
+             "Should be locked by VM thread or CMS thread on my behalf");
+    } else {
+      // Make sure we didn't miss some obscure corner case
+      ShouldNotReachHere();
+    }
+  }
+}
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,40 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)cmsLockVerifier.hpp	1.9 07/05/05 17:05:44 JVM"
+#endif
+/*
+ * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+///////////// Locking verification specific to CMS //////////////
+// Much like "assert_lock_strong()", except
+// that it relaxes the assertion somewhat for the parallel GC case, where
+// main GC thread or the CMS thread might hold the lock on behalf of
+// the parallel threads.
+class CMSLockVerifier: AllStatic {
+ public:
+  // "p_lock" is an optional extra check against CFL spaces'
+  // parDictionaryAllocLock; compiled out in product builds.
+  static void assert_locked(const Mutex* lock, const Mutex* p_lock)
+    PRODUCT_RETURN;
+  // Convenience overload with no extra parallel-allocation lock check.
+  static void assert_locked(const Mutex* lock) {
+    assert_locked(lock, NULL);
+  }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,2849 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)compactibleFreeListSpace.cpp	1.142 07/05/17 15:50:57 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_compactibleFreeListSpace.cpp.incl"
+
+/////////////////////////////////////////////////////////////////////////
+//// CompactibleFreeListSpace
+/////////////////////////////////////////////////////////////////////////
+
+// Highest ranked free list lock rank.  Each CompactibleFreeListSpace
+// c'tor consumes (post-decrements) one rank from this counter for its
+// _freelistLock, so construction order determines lock ranks.
+int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
+
+// Constructor.
+// Initializes the block offset table, the free-list locks, the block
+// dictionary (all of "mr" enters the dictionary as one big chunk), the
+// (lazily filled) indexed free lists and, when adaptive free lists are
+// not in use, the small linear allocation block.
+CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
+  MemRegion mr, bool use_adaptive_freelists,
+  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
+  _dictionaryChoice(dictionaryChoice),
+  _adaptive_freelists(use_adaptive_freelists),
+  _bt(bs, mr),
+  // free list locks are in the range of values taken by _lockRank
+  // This range currently is [_leaf+2, _leaf+3]
+  // Note: this requires that CFLspace c'tors
+  // are called serially in the order in which the locks are
+  // are acquired in the program text. This is true today.
+  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
+  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
+			  "CompactibleFreeListSpace._dict_par_lock", true),
+  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+                    CMSRescanMultiple),
+  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+                    CMSConcMarkMultiple),
+  _collector(NULL)
+{
+  _bt.set_space(this);
+  initialize(mr, true);
+  // We have all of "mr", all of which we place in the dictionary
+  // as one big chunk. We'll need to decide here which of several
+  // possible alternative dictionary implementations to use. For
+  // now the choice is easy, since we have only one working
+  // implementation, namely, the simple binary tree (splaying
+  // temporarily disabled).
+  switch (dictionaryChoice) {
+    case FreeBlockDictionary::dictionaryBinaryTree:
+      _dictionary = new BinaryTreeDictionary(mr);
+      break;
+    case FreeBlockDictionary::dictionarySplayTree:
+    case FreeBlockDictionary::dictionarySkipList:
+    default:
+      warning("dictionaryChoice: selected option not understood; using"
+              " default BinaryTreeDictionary implementation instead.");
+      _dictionary = new BinaryTreeDictionary(mr);
+      break;
+  }
+  // Record the birth of the initial chunk covering the whole region.
+  splitBirth(mr.word_size());
+  assert(_dictionary != NULL, "CMS dictionary initialization");
+  // The indexed free lists are initially all empty and are lazily
+  // filled in on demand. Initialize the array elements to NULL.
+  initializeIndexedFreeListArray();
+
+  // Not using adaptive free lists assumes that allocation is first
+  // from the linAB's.  Also a cms perm gen which can be compacted
+  // has to have the klass's klassKlass allocated at a lower
+  // address in the heap than the klass so that the klassKlass is
+  // moved to its new location before the klass is moved.
+  // Set the _refillSize for the linear allocation blocks
+  if (!use_adaptive_freelists) {
+    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
+    // The small linAB initially has all the space and will allocate
+    // a chunk of any size.
+    HeapWord* addr = (HeapWord*) fc;
+    _smallLinearAllocBlock.set(addr, fc->size() , 
+      1024*SmallForLinearAlloc, fc->size());
+    // Note that _unallocated_block is not updated here.
+    // Allocations from the linear allocation block should
+    // update it.
+  } else {
+    // Adaptive free lists: start with an empty linAB.
+    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, 
+			       SmallForLinearAlloc);
+  }
+  // CMSIndexedFreeListReplenish should be at least 1
+  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
+  _promoInfo.setSpace(this);
+  if (UseCMSBestFit) {
+    _fitStrategy = FreeBlockBestFitFirst;
+  } else {
+    _fitStrategy = FreeBlockStrategyNone;
+  }
+  checkFreeListConsistency();
+
+  // Initialize locks for parallel case.
+  if (ParallelGCThreads > 0) {
+    // One lock per (in-use) indexed free list, ranked just below
+    // ExpandHeap_lock; debug builds also register the lock with the
+    // list so accesses can be checked.
+    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
+					      "a freelist par lock",
+					      true);
+      if (_indexedFreeListParLocks[i] == NULL) 
+	vm_exit_during_initialization("Could not allocate a par lock");
+      DEBUG_ONLY(
+        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
+      )
+    }
+    _dictionary->set_par_lock(&_parDictionaryAllocLock);
+  }
+}
+
+// Like CompactibleSpace forward() but always calls cross_threshold() to
+// update the block offset table.  Removed initialize_threshold call because
+// CFLS does not use a block offset array for contiguous spaces.
+// Installs a forwarding pointer in "q" (a live object of "size" words)
+// pointing at its post-compaction address, possibly switching to the
+// next compaction space, and returns the updated compact_top.
+HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, 
+				    CompactPoint* cp, HeapWord* compact_top) {
+  // q is alive
+  // First check if we should switch compaction space
+  assert(this == cp->space, "'this' should be current compaction space.");
+  size_t compaction_max_size = pointer_delta(end(), compact_top);
+  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
+    "virtual adjustObjectSize_v() method is not correct");
+  size_t adjusted_size = adjustObjectSize(size);
+  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
+         "no small fragments allowed");
+  assert(minimum_free_block_size() == MinChunkSize,
+         "for de-virtualized reference below");
+  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
+  if (adjusted_size + MinChunkSize > compaction_max_size &&
+      adjusted_size != compaction_max_size) {
+    do {
+      // switch to next compaction space
+      cp->space->set_compaction_top(compact_top);
+      cp->space = cp->space->next_compaction_space();
+      if (cp->space == NULL) {
+        // Ran out of spaces in this generation; move to the previous one.
+        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
+        assert(cp->gen != NULL, "compaction must succeed");
+        cp->space = cp->gen->first_compaction_space();
+        assert(cp->space != NULL, "generation must have a first compaction space");
+      }
+      compact_top = cp->space->bottom();
+      cp->space->set_compaction_top(compact_top);
+      // The correct adjusted_size may not be the same as that for this method
+      // (i.e., cp->space may no longer be "this" so adjust the size again.
+      // Use the virtual method which is not used above to save the virtual
+      // dispatch.
+      adjusted_size = cp->space->adjust_object_size_v(size);
+      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
+      assert(cp->space->minimum_free_block_size() == 0, "just checking");
+    } while (adjusted_size > compaction_max_size);
+  }
+
+  // store the forwarding pointer into the mark word
+  if ((HeapWord*)q != compact_top) {
+    q->forward_to(oop(compact_top));
+    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
+  } else {
+    // if the object isn't moving we can just set the mark to the default
+    // mark and handle it specially later on.  
+    q->init_mark();
+    assert(q->forwardee() == NULL, "should be forwarded to NULL");
+  }
+
+  debug_only(MarkSweep::register_live_oop(q, adjusted_size));
+  compact_top += adjusted_size;
+
+  // we need to update the offset table so that the beginnings of objects can be
+  // found during scavenge.  Note that we are updating the offset table based on
+  // where the object will be once the compaction phase finishes.
+
+  // Always call cross_threshold().  A contiguous space can only call it when
+  // the compaction_top exceeds the current threshold but not for an
+  // non-contiguous space.
+  cp->threshold =
+    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
+  return compact_top;
+}
+
+// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
+// and use of single_block instead of alloc_block.  The name here is not really
+// appropriate - maybe a more general name could be invented for both the
+// contiguous and noncontiguous spaces.
+
+// Records [start, the_end) as a single block in the block offset table
+// and returns the (unchanged) end of the space as the new threshold.
+HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
+  _bt.single_block(start, the_end);
+  return end();
+}
+
+// Stamp each indexed free list with its word size and verify that it
+// starts out completely empty; the lists themselves fill in lazily.
+void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
+  for (size_t sz = 0; sz < IndexSetSize; ++sz) {
+    // On platforms where objects are double-word aligned the odd slots
+    // never get used; mapping word-size -> slot directly is still the
+    // most convenient scheme.
+    _indexedFreeList[sz].reset(IndexSetSize);
+    _indexedFreeList[sz].set_size(sz);
+    assert(_indexedFreeList[sz].count() == 0, "reset check failed");
+    assert(_indexedFreeList[sz].head() == NULL, "reset check failed");
+    assert(_indexedFreeList[sz].tail() == NULL, "reset check failed");
+    assert(_indexedFreeList[sz].hint() == IndexSetSize, "reset check failed");
+  }
+}
+
+// Re-empty every indexed free list (slot 0 is never used, hence i = 1).
+// Use size_t for the index -- matching IndexSetSize and the sibling
+// initializeIndexedFreeListArray() -- to avoid a signed/unsigned
+// comparison and the cast in the size assertion below.
+void CompactibleFreeListSpace::resetIndexedFreeListArray() {
+  for (size_t i = 1; i < IndexSetSize; i++) {
+    assert(_indexedFreeList[i].size() == i, 
+      "Indexed free list sizes are incorrect");
+    _indexedFreeList[i].reset(IndexSetSize);
+    assert(_indexedFreeList[i].count() == 0, "reset check failed");
+    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
+    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
+    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
+  }
+}
+
+// Resets the space so that all free storage is described by "mr":
+// empties the indexed lists and dictionary, reinitializes the BOT, and
+// hands "mr" back as one free chunk (to the dictionary or, if small
+// enough, to an indexed list).  Also clears promotion info and the linAB.
+void CompactibleFreeListSpace::reset(MemRegion mr) {
+  resetIndexedFreeListArray();
+  dictionary()->reset();
+  if (BlockOffsetArrayUseUnallocatedBlock) {
+    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
+    // Everything's allocated until proven otherwise.
+    _bt.set_unallocated_block(end());
+  }
+  if (!mr.is_empty()) {
+    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
+    _bt.single_block(mr.start(), mr.word_size());
+    FreeChunk* fc = (FreeChunk*) mr.start();
+    fc->setSize(mr.word_size());
+    // Chunks of at least IndexSetSize words live in the dictionary;
+    // smaller ones go on the matching indexed free list.
+    if (mr.word_size() >= IndexSetSize ) {
+      returnChunkToDictionary(fc);
+    } else {
+      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
+      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
+    }
+  }
+  _promoInfo.reset();
+  // Invalidate the linear allocation block.
+  _smallLinearAllocBlock._ptr = NULL;
+  _smallLinearAllocBlock._word_size = 0;
+}
+
+// After a (stop-world) compaction all live data sits below
+// compaction_top(); everything above it becomes one free chunk.
+void CompactibleFreeListSpace::reset_after_compaction() {
+  // Reset the space to the new reality - one free chunk.
+  MemRegion mr(compaction_top(), end());
+  reset(mr);
+  // Now refill the linear allocation block(s) if possible.
+  if (_adaptive_freelists) {
+    refillLinearAllocBlocksIfNeeded();
+  } else {
+    // Place as much of mr in the linAB as we can get,
+    // provided it was big enough to go into the dictionary.
+    FreeChunk* fc = dictionary()->findLargestDict();
+    if (fc != NULL) {
+      // reset() put all of "mr" into the dictionary as a single chunk,
+      // so the largest chunk should be exactly "mr".
+      assert(fc->size() == mr.word_size(),
+             "Why was the chunk broken up?");
+      removeChunkFromDictionary(fc);
+      HeapWord* addr = (HeapWord*) fc;
+      _smallLinearAllocBlock.set(addr, fc->size() ,
+        1024*SmallForLinearAlloc, fc->size());
+      // Note that _unallocated_block is not updated here.
+    }
+  }
+}
+
+// Walks the entire dictionary looking for a free chunk that is
+// coterminal with the end of the space, returning it if found.
+// Use with caution: this can amount to a complete walk of a
+// potentially large tree.
+FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
+  assert_lock_strong(&_freelistLock);
+  FreeChunk* tail_chunk = dictionary()->find_chunk_ends_at(end());
+  return tail_chunk;
+}
+
+
+#ifndef PRODUCT
+// Zero the "returned bytes" statistic of every in-use indexed list.
+void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
+  size_t sz = IndexSetStart;
+  while (sz < IndexSetSize) {
+    _indexedFreeList[sz].allocation_stats()->set_returnedBytes(0);
+    sz += IndexSetStride;
+  }
+}
+
+// Total "returned bytes" accumulated across the in-use indexed lists.
+size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
+  size_t total = 0;
+  size_t sz = IndexSetStart;
+  while (sz < IndexSetSize) {
+    total += _indexedFreeList[sz].allocation_stats()->returnedBytes();
+    sz += IndexSetStride;
+  }
+  return total;
+}
+
+// Sum the cached chunk counts of the indexed free lists; debug builds
+// cross-check each cached count against an explicit list walk.
+size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
+  size_t count = 0;
+  // size_t index avoids the signed/unsigned comparison against
+  // IndexSetSize that the previous int index incurred.
+  for (size_t i = MinChunkSize; i < IndexSetSize; i++) {
+    debug_only(
+      ssize_t total_list_count = 0;
+      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
+         fc = fc->next()) {
+        total_list_count++;
+      }
+      assert(total_list_count ==  _indexedFreeList[i].count(),
+	"Count in list is incorrect");
+    )
+    count += _indexedFreeList[i].count();
+  }
+  return count;
+}
+
+// Total number of free chunks anywhere in the space: indexed lists,
+// dictionary, plus the linAB if it currently holds space.
+size_t CompactibleFreeListSpace::totalCount() {
+  size_t num = totalCountInIndexedFreeLists() + dictionary()->totalCount();
+  // A non-empty linear allocation block counts as one more chunk.
+  if (_smallLinearAllocBlock._word_size != 0) {
+    num += 1;
+  }
+  return num;
+}
+#endif
+
+// A block starting at "p" is free iff its FreeChunk header says so.
+bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
+  FreeChunk* chunk = (FreeChunk*) p;
+  return chunk->isFree();
+}
+
+// Used space is whatever part of capacity is not currently free.
+size_t CompactibleFreeListSpace::used() const {
+  size_t total_capacity = capacity();
+  size_t free_space     = free();
+  return total_capacity - free_space;
+}
+
+// Free space in bytes: dictionary + indexed lists + linAB.
+// "MT-safe, but not MT-precise"(TM): calling this while the data
+// structures are in flux (e.g. due to concurrent direct allocation or
+// promotion) yields only an approximation, because the three components
+// below are not read atomically and totalSizeInIndexedFreeLists() is
+// itself non-atomic.  It is "MT-safe" in that it never crashes by
+// chasing pointers that disappear mid-walk.  The normal use is during a
+// resize at the end of GC, when the value is exact; asynchronous
+// readers such as the jvmstat "perf-sampler" may see it flicker.
+size_t CompactibleFreeListSpace::free() const {
+  assert(_dictionary != NULL, "No _dictionary?");
+  size_t dict_words    = _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
+  size_t indexed_words = totalSizeInIndexedFreeLists();
+  size_t linAB_words   = _smallLinearAllocBlock._word_size;
+  return (dict_words + indexed_words + linAB_words) * HeapWordSize;
+}
+
+// Largest allocation (in words) currently satisfiable: the max of the
+// dictionary's largest chunk, the (linAB-capped) linear block, and the
+// largest non-empty indexed free list.
+size_t CompactibleFreeListSpace::max_alloc_in_words() const {
+  assert(_dictionary != NULL, "No _dictionary?");
+  assert_locked();
+  size_t res = _dictionary->maxChunkSize();
+  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
+                       (size_t) SmallForLinearAlloc - 1));
+  // XXX the following could potentially be pretty slow;
+  // should one, pesimally for the rare cases when res
+  // caclulated above is less than IndexSetSize,
+  // just return res calculated above? My reasoning was that
+  // those cases will be so rare that the extra time spent doesn't
+  // really matter....
+  // Note: do not change the loop test i >= res + IndexSetStride
+  // to i > res below, because i is unsigned and res may be zero.
+  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
+       i -= IndexSetStride) {
+    if (_indexedFreeList[i].head() != NULL) {
+      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
+      return i;
+    }
+  }
+  return res;
+}
+
+// Print dictionary (and, at higher verbosity, indexed-list) statistics.
+// Only called when PrintFLSStatistics is set.
+void CompactibleFreeListSpace::reportFreeListStatistics() const {
+  assert_lock_strong(&_freelistLock);
+  assert(PrintFLSStatistics != 0, "Reporting error");
+  _dictionary->reportStatistics();
+  if (PrintFLSStatistics > 1) {
+    reportIndexedFreeListStatistics();
+    size_t totalSize = totalSizeInIndexedFreeLists() +
+                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
+    // Cast the size_t explicitly: passing a raw size_t to "%ld" has the
+    // wrong width on LLP64 platforms (e.g. 64-bit Windows).
+    gclog_or_tty->print(" free=%ld frag=%1.4f\n", (long) totalSize, flsFrag());
+  }
+}
+
+// Print summary statistics (total size, max chunk, block count, average
+// block size) for the indexed free lists.
+void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
+  assert_lock_strong(&_freelistLock);
+  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
+                      "--------------------------------\n");
+  size_t totalSize = totalSizeInIndexedFreeLists();
+  size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
+  // Cast the size_t values explicitly: passing a raw size_t to "%d"
+  // mismatches the varargs width on 64-bit platforms.
+  gclog_or_tty->print("Total Free Space: %lu\n", (unsigned long) totalSize);
+  gclog_or_tty->print("Max   Chunk Size: %lu\n",
+                      (unsigned long) maxChunkSizeInIndexedFreeLists());
+  gclog_or_tty->print("Number of Blocks: %lu\n", (unsigned long) freeBlocks);
+  if (freeBlocks != 0) {
+    gclog_or_tty->print("Av.  Block  Size: %lu\n",
+                        (unsigned long) (totalSize/freeBlocks));
+  }
+}
+
+// Total number of chunks held across the in-use indexed free lists;
+// debug builds walk each list to confirm its cached count.
+size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
+  size_t blocks = 0;
+  for (size_t sz = IndexSetStart; sz < IndexSetSize; sz += IndexSetStride) {
+    debug_only(
+      ssize_t walked = 0;
+      for (FreeChunk* cur = _indexedFreeList[sz].head(); cur != NULL;
+           cur = cur->next()) {
+        walked += 1;
+      }
+      assert(walked == _indexedFreeList[sz].count(),
+        "Incorrect count in list");
+    )
+    blocks += _indexedFreeList[sz].count();
+  }
+  return blocks;
+}
+
+// Scan from the largest indexed size downwards; the first non-empty
+// list gives the largest chunk size held outside the dictionary.
+// Returns 0 when all the indexed lists are empty.
+size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
+  size_t sz = IndexSetSize - 1;
+  while (sz != 0) {
+    if (_indexedFreeList[sz].head() != NULL) {
+      assert(_indexedFreeList[sz].count() != 0, "Inconsistent FreeList");
+      return sz;
+    }
+    sz -= IndexSetStride;
+  }
+  return 0;
+}
+
+// Move the end of the space.  Shrinking just validates the new
+// boundary; growing turns the new tail region into free space, either
+// via the linAB or via the free lists.  (This restyle also repairs the
+// previously misleading indentation of the shrink/grow branch.)
+void CompactibleFreeListSpace::set_end(HeapWord* value) {
+  HeapWord* prevEnd = end();
+  assert(prevEnd != value, "unnecessary set_end call");
+  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
+  _end = value;
+  if (prevEnd == NULL) {
+    // First-time initialization: nothing further to do.
+    return;
+  }
+  // Resize the underlying block offset table.
+  _bt.resize(pointer_delta(value, bottom()));
+  if (value <= prevEnd) {
+    // Shrinking: the discarded tail must not contain allocated blocks.
+    assert(value >= unallocated_block(), "New end is below unallocated block");
+  } else {
+    // Growing: hand the new tail chunk over to the allocator.
+    // Note that the BOT has not yet been updated for this block.
+    size_t newFcSize = pointer_delta(value, prevEnd);
+    // XXX This is REALLY UGLY and should be fixed up. XXX
+    if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
+      // Mark the boundary of the new block in BOT
+      _bt.mark_block(prevEnd, value);
+      // Put it all in the linAB; take the par-alloc lock when worker
+      // threads may be allocating concurrently.
+      if (ParallelGCThreads == 0) {
+        _smallLinearAllocBlock._ptr = prevEnd;
+        _smallLinearAllocBlock._word_size = newFcSize;
+        repairLinearAllocBlock(&_smallLinearAllocBlock);
+      } else { // ParallelGCThreads > 0
+        MutexLockerEx x(parDictionaryAllocLock(),
+                        Mutex::_no_safepoint_check_flag);
+        _smallLinearAllocBlock._ptr = prevEnd;
+        _smallLinearAllocBlock._word_size = newFcSize;
+        repairLinearAllocBlock(&_smallLinearAllocBlock);
+      }
+      // Births of chunks put into a LinAB are not recorded.  Births
+      // of chunks as they are allocated out of a LinAB are.
+    } else {
+      // Add the block to the free lists, if possible coalescing it
+      // with the last free block, and update the BOT and census data.
+      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
+    }
+  }
+}
+
+// Dirty-card-to-oop closure specialized for CompactibleFreeListSpace:
+// overrides walk_mem_region_with_cl to dispatch between a parallel and
+// a non-parallel walker (declared via the macro below; defined further
+// down by FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN).
+class FreeListSpace_DCTOC : public Filtering_DCTOC {
+  CompactibleFreeListSpace* _cfls;
+  CMSCollector* _collector;
+protected:
+  // Override.
+#define walk_mem_region_with_cl_DECL(ClosureType)                       \
+  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
+				       HeapWord* bottom, HeapWord* top, \
+				       ClosureType* cl);                \
+      void walk_mem_region_with_cl_par(MemRegion mr,                    \
+				       HeapWord* bottom, HeapWord* top, \
+				       ClosureType* cl);                \
+    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
+				       HeapWord* bottom, HeapWord* top, \
+				       ClosureType* cl)
+  walk_mem_region_with_cl_DECL(OopClosure);
+  walk_mem_region_with_cl_DECL(FilteringClosure);
+
+public:
+  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
+                      CMSCollector* collector,
+                      OopClosure* cl,
+  		      CardTableModRefBS::PrecisionStyle precision,
+		      HeapWord* boundary) :
+    Filtering_DCTOC(sp, cl, precision, boundary),
+    _cfls(sp), _collector(collector) {}
+};
+
+// We de-virtualize the block-related calls below, since we know that our
+// space is a CompactibleFreeListSpace.
+//
+// The macro defines three methods per closure type: the virtual
+// dispatcher (parallel vs. non-parallel based on the number of GC
+// worker threads), and the _par/_nopar walkers.  Both walkers first
+// skip blocks that "block_start" over-shot to before "mr", then apply
+// the closure to each live, already-scanned object in [bottom, top);
+// they differ only in using block_size/block_is_obj versus the _nopar
+// variants.  (Comments cannot go inside the macro body without
+// disturbing the line continuations, hence this header.)
+#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
+void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
+						 HeapWord* bottom,              \
+						 HeapWord* top,                 \
+						 ClosureType* cl) {             \
+   if (SharedHeap::heap()->n_par_threads() > 0) {                               \
+     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
+   } else {                                                                     \
+     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
+   }                                                                            \
+}                                                                               \
+void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
+						      HeapWord* bottom,         \
+						      HeapWord* top,            \
+						      ClosureType* cl) {        \
+  /* Skip parts that are before "mr", in case "block_start" sent us             \
+     back too far. */                                                           \
+  HeapWord* mr_start = mr.start();                                              \
+  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
+  HeapWord* next = bottom + bot_size;                                           \
+  while (next < mr_start) {                                                     \
+    bottom = next;                                                              \
+    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
+    next = bottom + bot_size;                                                   \
+  }                                                                             \
+                                                                                \
+  while (bottom < top) {                                                        \
+    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
+	!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
+                    oop(bottom)) &&                                             \
+	!_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
+      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      bottom += _cfls->adjustObjectSize(word_sz);                               \
+    } else {                                                                    \
+      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
+    }                                                                           \
+  }                                                                             \
+}                                                                               \
+void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
+						        HeapWord* bottom,       \
+						        HeapWord* top,          \
+						        ClosureType* cl) {      \
+  /* Skip parts that are before "mr", in case "block_start" sent us             \
+     back too far. */                                                           \
+  HeapWord* mr_start = mr.start();                                              \
+  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
+  HeapWord* next = bottom + bot_size;                                           \
+  while (next < mr_start) {                                                     \
+    bottom = next;                                                              \
+    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
+    next = bottom + bot_size;                                                   \
+  }                                                                             \
+                                                                                \
+  while (bottom < top) {                                                        \
+    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
+	!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
+                    oop(bottom)) &&                                             \
+	!_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
+      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      bottom += _cfls->adjustObjectSize(word_sz);                               \
+    } else {                                                                    \
+      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
+    }                                                                           \
+  }                                                                             \
+}
+
+// (There are only two of these, rather than N, because the split is due
+// only to the introduction of the FilteringClosure, a local part of the
+// impl of this abstraction.)
+FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
+FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
+
+// Factory for the CFLS-specific dirty-card-to-oop closure.
+DirtyCardToOopClosure*
+CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
+				      CardTableModRefBS::PrecisionStyle precision,
+				      HeapWord* boundary) {
+  FreeListSpace_DCTOC* dctoc =
+    new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
+  return dctoc;
+}
+
+
+// Note on locking for the space iteration functions:
+// since the collector's iteration activities are concurrent with
+// allocation activities by mutators, absent a suitable mutual exclusion
+// mechanism the iterators may go awry. For instace a block being iterated
+// may suddenly be allocated or divided up and part of it allocated and
+// so on.
+
+// Apply the given closure to each block in the space; the closure
+// returns the size of the block it just handled, which advances the
+// cursor to the next block.
+void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
+  assert_lock_strong(freelistLock());
+  HeapWord* limit = end();
+  HeapWord* cur   = bottom();
+  while (cur < limit) {
+    cur += cl->do_blk_careful(cur);
+  }
+}
+
+// Apply the given closure to each block in the space; the closure
+// returns the size of the block it just handled, which advances the
+// cursor to the next block.
+void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
+  assert_lock_strong(freelistLock());
+  HeapWord* limit = end();
+  HeapWord* cur   = bottom();
+  while (cur < limit) {
+    cur += cl->do_blk(cur);
+  }
+}
+
+// Apply the given closure to each oop in the space; blocks that are
+// not objects (free chunks) are skipped.
+void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
+  assert_lock_strong(freelistLock());
+  HeapWord* limit = end();
+  HeapWord* cur   = bottom();
+  while (cur < limit) {
+    // Compute the block size before applying the closure.
+    size_t sz = block_size(cur);
+    if (block_is_obj(cur)) {
+      oop(cur)->oop_iterate(cl);
+    }
+    cur += sz;
+  }
+}
+
+// Apply the given closure to each oop in the space \intersect memory region.
+// Objects straddling the region's boundaries are handled via a
+// filtering closure so only oops inside "mr" are visited.
+void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
+  assert_lock_strong(freelistLock());
+  if (is_empty()) {
+    return;
+  }
+  // Clip "mr" to the used part of the space.
+  MemRegion cur = MemRegion(bottom(), end());
+  mr = mr.intersection(cur);
+  if (mr.is_empty()) {
+    return;
+  }
+  if (mr.equals(cur)) {
+    // Whole-space case: no boundary filtering needed.
+    oop_iterate(cl);
+    return;
+  }
+  assert(mr.end() <= end(), "just took an intersection above");
+  HeapWord* obj_addr = block_start(mr.start());
+  HeapWord* t = mr.end();
+
+  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
+  if (block_is_obj(obj_addr)) {
+    // Handle first object specially: it may extend below mr.start(),
+    // so use the filtering closure.
+    oop obj = oop(obj_addr);
+    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
+  } else {
+    // First block is a free chunk; just step over it.
+    FreeChunk* fc = (FreeChunk*)obj_addr;
+    obj_addr += fc->size();
+  }
+  while (obj_addr < t) {
+    HeapWord* obj = obj_addr;
+    obj_addr += block_size(obj_addr);
+    // If "obj_addr" is not greater than top, then the
+    // entire object "obj" is within the region.
+    if (obj_addr <= t) {    
+      if (block_is_obj(obj)) {
+        oop(obj)->oop_iterate(cl);
+      }               
+    } else {
+      // "obj" extends beyond end of region
+      if (block_is_obj(obj)) {
+        oop(obj)->oop_iterate(&smr_blk);
+      }    
+      break;
+    }
+  }
+}
+
+// NOTE: In order to safely apply a closure to an object, the object
+// must be initialized.  An object is guaranteed initialized when we
+// hold the Heap_lock with the world stopped; at VM start-up we make
+// a concession and skip the check.
+void CompactibleFreeListSpace::verify_objects_initialized() const {
+  if (!is_init_completed()) {
+    return;  // concession at vm start-up
+  }
+  assert_locked_or_safepoint(Heap_lock);
+  if (Universe::is_fully_initialized()) {
+    guarantee(SafepointSynchronize::is_at_safepoint(),
+              "Required for objects to be initialized");
+  }
+}
+
+// Apply the given closure to each object in the space; free blocks
+// are stepped over without invoking the closure.
+void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
+  assert_lock_strong(freelistLock());
+  NOT_PRODUCT(verify_objects_initialized());
+  HeapWord* limit = end();
+  HeapWord* cur   = bottom();
+  while (cur < limit) {
+    // Compute the block size before applying the closure.
+    size_t sz = block_size(cur);
+    if (block_is_obj(cur)) {
+      blk->do_object(oop(cur));
+    }
+    cur += sz;
+  }
+}
+
+// Apply "cl" to objects intersecting "mr", delegating to the generic
+// Space implementation after the CFLS-specific lock/init checks.
+void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
+                                                  UpwardsObjectClosure* cl) {
+  assert_locked();
+  NOT_PRODUCT(verify_objects_initialized());
+  Space::object_iterate_mem(mr, cl);
+}
+
+// Callers of this iterator beware: The closure application should
+// be robust in the face of uninitialized objects and should (always)
+// return a correct size so that the next addr + size below gives us a
+// valid block boundary. [See for instance,
+// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
+// in ConcurrentMarkSweepGeneration.cpp.]
+// Returns NULL on a complete iteration, or the address of the first
+// unparsable object if the closure signals early termination.
+HeapWord*
+CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
+  assert_lock_strong(freelistLock());
+  HeapWord *addr, *last;
+  size_t size;
+  for (addr = bottom(), last  = end();
+       addr < last; addr += size) {
+    FreeChunk* fc = (FreeChunk*)addr;
+    if (fc->isFree()) {
+      // Since we hold the free list lock, which protects direct
+      // allocation in this generation by mutators, a free object
+      // will remain free throughout this iteration code.
+      size = fc->size();
+    } else {
+      // Note that the object need not necessarily be initialized,
+      // because (for instance) the free list lock does NOT protect
+      // object initialization. The closure application below must
+      // therefore be correct in the face of uninitialized objects.
+      size = cl->do_object_careful(oop(addr));
+      if (size == 0) {
+        // An unparsable object found. Signal early termination.
+        return addr;
+      }
+    }
+  }
+  return NULL;
+}
+
+// Callers of this iterator beware: The closure application should
+// be robust in the face of uninitialized objects and should (always)
+// return a correct size so that the next addr + size below gives us a
+// valid block boundary. [See for instance,
+// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
+// in ConcurrentMarkSweepGeneration.cpp.]
+// Returns the address of the first unparsable object encountered,
+// or NULL if all of "mr" was scanned successfully.  "mr" must be
+// non-empty and contained within [bottom(), end()) -- asserted below.
+HeapWord*
+CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
+  ObjectClosureCareful* cl) {
+  assert_lock_strong(freelistLock());
+  // Can't use used_region() below because it may not necessarily
+  // be the same as [bottom(),end()); although we could
+  // use [used_region().start(),round_to(used_region().end(),CardSize)),
+  // that appears too cumbersome, so we just do the simpler check
+  // in the assertion below.
+  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
+         "mr should be non-empty and within used space");
+  HeapWord *addr, *end;
+  size_t size;
+  for (addr = block_start_careful(mr.start()), end  = mr.end();
+       addr < end; addr += size) {
+    FreeChunk* fc = (FreeChunk*)addr;
+    if (fc->isFree()) {
+      // Since we hold the free list lock, which protects direct
+      // allocation in this generation by mutators, a free object
+      // will remain free throughout this iteration code.
+      size = fc->size();
+    } else {
+      // Note that the object need not necessarily be initialized,
+      // because (for instance) the free list lock does NOT protect
+      // object initialization. The closure application below must
+      // therefore be correct in the face of uninitialized objects.
+      size = cl->do_object_careful_m(oop(addr), mr);
+      if (size == 0) {
+        // An unparsable object found. Signal early termination.
+        return addr;
+      }
+    }
+  }
+  return NULL;
+}
+
+
+// Return the start of the block containing "p", as computed by the
+// block offset table.
+HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
+  NOT_PRODUCT(verify_objects_initialized());
+  return _bt.block_start(p);
+}
+
+// "Careful" variant of block_start(): delegates to the block offset
+// table's careful lookup; note that, unlike block_start(), it does
+// not require objects to be verifiably initialized.
+HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
+  return _bt.block_start_careful(p);
+}
+
+// Return the size in HeapWords of the block starting at "p".  The
+// block header may be under concurrent mutation (a chunk being
+// turned into an object or vice versa), so we retry until two reads
+// of the second word agree; see comments below.
+size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
+  NOT_PRODUCT(verify_objects_initialized());
+  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+  // This must be volatile, or else there is a danger that the compiler
+  // will compile the code below into a sometimes-infinite loop, by keeping 
+  // the value read the first time in a register.
+  oop o = (oop)p;
+  volatile oop* second_word_addr = o->klass_addr();
+  while (true) {
+    klassOop k = (klassOop)(*second_word_addr);
+    // We must do this until we get a consistent view of the object.
+    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
+      FreeChunk* fc = (FreeChunk*)p;
+      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
+      size_t res = (*sz_addr);
+      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
+      if (k == k2) {
+        assert(res != 0, "Block size should not be 0");
+        return res;
+      }
+    } else if (k != NULL) {
+      assert(k->is_oop(), "Should really be klass oop.");
+      assert(o->is_parsable(), "Should be parsable");
+      assert(o->is_oop(), "Should be an oop.");
+      size_t res = o->size_given_klass(k->klass_part());
+      res = adjustObjectSize(res);
+      assert(res != 0, "Block size should not be 0");
+      return res;
+    }
+    // If k is NULL we spin until the header becomes visible; for a
+    // non-stalling alternative see block_size_no_stall() below.
+  }
+}
+
+// A variant of the above that uses the Printezis bits for
+// unparsable but allocated objects. This avoids any possible
+// stalls waiting for mutators to initialize objects, and is
+// thus potentially faster than the variant above. However,
+// this variant may return a zero size for a block that is
+// under mutation and for which a consistent size cannot be
+// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
+size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
+                                                     const CMSCollector* c)
+const {
+  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+  // This must be volatile, or else there is a danger that the compiler
+  // will compile the code below into a sometimes-infinite loop, by keeping
+  // the value read the first time in a register.
+  oop o = (oop)p;
+  volatile oop* second_word_addr = o->klass_addr();
+  DEBUG_ONLY(uint loops = 0;)
+  while (true) {
+    klassOop k = (klassOop)(*second_word_addr);
+    // We must do this until we get a consistent view of the object.
+    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
+      FreeChunk* fc = (FreeChunk*)p;
+      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
+      size_t res = (*sz_addr);
+      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
+      if (k == k2) {
+        assert(res != 0, "Block size should not be 0");
+        assert(loops == 0, "Should be 0");
+        return res;
+      }
+    } else if (k != NULL && o->is_parsable()) {
+      assert(k->is_oop(), "Should really be klass oop.");
+      assert(o->is_oop(), "Should be an oop");
+      size_t res = o->size_given_klass(k->klass_part());
+      res = adjustObjectSize(res);
+      assert(res != 0, "Block size should not be 0");
+      return res;
+    } else {
+      return c->block_size_if_printezis_bits(p);
+    }
+    // NOTE(review): the loops counter asserts encode the belief that
+    // the inconsistent-read retry can happen at most once -- confirm
+    // against the free chunk mutation protocol.
+    assert(loops == 0, "Can loop at most once");
+    DEBUG_ONLY(loops++;)
+  }
+}
+
+// Non-parallel variant of block_size(): the free/allocated state of
+// the chunk is stable here, so a direct isFree() test suffices.
+size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
+  NOT_PRODUCT(verify_objects_initialized());
+  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+  FreeChunk* const chunk = (FreeChunk*)p;
+  if (!chunk->isFree()) {
+    // Ignore mark word because this may be a recently promoted
+    // object whose mark word is used to chain together grey
+    // objects (the last one would have a null value).
+    assert(oop(p)->is_oop(true), "Should be an oop");
+    return adjustObjectSize(oop(p)->size());
+  }
+  return chunk->size();
+}
+
+// This implementation assumes that the property of "being an object" is
+// stable.  But being a free chunk may not be (because of parallel
+// promotion.)
+// Returns true iff the block at "p" currently holds an object (as
+// opposed to a free chunk).
+bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
+  FreeChunk* fc = (FreeChunk*)p;
+  assert(is_in_reserved(p), "Should be in space");
+  // When doing a mark-sweep-compact of the CMS generation, this
+  // assertion may fail because prepare_for_compaction() uses
+  // space that is garbage to maintain information on ranges of
+  // live objects so that these live ranges can be moved as a whole.
+  // Comment out this assertion until that problem can be solved
+  // (i.e., that the block start calculation may look at objects
+  // at address below "p" in finding the object that contains "p"
+  // and those objects (if garbage) may have been modified to hold
+  // live range information.
+  // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
+  klassOop k = oop(p)->klass();
+  intptr_t ki = (intptr_t)k;
+  if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
+  if (k != NULL) {
+    // Ignore mark word because it may have been used to
+    // chain together promoted objects (the last one
+    // would have a null value).
+    assert(oop(p)->is_oop(true), "Should be an oop");
+    return true;
+  } else {
+    return false;  // Was not an object at the start of collection.
+  }
+}
+
+// Check if the object is alive. This fact is checked either by consulting
+// the main marking bitmap in the sweeping phase or, if it's a permanent
+// generation and we're not in the sweeping phase, by checking the
+// perm_gen_verify_bit_map where we store the "deadness" information if
+// we did not sweep the perm gen in the most recent previous GC cycle.
+// Precondition: "p" must point at an object block (asserted below).
+bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
+  assert (block_is_obj(p), "The address should point to an object");
+
+  // If we're sweeping, we use object liveness information from the main bit map
+  // for both perm gen and old gen.
+  // We don't need to lock the bitmap (live_map or dead_map below), because
+  // EITHER we are in the middle of the sweeping phase, and the
+  // main marking bit map (live_map below) is locked,
+  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
+  // is stable, because it's mutated only in the sweeping phase.
+  if (_collector->abstract_state() == CMSCollector::Sweeping) {
+    CMSBitMap* live_map = _collector->markBitMap();
+    return live_map->isMarked((HeapWord*) p);
+  } else {
+    // If we're not currently sweeping and we haven't swept the perm gen in
+    // the previous concurrent cycle then we may have dead but unswept objects
+    // in the perm gen. In this case, we use the "deadness" information
+    // that we had saved in perm_gen_verify_bit_map at the last sweep.
+    if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
+      if (_collector->verifying()) {
+        CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
+        // Object is marked in the dead_map bitmap at the previous sweep
+        // when we know that it's dead; if the bitmap is not allocated then
+        // the object is alive.
+        return (dead_map->sizeInBits() == 0) // bit_map has been allocated
+               || !dead_map->par_isMarked((HeapWord*) p);
+      } else {
+        return false; // We can't say for sure if it's live, so we say that it's dead.
+      }
+    }
+  }
+  return true;
+}
+
+// Non-parallel variant: a block is an object exactly when it is not
+// a free chunk.
+bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
+  FreeChunk* const chunk = (FreeChunk*)p;
+  assert(is_in_reserved(p), "Should be in space");
+  assert(_bt.block_start(p) == p, "Should be a block boundary");
+  if (chunk->isFree()) {
+    return false;
+  }
+  // Ignore mark word because it may have been used to
+  // chain together promoted objects (the last one
+  // would have a null value).
+  assert(oop(p)->is_oop(true), "Should be an oop");
+  return true;
+}
+
+// "MT-safe but not guaranteed MT-precise" (TM); you may get an
+// approximate answer if you don't hold the freelistlock when you call this.
+// Computed as the sum over all indexed lists of i * count(list_i);
+// the debug block cross-checks each list's count by walking it.
+size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
+  size_t size = 0;
+  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    debug_only(
+      // We may be calling here without the lock in which case we
+      // won't do this modest sanity check.
+      if (freelistLock()->owned_by_self()) {
+        size_t total_list_size = 0;
+        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
+          fc = fc->next()) {
+          total_list_size += i;
+        }
+        assert(total_list_size == i * _indexedFreeList[i].count(),
+               "Count in list is incorrect");
+      }
+    )
+    size += i * _indexedFreeList[i].count();
+  }
+  return size;
+}
+
+// MT-safe allocation entry point: takes the free list lock (without
+// a safepoint check) and delegates to the locked allocate().
+HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
+  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+  return allocate(size);
+}
+
+// Convenience wrapper: carve "size" words from the remainder of the
+// small linear allocation block.
+HeapWord*
+CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
+  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
+}
+
+// Allocate "size" heap words; "size" must already be adjusted via
+// adjustObjectSize().  Dispatches to the adaptive or non-adaptive
+// free list scheme and returns NULL when the request cannot be
+// satisfied.
+HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
+  assert_lock_strong(freelistLock());
+  HeapWord* res = NULL;
+  assert(size == adjustObjectSize(size),
+         "use adjustObjectSize() before calling into allocate()");
+  
+  if (_adaptive_freelists) {
+    res = allocate_adaptive_freelists(size);
+  } else {  // non-adaptive free lists
+    res = allocate_non_adaptive_freelists(size);
+  }
+  
+  if (res != NULL) {
+    // check that res does lie in this space!
+    assert(is_in_reserved(res), "Not in this space!");
+    assert(is_aligned((void*)res), "alignment check");
+
+    FreeChunk* fc = (FreeChunk*)res;
+    fc->markNotFree();
+    assert(!fc->isFree(), "shouldn't be marked free");
+    assert(oop(fc)->klass() == NULL, "should look uninitialized");
+    // Verify that the block offset table shows this to
+    // be a single block, but not one which is unallocated.
+    _bt.verify_single_block(res, size); 
+    _bt.verify_not_unallocated(res, size);
+    // mangle a just allocated object with a distinct pattern.
+    debug_only(fc->mangleAllocated(size));
+  }
+  
+  return res;
+}
+
+// Allocation without adaptive free lists: first the small linear
+// allocation block, then the indexed lists, and as a last resort
+// the dictionary.
+HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
+  HeapWord* res = NULL;
+  // try and use linear allocation for smaller blocks
+  if (size < _smallLinearAllocBlock._allocation_size_limit) {
+    // if successful, the following also adjusts block offset table
+    res = getChunkFromSmallLinearAllocBlock(size);
+  }
+  if (res != NULL) {
+    return res;
+  }
+  // Else triage to indexed lists for smaller sizes
+  if (size < SmallForDictionary) {
+    return (HeapWord*) getChunkFromIndexedFreeList(size);
+  }
+  // else get it from the big dictionary; if even this doesn't
+  // work we are out of luck.
+  return (HeapWord*) getChunkFromDictionaryExact(size);
+}
+
+// Allocation using the adaptive free lists; see the "Strategy"
+// comment in the body.  Returns NULL when the request fails.
+HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
+  assert_lock_strong(freelistLock());
+  HeapWord* res = NULL;
+  assert(size == adjustObjectSize(size),
+         "use adjustObjectSize() before calling into allocate()");
+  
+  // Strategy
+  //   if small
+  //     exact size from small object indexed list if small
+  //     small or large linear allocation block (linAB) as appropriate
+  //     take from lists of greater sized chunks
+  //   else
+  //     dictionary
+  //     small or large linear allocation block if it has the space
+  // Try allocating exact size from indexTable first
+  if (size < IndexSetSize) {
+    res = (HeapWord*) getChunkFromIndexedFreeList(size);
+    if(res != NULL) {
+      assert(res != (HeapWord*)_indexedFreeList[size].head(), 
+        "Not removed from free list");
+      // no block offset table adjustment is necessary on blocks in
+      // the indexed lists.
+
+    // Try allocating from the small LinAB
+    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
+	(res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
+	// if successful, the above also adjusts block offset table
+	// Note that this call will refill the LinAB to 
+	// satisfy the request.  This is different that
+	// evm.  
+        // Don't record chunk off a LinAB?  smallSplitBirth(size);
+  
+    } else {
+      // Raid the exact free lists larger than size, even if they are not
+      // overpopulated.
+      res = (HeapWord*) getChunkFromGreater(size);
+    }
+  } else {
+    // Big objects get allocated directly from the dictionary.
+    res = (HeapWord*) getChunkFromDictionaryExact(size);
+    if (res == NULL) {
+      // Try hard not to fail since an allocation failure will likely
+      // trigger a synchronous GC.  Try to get the space from the 
+      // allocation blocks.
+      res = getChunkFromSmallLinearAllocBlockRemainder(size);
+    }
+  }
+  
+  return res;
+}
+
+// A worst-case estimate of the space required (in HeapWords) to expand the heap
+// when promoting obj.
+size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
+  // Depending on the object size, expansion may require refilling either a
+  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
+  // is added because the dictionary may over-allocate to avoid fragmentation.
+  size_t estimate = obj_size;
+  if (!_adaptive_freelists) {
+    // Non-adaptive allocation may have to refill the small linAB first.
+    estimate = MAX2(estimate, _smallLinearAllocBlock._refillSize);
+  }
+  return estimate + _promoInfo.refillSize() + 2 * MinChunkSize;
+}
+
+// Satisfy a request of "numWords" by carving a chunk out of an exact
+// indexed free list of a larger size or, failing that, out of the
+// dictionary.  The remainder after carving is returned to the free
+// lists.  Returns NULL when no suitable chunk exists.
+FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
+  FreeChunk* ret;
+
+  assert(numWords >= MinChunkSize, "Size is less than minimum");
+  assert(linearAllocationWouldFail() || bestFitFirst(),
+    "Should not be here");
+
+  // First scan the exact indexed lists that are big enough to yield
+  // both the request and a remainder of at least MinChunkSize.
+  size_t i;
+  size_t currSize = numWords + MinChunkSize;
+  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
+  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
+    FreeList* fl = &_indexedFreeList[i];
+    if (fl->head()) {
+      ret = getFromListGreater(fl, numWords);
+      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
+      return ret;
+    }
+  }
+
+  currSize = MAX2((size_t)SmallForDictionary,
+                  (size_t)(numWords + MinChunkSize));
+
+  /* Try to get a chunk that satisfies request, while avoiding
+     fragmentation that can't be handled. */
+  // (The redundant scope block and the unreachable ShouldNotReachHere()
+  // after its unconditional return have been removed.)
+  ret = dictionary()->getChunk(currSize);
+  if (ret != NULL) {
+    assert(ret->size() - numWords >= MinChunkSize,
+           "Chunk is too small");
+    _bt.allocated((HeapWord*)ret, ret->size());
+    /* Carve returned chunk. */
+    (void) splitChunkAndReturnRemainder(ret, numWords);
+    /* Label this as no longer a free chunk. */
+    assert(ret->isFree(), "This chunk should be free");
+    ret->linkPrev(NULL);
+  }
+  assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
+  return ret;
+}
+
+// Debug support: check that "fc" is present on the exact indexed
+// free list corresponding to its size.
+bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) 
+  const {
+  size_t const sz = fc->size();
+  assert(sz < IndexSetSize, "Size of chunk is too large");
+  return _indexedFreeList[sz].verifyChunkInFreeLists(fc);
+}
+
+// Debug support: check that "fc" appears on whichever free data
+// structure (indexed list or dictionary) should be holding it.
+bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
+  if (fc->size() < IndexSetSize) {
+    return verifyChunkInIndexedFreeLists(fc);
+  }
+  return dictionary()->verifyChunkInFreeLists(fc);
+}
+
+#ifndef PRODUCT
+// Debug-only: verify that the current thread holds the locks that
+// protect this space (the free list lock and/or the parallel
+// dictionary allocation lock, as decided by CMSLockVerifier).
+void CompactibleFreeListSpace::assert_locked() const {
+  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
+}
+#endif
+
+// Allocate a chunk of "size" words from the dictionary for use as
+// scratch space.  The chunk is marked non-coalescable
+// (dontCoalesce()) before being returned; NULL on failure.
+FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
+  // In the parallel case, the main thread holds the free list lock
+  // on behalf the parallel threads.
+  assert_locked();
+  FreeChunk* fc;
+  {
+    // If GC is parallel, this might be called by several threads.
+    // This should be rare enough that the locking overhead won't affect
+    // the sequential code.
+    MutexLockerEx x(parDictionaryAllocLock(),
+                    Mutex::_no_safepoint_check_flag);
+    fc = getChunkFromDictionary(size);
+  }
+  if (fc != NULL) {
+    fc->dontCoalesce();
+    assert(fc->isFree(), "Should be free, but not coalescable");
+    // Verify that the block offset table shows this to
+    // be a single block, but not one which is unallocated.
+    _bt.verify_single_block((HeapWord*)fc, fc->size());
+    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
+  }
+  return fc;
+}
+
+// Copy "obj" (of "obj_size" heap words) into this space and return
+// the new copy; returns NULL if spooling space (when tracking
+// promotions) cannot be ensured or allocation fails.
+oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
+  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
+  assert_locked();
+
+  // if we are tracking promotions, then first ensure space for
+  // promotion (including spooling space for saving header if necessary).
+  // then allocate and copy, then track promoted info if needed.
+  // When tracking (see PromotionInfo::track()), the mark word may
+  // be displaced and in this case restoration of the mark word
+  // occurs in the (oop_since_save_marks_)iterate phase.
+  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
+    return NULL;
+  }
+  // Call the allocate(size_t, bool) form directly to avoid the
+  // additional call through the allocate(size_t) form.  Having
+  // the compile inline the call is problematic because allocate(size_t)
+  // is a virtual method.
+  HeapWord* res = allocate(adjustObjectSize(obj_size));
+  if (res != NULL) {
+    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
+    // if we should be tracking promotions, do so.
+    if (_promoInfo.tracking()) {
+        _promoInfo.track((PromotedObject*)res);
+    }
+  }
+  return oop(res);
+}
+
+// Carve "size" words out of the small linear allocation block; the
+// request must lie between MinChunkSize and the block's allocation
+// size limit (both asserted).
+HeapWord*
+CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
+  assert_locked();
+  assert(size >= MinChunkSize, "minimum chunk size");
+  assert(size <  _smallLinearAllocBlock._allocation_size_limit, 
+    "maximum from smallLinearAllocBlock");
+  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
+}
+
+// Carve "size" words from the linear allocation block "blk",
+// refilling "blk" when its current contents cannot (or can only
+// exactly) satisfy the request.  Returns NULL on failure.
+HeapWord*
+CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
+                                                       size_t size) {
+  assert_locked();
+  assert(size >= MinChunkSize, "too small");
+  HeapWord* res = NULL;
+  // Try to do linear allocation from blk, bailing out early when the
+  // block is currently empty (it has not been refilled yet).
+  if (blk->_word_size == 0) {
+    // We have probably been unable to fill this either in the prologue or
+    // when it was exhausted at the last linear allocation. Bail out until
+    // next time.
+    assert(blk->_ptr == NULL, "consistency check");
+    return NULL;
+  }
+  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
+  res = getChunkFromLinearAllocBlockRemainder(blk, size);
+  if (res != NULL) return res;
+
+  // about to exhaust this linear allocation block
+  if (blk->_word_size == size) { // exactly satisfied
+    res = blk->_ptr;
+    _bt.allocated(res, blk->_word_size);
+  } else if (size + MinChunkSize <= blk->_refillSize) {
+    // Update _unallocated_block if the size is such that chunk would be
+    // returned to the indexed free list.  All other chunks in the indexed
+    // free lists are allocated from the dictionary so that _unallocated_block
+    // has already been adjusted for them.  Do it here so that the cost
+    // for all chunks added back to the indexed free lists.
+    if (blk->_word_size < SmallForDictionary) {
+      _bt.allocated(blk->_ptr, blk->_word_size);
+    }
+    // Return the chunk that isn't big enough, and then refill below.
+    addChunkToFreeLists(blk->_ptr, blk->_word_size);
+    _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
+    // Don't keep statistics on adding back chunk from a LinAB.
+  } else {
+    // A refilled block would not satisfy the request.
+    return NULL;
+  }
+
+  blk->_ptr = NULL; blk->_word_size = 0;
+  refillLinearAllocBlock(blk);
+  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
+	 "block was replenished");
+  if (res != NULL) {
+    splitBirth(size);
+    repairLinearAllocBlock(blk);
+  } else if (blk->_ptr != NULL) {
+    res = blk->_ptr;
+    size_t blk_size = blk->_word_size;
+    blk->_word_size -= size;
+    blk->_ptr  += size;
+    splitBirth(size);
+    repairLinearAllocBlock(blk);
+    // Update BOT last so that other (parallel) GC threads see a consistent
+    // view of the BOT and free blocks.
+    // Above must occur before BOT is updated below.
+    _bt.split_block(res, blk_size, size);  // adjust block offset table
+  }
+  return res;
+}
+
+// Try to satisfy "size" from the unused remainder of "blk" without
+// exhausting it: succeeds only when at least MinChunkSize words
+// would remain afterwards; otherwise returns NULL and leaves "blk"
+// untouched.
+HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
+					LinearAllocBlock* blk, 
+					size_t size) {
+  assert_locked();
+  assert(size >= MinChunkSize, "too small");
+
+  HeapWord* res = NULL;
+  // This is the common case.  Keep it simple.
+  if (blk->_word_size >= size + MinChunkSize) {
+    assert(blk->_ptr != NULL, "consistency check");
+    res = blk->_ptr;
+    // Note that the BOT is up-to-date for the linAB before allocation.  It
+    // indicates the start of the linAB.  The split_block() updates the
+    // BOT for the linAB after the allocation (indicates the start of the
+    // next chunk to be allocated).
+    size_t blk_size = blk->_word_size;
+    blk->_word_size -= size;
+    blk->_ptr  += size;
+    splitBirth(size);
+    repairLinearAllocBlock(blk);
+    // Update BOT last so that other (parallel) GC threads see a consistent
+    // view of the BOT and free blocks.
+    // Above must occur before BOT is updated below.
+    _bt.split_block(res, blk_size, size);  // adjust block offset table
+    _bt.allocated(res, size);
+  } 
+  return res;
+}
+
+// Get a chunk of exactly "size" words from the indexed free list for
+// that size, replenishing the list via the helper when it is empty.
+FreeChunk* 
+CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
+  assert_locked();
+  assert(size < SmallForDictionary, "just checking");
+  FreeChunk* res;
+  res = _indexedFreeList[size].getChunkAtHead();
+  if (res == NULL) {
+    res = getChunkFromIndexedFreeListHelper(size);
+  }
+  // NOTE(review): res may be NULL here; presumably
+  // verify_not_unallocated tolerates a NULL block -- confirm.
+  _bt.verify_not_unallocated((HeapWord*) res, size);
+  return res;
+}
+
+// Helper for getChunkFromIndexedFreeList(): replenish the indexed
+// list for "size" from a larger indexed list, a best-fit small
+// chunk, or (for dictionary-sized requests) the dictionary.  A
+// replenishing chunk is carved into size-sized pieces; all but the
+// last are added to the list and the last is returned to the caller.
+FreeChunk*
+CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
+  assert_locked();
+  FreeChunk* fc = NULL;
+  if (size < SmallForDictionary) {
+    assert(_indexedFreeList[size].head() == NULL ||
+      _indexedFreeList[size].surplus() <= 0,
+      "List for this size should be empty or under populated");
+    // Try best fit in exact lists before replenishing the list
+    if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
+      // Replenish list.
+      //
+      // Things tried that failed.
+      //   Tried allocating out of the two LinAB's first before 
+      // replenishing lists.  
+      //   Tried small linAB of size 256 (size in indexed list)
+      // and replenishing indexed lists from the small linAB.
+      //
+      FreeChunk* newFc = NULL;
+      size_t replenish_size = CMSIndexedFreeListReplenish * size;
+      if (replenish_size < SmallForDictionary) {
+	// Do not replenish from an underpopulated size.
+	if (_indexedFreeList[replenish_size].surplus() > 0 &&
+	    _indexedFreeList[replenish_size].head() != NULL) {
+          newFc = 
+            _indexedFreeList[replenish_size].getChunkAtHead();
+	} else {
+	  newFc = bestFitSmall(replenish_size);
+	}
+      }
+      if (newFc != NULL) {
+	splitDeath(replenish_size);
+      } else if (replenish_size > size) {
+        assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
+        newFc = 
+          getChunkFromIndexedFreeListHelper(replenish_size);
+      }
+      if (newFc != NULL) {
+        assert(newFc->size() == replenish_size, "Got wrong size");
+        size_t i;
+        FreeChunk *curFc, *nextFc;
+        // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
+	// The last chunk is not added to the lists but is returned as the
+	// free chunk.
+        for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), 
+  	     i = 0;
+             i < (CMSIndexedFreeListReplenish - 1);
+             curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), 
+  	     i++) {
+          curFc->setSize(size);
+  	  // Don't record this as a return in order to try and
+  	  // determine the "returns" from a GC.
+          // NOTE(review): 'fc' is NULL on this path (it is only set in
+          // the dictionary branch below); this check was presumably
+          // intended to use curFc -- confirm.
+          _bt.verify_not_unallocated((HeapWord*) fc, size);
+  	  _indexedFreeList[size].returnChunkAtTail(curFc, false);
+  	  _bt.mark_block((HeapWord*)curFc, size);
+  	  splitBirth(size);
+  	  // Don't record the initial population of the indexed list
+  	  // as a split birth.
+        }
+
+        // check that the arithmetic was OK above
+        assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
+          "inconsistency in carving newFc");
+        curFc->setSize(size);
+  	_bt.mark_block((HeapWord*)curFc, size);
+  	splitBirth(size);
+        return curFc;
+      }
+    }
+  } else {
+    // Get a free chunk from the free chunk dictionary to be returned to
+    // replenish the indexed free list.
+    fc = getChunkFromDictionaryExact(size);
+  }
+  assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
+  return fc;
+}
+
+// Get a chunk of at least "size" words from the dictionary.  Any
+// splittable excess (>= MinChunkSize) is carved off and returned to
+// the free lists, so the result lies in [size, size + MinChunkSize).
+FreeChunk*
+CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
+  assert_locked();
+  FreeChunk* fc = _dictionary->getChunk(size);
+  if (fc == NULL) {
+    return NULL;
+  }
+  _bt.allocated((HeapWord*)fc, fc->size());
+  if (fc->size() >= size + MinChunkSize) {
+    fc = splitChunkAndReturnRemainder(fc, size);
+  }
+  assert(fc->size() >= size, "chunk too small");
+  assert(fc->size() < size + MinChunkSize, "chunk too big");
+  _bt.verify_single_block((HeapWord*)fc, fc->size());
+  return fc;
+}
+
+// Like getChunkFromDictionary(), but the result is exactly "size"
+// words.  If the first chunk found is too small to split (the
+// remainder would be below MinChunkSize), it is returned to the
+// dictionary and a strictly larger chunk is requested instead.
+FreeChunk*
+CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
+  assert_locked();
+  FreeChunk* fc = _dictionary->getChunk(size);
+  if (fc == NULL) {
+    return fc;
+  }
+  _bt.allocated((HeapWord*)fc, fc->size());
+  if (fc->size() == size) {
+    _bt.verify_single_block((HeapWord*)fc, size);
+    return fc;
+  }
+  assert(fc->size() > size, "getChunk() guarantee");
+  if (fc->size() < size + MinChunkSize) {
+    // Return the chunk to the dictionary and go get a bigger one.
+    returnChunkToDictionary(fc);
+    fc = _dictionary->getChunk(size + MinChunkSize); 
+    if (fc == NULL) {
+      return NULL;
+    }
+    _bt.allocated((HeapWord*)fc, fc->size());
+  }
+  assert(fc->size() >= size + MinChunkSize, "tautology");
+  fc = splitChunkAndReturnRemainder(fc, size);
+  assert(fc->size() == size, "chunk is wrong size");
+  _bt.verify_single_block((HeapWord*)fc, size);
+  return fc;
+}
+
+// Give "chunk" back to the dictionary, first letting the block
+// offset table know that the range is free again.
+void
+CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
+  assert_locked();
+
+  size_t const sz = chunk->size();
+  _bt.verify_single_block((HeapWord*)chunk, sz);
+  // adjust _unallocated_block downward, as necessary
+  _bt.freed((HeapWord*)chunk, sz);
+  _dictionary->returnChunk(chunk);
+}
+
+// Return "fc" to the indexed free list for its size.  Adaptive free
+// lists append at the tail; otherwise we push at the head.
+void
+CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
+  assert_locked();
+  size_t const sz = fc->size();
+  _bt.verify_single_block((HeapWord*) fc, sz);
+  _bt.verify_not_unallocated((HeapWord*) fc, sz);
+  if (!_adaptive_freelists) {
+    _indexedFreeList[sz].returnChunkAtHead(fc);
+  } else {
+    _indexedFreeList[sz].returnChunkAtTail(fc);
+  }
+}
+
+// Add chunk to end of last block -- if it's the largest
+// block -- and update BOT and census data. We would
+// of course have preferred to coalesce it with the
+// last block, but it's currently less expensive to find the
+// largest block than it is to find the last.
+void
+CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
+  HeapWord* chunk, size_t     size) {
+  // check that the chunk does lie in this space!
+  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
+  assert_locked();
+  // One of the parallel gc task threads may be here
+  // whilst others are allocating.
+  Mutex* lock = NULL;
+  if (ParallelGCThreads != 0) {
+    lock = &_parDictionaryAllocLock;
+  }
+  FreeChunk* ec;
+  {
+    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
+    ec = dictionary()->findLargestDict();  // get largest block
+    if (ec != NULL && ec->end() == chunk) {
+      // It's a coterminal block - we can coalesce.
+      size_t old_size = ec->size();
+      coalDeath(old_size);
+      removeChunkFromDictionary(ec);
+      size += old_size;
+    } else {
+      ec = (FreeChunk*)chunk;
+    }
+  }
+  ec->setSize(size);
+  debug_only(ec->mangleFreed(size));
+  // Chunks below SmallForDictionary go to an indexed list, guarded
+  // by that list's own lock; larger chunks keep the dictionary
+  // allocation lock chosen above.
+  if (size < SmallForDictionary) {
+    lock = _indexedFreeListParLocks[size];
+  }
+  MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
+  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
+  // record the birth under the lock since the recording involves
+  // manipulation of the list on which the chunk lives and
+  // if the chunk is allocated and is the last on the list,
+  // the list can go away.
+  coalBirth(size);
+}
+
+// Hand the range [chunk, chunk + size) back as a free chunk, routing
+// it to the dictionary or the indexed lists according to its size.
+void
+CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
+                                              size_t     size) {
+  // check that the chunk does lie in this space!
+  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
+  assert_locked();
+  _bt.verify_single_block(chunk, size);
+
+  FreeChunk* const fc = (FreeChunk*) chunk;
+  fc->setSize(size);
+  debug_only(fc->mangleFreed(size));
+  if (size >= SmallForDictionary) {
+    returnChunkToDictionary(fc);
+  } else {
+    returnChunkToFreeList(fc);
+  }
+}
+
+// Return a chunk to the free lists, first rewriting the block offset
+// table if the chunk was produced by coalescing several blocks.
+void
+CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
+  size_t size, bool coalesced) {
+  assert_locked();
+  assert(chunk != NULL, "null chunk");
+  if (coalesced) {
+    // repair BOT: the span now describes a single block.
+    _bt.single_block(chunk, size);
+  }
+  addChunkToFreeLists(chunk, size);
+}
+
+// We _must_ find the purported chunk on our free lists;
+// we assert if we don't.
+void
+CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
+  size_t size = fc->size();
+  assert_locked();
+  debug_only(verifyFreeLists());
+  // Small chunks live on the indexed lists, large ones in the dictionary.
+  if (size < SmallForDictionary) {
+    removeChunkFromIndexedFreeList(fc);
+  } else {
+    removeChunkFromDictionary(fc);
+  }
+  _bt.verify_single_block((HeapWord*)fc, size);
+  debug_only(verifyFreeLists());
+}
+
+// Remove a (large) chunk from the dictionary and mark its words as
+// allocated in the block offset table.
+void
+CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
+  assert_locked();
+  // Check for NULL *before* reading fc->size(); the original read the
+  // size first, which dereferenced fc and made the assert useless.
+  assert(fc != NULL, "null chunk");
+  size_t size = fc->size();
+  _bt.verify_single_block((HeapWord*)fc, size);
+  _dictionary->removeChunk(fc);
+  // adjust _unallocated_block upward, as necessary
+  _bt.allocated((HeapWord*)fc, size);
+}
+
+// Remove a small chunk from the indexed free list for its size,
+// optionally verifying the list before and after in debug builds.
+void
+CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
+  assert_locked();
+  size_t size = fc->size();
+  _bt.verify_single_block((HeapWord*)fc, size);
+  NOT_PRODUCT(
+    if (FLSVerifyIndexTable) {
+      verifyIndexedFreeList(size);
+    }
+  )
+  _indexedFreeList[size].removeChunk(fc);
+  // Clear the (now dangling) links in debug builds to catch stale use.
+  debug_only(fc->clearNext());
+  debug_only(fc->clearPrev());
+  NOT_PRODUCT(
+    if (FLSVerifyIndexTable) {
+      verifyIndexedFreeList(size);
+    }
+  )
+}
+
+// Find a small chunk of at least numWords by following the "hint"
+// chain: each indexed list's hint is the next larger size believed to
+// have a surplus. Returns NULL when no suitable list is found.
+FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
+  // Start the search at a size large enough to guarantee that the
+  // excess after splitting is >= MIN_CHUNK.
+  size_t start = align_object_size(numWords + MinChunkSize);
+  if (start >= IndexSetSize) {
+    return NULL;
+  }
+  size_t hint = _indexedFreeList[start].hint();
+  while (hint < IndexSetSize) {
+    assert(hint % MinObjAlignment == 0, "hint should be aligned");
+    FreeList* fl = &_indexedFreeList[hint];
+    if (fl->surplus() > 0 && fl->head() != NULL) {
+      // Found a list with surplus: remember it as the new hint for
+      // 'start', then split a chunk off for the caller.
+      _indexedFreeList[start].set_hint(hint);
+      FreeChunk* res = getFromListGreater(fl, numWords);
+      assert(res == NULL || res->isFree(),
+        "Should be returning a free chunk");
+      return res;
+    }
+    hint = fl->hint(); // keep looking
+  }
+  // No list with a surplus was found; saturate the hint.
+  _indexedFreeList[start].set_hint(IndexSetSize);
+  return NULL;
+}
+
+// Remove the head of fl and split a chunk of numWords off it,
+// returning that chunk; the remainder goes back on a free list.
+// Requires fl->size >= numWords + MinChunkSize.
+FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
+  size_t numWords) {
+  FreeChunk *curr = fl->head();
+  // Check the list is non-empty *before* dereferencing its head; the
+  // original read curr->size() first, which defeated the assert.
+  assert(curr != NULL, "List is empty");
+  size_t oldNumWords = curr->size();
+  assert(numWords >= MinChunkSize, "Word size is too small");
+  assert(oldNumWords >= numWords + MinChunkSize,
+         "Size of chunks in the list is too small");
+
+  fl->removeChunk(curr);
+  // recorded indirectly by splitChunkAndReturnRemainder -
+  // smallSplit(oldNumWords, numWords);
+  FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
+  // Does anything have to be done for the remainder in terms of
+  // fixing the card table?
+  assert(new_chunk == NULL || new_chunk->isFree(),
+    "Should be returning a free chunk");
+  return new_chunk;
+}
+
+// Split 'chunk' into a leading piece of new_size (returned to the
+// caller) and a remainder that is put back on the appropriate free
+// list. Records the split in the census. The statement order here is
+// deliberate: the remainder is marked free before the BOT is updated
+// so other (parallel) GC threads never see an inconsistent block.
+FreeChunk*
+CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
+  size_t new_size) {
+  assert_locked();
+  size_t size = chunk->size();
+  assert(size > new_size, "Split from a smaller block?");
+  assert(is_aligned(chunk), "alignment problem");
+  assert(size == adjustObjectSize(size), "alignment problem");
+  size_t rem_size = size - new_size;
+  assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
+  assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
+  FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
+  assert(is_aligned(ffc), "alignment problem");
+  ffc->setSize(rem_size);
+  ffc->linkNext(NULL);
+  ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+  // Above must occur before BOT is updated below.
+  // adjust block offset table
+  _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
+  if (rem_size < SmallForDictionary) {
+    // Small remainder: its indexed list may be accessed by parallel
+    // threads, so take the per-size lock around the insertion.
+    bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
+    if (is_par) _indexedFreeListParLocks[rem_size]->lock();
+    returnChunkToFreeList(ffc);
+    split(size, rem_size);
+    if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
+  } else {
+    returnChunkToDictionary(ffc);
+    split(size ,rem_size);
+  }
+  chunk->setSize(new_size);
+  return chunk;
+}
+
+// Hook invoked when a sweep finishes: free space should now be
+// plentiful, so top up the linear allocation blocks eagerly.
+void
+CompactibleFreeListSpace::sweep_completed() {
+  refillLinearAllocBlocksIfNeeded();
+}
+
+// Called at the start of a GC cycle: optionally dump free-list
+// statistics, then make sure the linear allocation blocks are filled.
+void
+CompactibleFreeListSpace::gc_prologue() {
+  assert_locked();
+  if (PrintFLSStatistics != 0) {
+    gclog_or_tty->print("Before GC:\n");
+    reportFreeListStatistics();
+  }
+  refillLinearAllocBlocksIfNeeded();
+}
+
+// Called at the end of a GC cycle: stop promotion tracking, restore
+// linear-allocation blocks to free-block form, and optionally report
+// free-list statistics.
+void
+CompactibleFreeListSpace::gc_epilogue() {
+  assert_locked();
+  if (PrintGCDetails && Verbose && !_adaptive_freelists) {
+    if (_smallLinearAllocBlock._word_size == 0)
+      warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
+  }
+  // All promotions must have been processed by now.
+  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
+  _promoInfo.stopTrackingPromotions();
+  repairLinearAllocationBlocks();
+  // Print Space's stats
+  if (PrintFLSStatistics != 0) {
+    gclog_or_tty->print("After GC:\n");
+    reportFreeListStatistics();
+  }
+}
+
+// Iteration support, mostly delegated from a CMS generation
+
+// Record the current top of the used space and start tracking
+// subsequent promotions in _promoInfo.
+void CompactibleFreeListSpace::save_marks() {
+  // mark the "end" of the used space at the time of this call;
+  // note, however, that promoted objects from this point
+  // on are tracked in the _promoInfo below.
+  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? 
+                      unallocated_block() : end());
+  // inform allocator that promotions should be tracked.
+  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
+  _promoInfo.startTrackingPromotions();
+}
+
+// True when nothing has been promoted into this space since the last
+// save_marks() call. Not usable during the parallel part of a GC.
+bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
+  assert(_promoInfo.tracking(), "No preceding save_marks?");
+  guarantee(SharedHeap::heap()->n_par_threads() == 0,
+	    "Shouldn't be called (yet) during parallel part of gc.");
+  return _promoInfo.noPromotions();
+}
+
+// Generates, for each concrete oop-closure type, the method that
+// iterates over objects promoted into this space since the last
+// save_marks(), delegating to PromotionInfo.
+#define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
+                                                                            \
+void CompactibleFreeListSpace::                                             \
+oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
+  assert(SharedHeap::heap()->n_par_threads() == 0,                          \
+         "Shouldn't be called (yet) during parallel part of gc.");          \
+  _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
+  /*                                                                        \
+   * This also restores any displaced headers and removes the elements from \
+   * the iteration set as they are processed, so that we have a clean slate \
+   * at the end of the iteration. Note, thus, that if new objects are       \
+   * promoted as a result of the iteration they are iterated over as well.  \
+   */                                                                       \
+  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
+}
+
+ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
+
+//////////////////////////////////////////////////////////////////////////////
+// We go over the list of promoted objects, removing each from the list,    
+// and applying the closure (this may, in turn, add more elements to  
+// the tail of the promoted list, and these newly added objects will 
+// also be processed) until the list is empty.                      
+// To aid verification and debugging, in the non-product builds
+// we actually forward _promoHead each time we process a promoted oop.
+// Note that this is not necessary in general (i.e. when we don't need to
+// call PromotionInfo::verify()) because oop_iterate can only add to the
+// end of _promoTail, and never needs to look at _promoHead.
+
+#define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
+                                                                            \
+void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
+  NOT_PRODUCT(verify());                                                    \
+  PromotedObject *curObj, *nextObj;                                         \
+  for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
+    if ((nextObj = curObj->next()) == NULL) {                               \
+      /* protect ourselves against additions due to closure application     \
+         below by resetting the list.  */                                   \
+      assert(_promoTail == curObj, "Should have been the tail");            \
+      _promoHead = _promoTail = NULL;                                       \
+    }                                                                       \
+    if (curObj->hasDisplacedMark()) {                                       \
+      /* restore displaced header */                                        \
+      oop(curObj)->set_mark(nextDisplacedHeader());                         \
+    } else {                                                                \
+      /* restore prototypical header */                                     \
+      oop(curObj)->init_mark();                                             \
+    }                                                                       \
+    /* The "promoted_mark" should now not be set */                         \
+    assert(!curObj->hasPromotedMark(),                                      \
+           "Should have been cleared by restoring displaced mark-word");    \
+    NOT_PRODUCT(_promoHead = nextObj);                                      \
+    if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
+    if (nextObj == NULL) { /* start at head of list reset above */          \
+      nextObj = _promoHead;                                                 \
+    }                                                                       \
+  }                                                                         \
+  assert(noPromotions(), "post-condition violation");                       \
+  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
+  assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
+  assert(_firstIndex == _nextIndex, "empty buffer");                        \
+}
+
+// This should have been ALL_SINCE_...() just like the others,
+// but, because the body of the method above is somewhat longer,
+// the MSVC compiler cannot cope; as a workaround, we split the
+// macro into its 3 constituent parts below (see original macro
+// definition in specializedOopClosures.hpp).
+SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
+PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
+
+
+// Unimplemented for this (non-contiguous) space; always fails.
+void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
+  // ugghh... how would one do this efficiently for a non-contiguous space?
+  guarantee(false, "NYI");
+}
+
+// True when the small linear allocation block is exhausted.
+bool CompactibleFreeListSpace::linearAllocationWouldFail() {
+  return _smallLinearAllocBlock._word_size == 0;
+}
+
+// Make every linear allocation block look like a free block again.
+void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
+  // Fix up linear allocation blocks to look like free blocks
+  repairLinearAllocBlock(&_smallLinearAllocBlock);
+}
+
+// Turn the unconsumed tail of a linear allocation block back into a
+// well-formed, uncoalescable free chunk.
+void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
+  assert_locked();
+  if (blk->_ptr != NULL) {
+    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
+           "Minimum block size requirement");
+    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
+    fc->setSize(blk->_word_size);
+    fc->linkPrev(NULL);   // mark as free
+    // Prevent the sweeper from merging this chunk away while the
+    // linAB still refers to it.
+    fc->dontCoalesce();
+    assert(fc->isFree(), "just marked it free");
+    assert(fc->cantCoalesce(), "just marked it uncoalescable");
+  }
+}
+
+// Ensure the small linear allocation block has backing storage,
+// re-initializing its refill parameters if it was fully consumed.
+void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
+  assert_locked();
+  if (_smallLinearAllocBlock._ptr == NULL) {
+    assert(_smallLinearAllocBlock._word_size == 0, 
+      "Size of linAB should be zero if the ptr is NULL");
+    // Reset the linAB refill and allocation size limit.
+    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
+  }
+  refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
+}
+
+// Refill blk if it is empty; warn (verbose builds) when the refill
+// could not obtain any storage.
+void
+CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
+  assert_locked();
+  // A linAB is either empty (NULL ptr, zero size) or holds at least
+  // a minimum-sized chunk.
+  assert((blk->_ptr == NULL && blk->_word_size == 0) ||
+         (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
+         "blk invariant");
+  if (blk->_ptr == NULL) {
+    refillLinearAllocBlock(blk);
+  }
+  if (PrintMiscellaneous && Verbose) {
+    if (blk->_word_size == 0) {
+      warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
+    }
+  }
+}
+
+// Obtain a chunk of blk->_refillSize words -- from an indexed free
+// list when small enough, otherwise from the dictionary -- and hand
+// it to the (empty) linear allocation block. May leave blk empty if
+// no chunk could be found.
+void
+CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
+  assert_locked();
+  assert(blk->_word_size == 0 && blk->_ptr == NULL,
+         "linear allocation block should be empty");
+  FreeChunk* fc;
+  if (blk->_refillSize < SmallForDictionary && 
+      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
+    // A linAB's strategy might be to use small sizes to reduce
+    // fragmentation but still get the benefits of allocation from a
+    // linAB.
+  } else {
+    fc = getChunkFromDictionary(blk->_refillSize);
+  }
+  if (fc != NULL) {
+    blk->_ptr  = (HeapWord*)fc;
+    blk->_word_size = fc->size();
+    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
+  }
+}
+
+// Support for compaction
+
+// Phase 1 of mark-compact: compute forwarding addresses for all live
+// objects (via the shared SCAN_AND_FORWARD machinery).
+void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  // prepare_for_compaction() uses the space between live objects
+  // so that later phase can skip dead space quickly.  So verification
+  // of the free lists doesn't work after.
+}
+
+// Size helpers used by the SCAN_AND_* compaction macros below; both
+// apply this space's object-size adjustment.
+#define obj_size(q) adjustObjectSize(oop(q)->size())
+#define adjust_obj_size(s) adjustObjectSize(s)
+
+// Phase 2 of mark-compact: update all interior pointers to refer to
+// objects' forwarded locations.
+void CompactibleFreeListSpace::adjust_pointers() {
+  // In other versions of adjust_pointers(), a bail out
+  // based on the amount of live data in the generation
+  // (i.e., if 0, bail out) may be used.
+  // Cannot test used() == 0 here because the free lists have already
+  // been mangled by the compaction.
+
+  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  // See note about verification in prepare_for_compaction().
+}
+
+// Phase 3 of mark-compact: slide live objects to their new locations.
+void CompactibleFreeListSpace::compact() {
+  SCAN_AND_COMPACT(obj_size);
+}
+
+// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
+// where fbs is free block sizes
+// Result is in [0, 1]: 0 means all free space is one block, values
+// near 1 mean free space is scattered across many small blocks.
+double CompactibleFreeListSpace::flsFrag() const {
+  size_t itabFree = totalSizeInIndexedFreeLists();
+  double frag = 0.0;
+  size_t i;
+
+  // Accumulate sum of squared block sizes over the indexed lists.
+  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    double sz  = i;
+    frag      += _indexedFreeList[i].count() * (sz * sz);
+  }
+
+  double totFree = itabFree +
+                   _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
+  if (totFree > 0) {
+    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / 
+            (totFree * totFree));
+    frag = (double)1.0  - frag;
+  } else {
+    assert(frag == 0.0, "Follows from totFree == 0");
+  }
+  return frag;
+}
+
+// Slack factors applied to a list's desired length when computing its
+// coalescing target (5%) and its post-sweep surplus (10%).
+#define CoalSurplusPercent 1.05
+#define SplitSurplusPercent 1.10
+
+// Start a free-list census at the beginning of a sweep: compute each
+// indexed list's desired length from demand estimates, derive its
+// coalescing target, and snapshot pre-sweep counts. The dictionary
+// does the same for its size classes.
+void CompactibleFreeListSpace::beginSweepFLCensus(
+  float inter_sweep_current,
+  float inter_sweep_estimate) {
+  assert_locked();
+  size_t i;
+  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    FreeList* fl    = &_indexedFreeList[i];
+    fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
+    fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
+    fl->set_beforeSweep(fl->count());
+    fl->set_bfrSurp(fl->surplus());
+  }
+  _dictionary->beginSweepDictCensus(CoalSurplusPercent,
+                                    inter_sweep_current,
+                                    inter_sweep_estimate);
+}
+
+// Recompute each indexed list's surplus: its current length minus its
+// desired length inflated by SplitSurplusPercent.
+void CompactibleFreeListSpace::setFLSurplus() {
+  assert_locked();
+  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    FreeList* fl = &_indexedFreeList[i];
+    ssize_t desired_with_slack =
+      (ssize_t)((double)fl->desired() * SplitSurplusPercent);
+    fl->set_surplus(fl->count() - desired_with_slack);
+  }
+}
+
+// Walk the indexed lists from largest to smallest, setting each
+// list's hint to the nearest larger size that currently has a
+// surplus (IndexSetSize when none does).
+// NOTE(review): the loop steps down by IndexSetStride from
+// IndexSetSize - 1 and stops at i == 0; confirm the parity of
+// IndexSetSize/IndexSetStride guarantees termination exactly at 0
+// (see checkFreeListConsistency()).
+void CompactibleFreeListSpace::setFLHints() {
+  assert_locked();
+  size_t i;
+  size_t h = IndexSetSize;
+  for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
+    FreeList *fl = &_indexedFreeList[i];
+    fl->set_hint(h);
+    if (fl->surplus() > 0) {
+      h = i;
+    }
+  }
+}
+
+// Snapshot this sweep's population into prevSweep and zero the
+// per-sweep birth/death counters on every indexed list.
+void CompactibleFreeListSpace::clearFLCensus() {
+  assert_locked();
+  // Use size_t for the index, consistent with the other census loops
+  // (the original used int, giving a signed/unsigned comparison
+  // against IndexSetSize).
+  size_t i;
+  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    FreeList *fl = &_indexedFreeList[i];
+    fl->set_prevSweep(fl->count());
+    fl->set_coalBirths(0);
+    fl->set_coalDeaths(0);
+    fl->set_splitBirths(0);
+    fl->set_splitDeaths(0);  
+  }
+}
+
+// Finish the census for this sweep: derive surpluses and hints,
+// optionally print the census, then reset counters for the next one.
+void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
+  setFLSurplus();
+  setFLHints();
+  if (PrintGC && PrintFLSCensus > 0) {
+    printFLCensus(sweepCt);
+  }
+  clearFLCensus();
+  assert_locked();
+  _dictionary->endSweepDictCensus(SplitSurplusPercent);
+}
+
+// True when the free list for this size already holds more chunks
+// than the sweeper's coalescing target, i.e. further chunks of this
+// size should be coalesced away rather than kept.
+bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
+  if (size < SmallForDictionary) {
+    FreeList *fl = &_indexedFreeList[size];
+    // A negative target means "always over-populated".
+    return (fl->coalDesired() < 0) ||
+           ((int)fl->count() > fl->coalDesired());
+  } else {
+    return dictionary()->coalDictOverPopulated(size);
+  }
+}
+
+// Census: a chunk of this (small) size was created by coalescing.
+void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
+  assert(size < SmallForDictionary, "Size too large for indexed list");
+  FreeList* list = &_indexedFreeList[size];
+  list->increment_coalBirths();
+  list->increment_surplus();
+}
+
+// Census: a chunk of this (small) size was consumed by coalescing.
+void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
+  assert(size < SmallForDictionary, "Size too large for indexed list");
+  FreeList* list = &_indexedFreeList[size];
+  list->increment_coalDeaths();
+  list->decrement_surplus();
+}
+
+// Census: record a coalescing birth, dispatching on chunk size.
+void CompactibleFreeListSpace::coalBirth(size_t size) {
+  if (size >= SmallForDictionary) {
+    dictionary()->dictCensusUpdate(size,
+                                   false /* split */,
+                                   true  /* birth */);
+  } else {
+    smallCoalBirth(size);
+  }
+}
+
+// Census: record a coalescing death, dispatching on chunk size.
+void CompactibleFreeListSpace::coalDeath(size_t size) {
+  if (size >= SmallForDictionary) {
+    dictionary()->dictCensusUpdate(size,
+                                   false /* split */,
+                                   false /* birth */);
+  } else {
+    smallCoalDeath(size);
+  }
+}
+
+// Census: a chunk of this (small) size was created by splitting.
+void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
+  assert(size < SmallForDictionary, "Size too large for indexed list");
+  FreeList* list = &_indexedFreeList[size];
+  list->increment_splitBirths();
+  list->increment_surplus();
+}
+
+// Census: a chunk of this (small) size was consumed by splitting.
+void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
+  assert(size < SmallForDictionary, "Size too large for indexed list");
+  FreeList* list = &_indexedFreeList[size];
+  list->increment_splitDeaths();
+  list->decrement_surplus();
+}
+
+// Census: record a split birth, dispatching on chunk size.
+void CompactibleFreeListSpace::splitBirth(size_t size) {
+  if (size >= SmallForDictionary) {
+    dictionary()->dictCensusUpdate(size,
+                                   true /* split */,
+                                   true /* birth */);
+  } else {
+    smallSplitBirth(size);
+  }
+}
+
+// Census: record a split death, dispatching on chunk size.
+void CompactibleFreeListSpace::splitDeath(size_t size) {
+  if (size >= SmallForDictionary) {
+    dictionary()->dictCensusUpdate(size,
+                                   true  /* split */,
+                                   false /* birth */);
+  } else {
+    smallSplitDeath(size);
+  }
+}
+
+// Census: splitting a chunk of size 'from' kills one chunk and
+// creates two -- one of size 'to1' and one of the remaining words.
+void CompactibleFreeListSpace::split(size_t from, size_t to1) {
+  size_t remainder = from - to1;
+  splitDeath(from);
+  splitBirth(to1);
+  splitBirth(remainder);
+}
+
+
+// Print a one-line identification of this space (delegates details
+// to the Space base class).
+void CompactibleFreeListSpace::print() const {
+  tty->print(" CompactibleFreeListSpace");
+  Space::print();
+}
+
+// Put the space into a verifiable state before heap verification.
+void CompactibleFreeListSpace::prepare_for_verify() {
+  assert_locked();
+  repairLinearAllocationBlocks();
+  // Verify that the SpoolBlocks look like free blocks of
+  // appropriate sizes... To be done ...
+}
+
+// Block closure that walks every block in the space, verifying live
+// objects and (optionally) checking that coalescable free chunks are
+// actually on a free list.
+class VerifyAllBlksClosure: public BlkClosure {
+  const CompactibleFreeListSpace* _sp;
+  const MemRegion                 _span;
+
+ public:
+  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
+    MemRegion span) :  _sp(sp), _span(span) { }
+
+  // Returns the size of the block at addr; a zero return would mean
+  // the iteration makes no progress, hence the livelock guarantee.
+  size_t do_blk(HeapWord* addr) {
+    size_t res;
+    if (_sp->block_is_obj(addr)) {
+      oop p = oop(addr);
+      guarantee(p->is_oop(), "Should be an oop");
+      res = _sp->adjustObjectSize(p->size());
+      if (_sp->obj_is_alive(addr)) {
+        p->verify();
+      }
+    } else {
+      FreeChunk* fc = (FreeChunk*)addr;
+      res = fc->size();
+      if (FLSVerifyLists && !fc->cantCoalesce()) {
+        guarantee(_sp->verifyChunkInFreeLists(fc),
+                  "Chunk should be on a free list");
+      }
+    }
+    guarantee(res != 0, "Livelock: no rank reduction!");
+    return res;
+  }
+};
+
+// Oop closure that cross-checks every reference between this space
+// and the rest of the heap: references into the CMS span must point
+// at valid (and, after remark, marked) objects, and references from
+// this space outward must be valid oops.
+class VerifyAllOopsClosure: public OopClosure {
+  const CMSCollector*             _collector;
+  const CompactibleFreeListSpace* _sp;
+  const MemRegion                 _span;
+  const bool                      _past_remark;  // has remark completed?
+  const CMSBitMap*                _bit_map;
+
+ public:
+  VerifyAllOopsClosure(const CMSCollector* collector,
+    const CompactibleFreeListSpace* sp, MemRegion span,
+    bool past_remark, CMSBitMap* bit_map) :
+    OopClosure(), _collector(collector), _sp(sp), _span(span),
+    _past_remark(past_remark), _bit_map(bit_map) { }
+
+  void do_oop(oop* ptr) {
+    oop p = *ptr;
+    if (p != NULL) {
+      if (_span.contains(p)) { // the interior oop points into CMS heap
+        if (!_span.contains(ptr)) { // reference from outside CMS heap
+          // Should be a valid object; the first disjunct below allows
+          // us to sidestep an assertion in block_is_obj() that insists
+          // that p be in _sp. Note that several generations (and spaces)
+          // are spanned by _span (CMS heap) above.
+          guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
+                    "Should be an object");
+          guarantee(p->is_oop(), "Should be an oop");
+          p->verify();
+          if (_past_remark) {
+            // Remark has been completed, the object should be marked
+            _bit_map->isMarked((HeapWord*)p);
+          }
+        }
+        else { // reference within CMS heap
+          if (_past_remark) {
+            // Remark has been completed -- so the referent should have
+            // been marked, if referring object is.
+            if (_bit_map->isMarked(_collector->block_start(ptr))) {
+              guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
+            }
+          }
+        }
+      } else if (_sp->is_in_reserved(ptr)) {
+        // the reference is from FLS, and points out of FLS
+        guarantee(p->is_oop(), "Should be an oop");
+        p->verify();
+      }
+    }
+  }
+};
+
+// Full verification of this space: internal free-list structures,
+// every block/object, and (optionally) all heap references touching
+// this space, plus the block offset table.
+void CompactibleFreeListSpace::verify(bool ignored) const {
+  assert_lock_strong(&_freelistLock);
+  verify_objects_initialized();
+  MemRegion span = _collector->_span;
+  bool past_remark = (_collector->abstract_state() ==
+                      CMSCollector::Sweeping);
+
+  ResourceMark rm;
+  HandleMark  hm;
+
+  // Check integrity of CFL data structures
+  _promoInfo.verify();
+  _dictionary->verify();
+  if (FLSVerifyIndexTable) {
+    verifyIndexedFreeLists();
+  }
+  // Check integrity of all objects and free blocks in space
+  {
+    VerifyAllBlksClosure cl(this, span);
+    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
+  }
+  // Check that all references in the heap to FLS
+  // are to valid objects in FLS or that references in
+  // FLS are to valid objects elsewhere in the heap
+  if (FLSVerifyAllHeapReferences)
+  {
+    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
+      _collector->markBitMap());
+    CollectedHeap* ch = Universe::heap();
+    ch->oop_iterate(&cl);              // all oops in generations
+    ch->permanent_oop_iterate(&cl);    // all oops in perm gen
+  }
+
+  if (VerifyObjectStartArray) {
+    // Verify the block offset table
+    _bt.verify();
+  }
+}
+
+#ifndef PRODUCT
+// Debug-only: verify the dictionary and/or indexed lists according to
+// the FLSVerify* flags (FLSVerifyLists implies both).
+void CompactibleFreeListSpace::verifyFreeLists() const {
+  if (FLSVerifyLists) {
+    _dictionary->verify();
+    verifyIndexedFreeLists();
+  } else {
+    if (FLSVerifyDictionary) {
+      _dictionary->verify();
+    }
+    if (FLSVerifyIndexTable) {
+      verifyIndexedFreeLists();
+    }
+  }
+}
+#endif
+
+// Verify every indexed free list: slots below MinChunkSize must be
+// empty; the rest are checked individually.
+// NOTE(review): this calls verifyIndexedFreeList() for every index,
+// including odd ones, while that method guarantees size % 2 == 0 --
+// confirm the intended interaction when IndexSetStride == 1.
+void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
+  size_t i = 0;
+  for (; i < MinChunkSize; i++) {
+    guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
+  }
+  for (; i < IndexSetSize; i++) {
+    verifyIndexedFreeList(i);
+  }
+}
+
+// Verify one indexed free list: every chunk has the list's size, is
+// marked free, and the doubly-linked structure is intact.
+// NOTE(review): the first guarantee fires for any odd size argument
+// regardless of list contents -- presumably valid only when
+// IndexSetStride == 2 keeps odd slots unused; verify against callers.
+void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
+  guarantee(size % 2 == 0, "Odd slots should be empty");
+  for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
+    fc = fc->next()) {
+    guarantee(fc->size() == size, "Size inconsistency");
+    guarantee(fc->isFree(), "!free?");
+    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
+  }
+}
+
+#ifndef PRODUCT
+// Debug-only: sanity-check the compile-time relationships between the
+// indexed-list parameters and the dictionary's minimum chunk size.
+void CompactibleFreeListSpace::checkFreeListConsistency() const {
+  assert(_dictionary->minSize() <= IndexSetSize,
+    "Some sizes can't be allocated without recourse to"
+    " linear allocation buffers");
+  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
+    "else MIN_TREE_CHUNK_SIZE is wrong");
+  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
+         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
+  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
+      "Some for-loops may be incorrectly initialized");
+  assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
+      "For-loops that iterate over IndexSet with stride 2 may be wrong");
+}
+#endif
+
+// Print the end-of-sweep free-list census: one row per indexed list,
+// a totals row, a growth/deficit summary, then the dictionary census.
+void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
+  assert_lock_strong(&_freelistLock);
+  // Column totals accumulated across all indexed lists.
+  ssize_t bfrSurp     = 0;
+  ssize_t surplus     = 0;
+  ssize_t desired     = 0;
+  ssize_t prevSweep   = 0;
+  ssize_t beforeSweep = 0;
+  ssize_t count       = 0;
+  ssize_t coalBirths  = 0;
+  ssize_t coalDeaths  = 0;
+  ssize_t splitBirths = 0;
+  ssize_t splitDeaths = 0;
+  gclog_or_tty->print("end sweep# %d\n", sweepCt);
+  gclog_or_tty->print("%4s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
+             "%7s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
+             "%7s\t"    "\n",
+             "size",    "bfrsurp",   "surplus",   "desired",   "prvSwep",     
+             "bfrSwep", "count",     "cBirths",   "cDeaths",   "sBirths",
+             "sDeaths");
+
+  size_t totalFree = 0;
+  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    const FreeList *fl = &_indexedFreeList[i];                                                       
+	totalFree += fl->count() * fl->size();
+
+    gclog_or_tty->print("%4d\t"          "%7d\t"             "%7d\t"        "%7d\t"
+               "%7d\t"          "%7d\t"             "%7d\t"        "%7d\t"
+               "%7d\t"          "%7d\t"             "%7d\t"        "\n",
+               fl->size(),       fl->bfrSurp(),     fl->surplus(), fl->desired(), 
+	       fl->prevSweep(),  fl->beforeSweep(), fl->count(),   fl->coalBirths(), 
+	       fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
+    bfrSurp     += fl->bfrSurp();
+    surplus     += fl->surplus();
+    desired     += fl->desired();
+    prevSweep   += fl->prevSweep();
+    beforeSweep += fl->beforeSweep();
+    count       += fl->count();
+    coalBirths  += fl->coalBirths();
+    coalDeaths  += fl->coalDeaths();
+    splitBirths += fl->splitBirths();
+    splitDeaths += fl->splitDeaths();
+  }                                                                                             
+  gclog_or_tty->print("%4s\t"
+            "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t"
+            "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t" "\n",
+            "totl",
+            bfrSurp,     surplus,     desired,     prevSweep,     beforeSweep,
+            count,       coalBirths,  coalDeaths,  splitBirths,   splitDeaths);
+  gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
+  // Growth is net births normalized by last sweep's population;
+  // deficit is the shortfall relative to the desired population.
+  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
+    (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
+	    (prevSweep != 0 ? (double)prevSweep : 1.0),
+    (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
+  _dictionary->printDictCensus();
+}
+
+// Return the next displaced header, incrementing the pointer and
+// recycling spool area as necessary.
+markOop PromotionInfo::nextDisplacedHeader() {
+  assert(_spoolHead != NULL, "promotionInfo inconsistency");
+  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
+         "Empty spool space: no displaced header can be fetched");
+  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
+  markOop hdr = _spoolHead->displacedHdr[_firstIndex];
+  // Spool forward
+  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
+    // forward to next block, recycling this block into spare spool buffer
+    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
+    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
+    _spoolHead->nextSpoolBlock = _spareSpool;
+    _spareSpool = _spoolHead;
+    _spoolHead = tmp;
+    // Slot 0 of a spool block holds its header, so payload starts at 1.
+    _firstIndex = 1;
+    NOT_PRODUCT(
+      if (_spoolHead == NULL) {  // all buffers fully consumed
+        assert(_spoolTail == NULL && _nextIndex == 1,
+               "spool buffers processing inconsistency");
+      }
+    )
+  } 
+  return hdr;
+}
+
+// Convenience overload: track a promoted object using its own klass.
+void PromotionInfo::track(PromotedObject* trackOop) {
+  track(trackOop, oop(trackOop)->klass());
+}
+
+// Record a newly promoted object: spool its header if it must be
+// preserved, append it to the promoted-object list, and set its
+// promoted mark so card scanning can skip it.
+void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
+  // make a copy of header as it may need to be spooled
+  markOop mark = oop(trackOop)->mark();
+  trackOop->clearNext();
+  if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
+    // save non-prototypical header, and mark oop
+    saveDisplacedHeader(mark);
+    trackOop->setDisplacedMark();
+  } else {
+    // we'd like to assert something like the following:
+    // assert(mark == markOopDesc::prototype(), "consistency check");
+    // ... but the above won't work because the age bits have not (yet) been
+    // cleared. The remainder of the check would be identical to the
+    // condition checked in must_be_preserved() above, so we don't really
+    // have anything useful to check here!
+  }
+  // Append to the singly-linked promoted list.
+  if (_promoTail != NULL) {
+    assert(_promoHead != NULL, "List consistency");
+    _promoTail->setNext(trackOop);
+    _promoTail = trackOop;
+  } else {
+    assert(_promoHead == NULL, "List consistency");
+    _promoHead = _promoTail = trackOop;
+  }
+  // Mark as newly promoted, so we can skip over such objects
+  // when scanning dirty cards
+  assert(!trackOop->hasPromotedMark(), "Should not have been marked");
+  trackOop->setPromotedMark();
+}
+
+// Save the given displaced header, incrementing the pointer and
+// obtaining more spool area as necessary.
+// Precondition: spooling space exists (see ensure_spooling_space()).
+void PromotionInfo::saveDisplacedHeader(markOop hdr) {
+  assert(_spoolHead != NULL && _spoolTail != NULL,
+         "promotionInfo inconsistency");
+  assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
+  _spoolTail->displacedHdr[_nextIndex] = hdr;
+  // Spool forward
+  if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
+    // get a new spooling block
+    assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
+    _splice_point = _spoolTail;                   // save for splicing
+    _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
+    _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
+    // ... but will attempt filling before next promotion attempt
+    // Slot 0 of a SpoolBlock is its self-pointer, so indices restart at 1.
+    _nextIndex = 1;
+  }
+}
+
+// Ensure that spooling space exists. Return false if spooling space
+// could not be obtained.
+// Called only when has_spooling_space() is false; either appends a new
+// block at the current tail, or -- when the previous tail filled up and
+// a replacement could not be obtained at that time (_spoolTail == NULL,
+// see saveDisplacedHeader()) -- splices a new block back onto the list
+// at _splice_point.
+bool PromotionInfo::ensure_spooling_space_work() {
+  assert(!has_spooling_space(), "Only call when there is no spooling space");
+  // Try and obtain more spooling space
+  SpoolBlock* newSpool = getSpoolBlock();
+  assert(newSpool == NULL ||
+         (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
+        "getSpoolBlock() sanity check");
+  if (newSpool == NULL) {
+    return false;
+  }
+  // Slot 0 of a SpoolBlock is its self-pointer; first usable slot is 1.
+  _nextIndex = 1;
+  if (_spoolTail == NULL) {
+    _spoolTail = newSpool;
+    if (_spoolHead == NULL) {
+      // Spool list was entirely empty.
+      _spoolHead = newSpool;
+      _firstIndex = 1;
+    } else {
+      // List is non-empty but tail-less: reattach at the saved splice point.
+      assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
+             "Splice point invariant");
+      // Extra check that _splice_point is connected to list
+      #ifdef ASSERT
+      {
+        SpoolBlock* blk = _spoolHead;
+        for (; blk->nextSpoolBlock != NULL;
+             blk = blk->nextSpoolBlock);
+        assert(blk != NULL && blk == _splice_point,
+               "Splice point incorrect");
+      }
+      #endif // ASSERT
+      _splice_point->nextSpoolBlock = newSpool;
+    }
+  } else {
+    // Normal case: extend the list at the existing tail.
+    assert(_spoolHead != NULL, "spool list consistency");
+    _spoolTail->nextSpoolBlock = newSpool;
+    _spoolTail = newSpool;
+  }
+  return true;
+}
+
+// Return a free spooling block: pop one from the spare pool if
+// available, otherwise carve (and initialize) a fresh block out of the
+// space's scratch area. May return NULL if neither source can supply
+// a block.
+SpoolBlock* PromotionInfo::getSpoolBlock() {
+  SpoolBlock* blk = _spareSpool;
+  if (blk != NULL) {
+    // Pop the head of the spare list and detach it.
+    _spareSpool = blk->nextSpoolBlock;
+    blk->nextSpoolBlock = NULL;
+  } else {
+    // Spare pool exhausted: allocate from the heap; this may fail.
+    blk = (SpoolBlock*)(space()->allocateScratch(refillSize()));
+    if (blk != NULL) {
+      blk->init();
+    }
+  }
+  assert(blk == NULL || blk->nextSpoolBlock == NULL, "postcondition");
+  return blk;
+}
+
+// Begin a promotion-tracking epoch. Requires that no displaced headers
+// are currently spooled (single block, equal indices).
+void PromotionInfo::startTrackingPromotions() {
+  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+         "spooling inconsistency?");
+  _firstIndex = _nextIndex = 1;
+  _tracking = true;
+}
+
+// End a promotion-tracking epoch. Requires that all spooled displaced
+// headers have been consumed (single block, equal indices).
+void PromotionInfo::stopTrackingPromotions() {
+  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+         "spooling inconsistency?");
+  _firstIndex = _nextIndex = 1;
+  _tracking = false;
+}
+
+// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
+// points to the next slot available for filling.
+// The set of slots holding displaced headers are then all those in the
+// right-open interval denoted by: 
+// 
+//    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
+// 
+// When _spoolTail is NULL, then the set of slots with displaced headers
+// is all those starting at the slot <_spoolHead, _firstIndex> and
+// going up to the last slot of last block in the linked list.
+// In this latter case, _splice_point points to the tail block of
+// this linked list of blocks holding displaced headers.
+void PromotionInfo::verify() const {
+  // Verify the following:
+  // 1. the number of displaced headers matches the number of promoted
+  //    objects that have displaced headers
+  // 2. each promoted object lies in this space
+  debug_only(
+    // Layout check using an offsetof-style idiom: field addresses are
+    // computed from a NULL pointer but never dereferenced.
+    PromotedObject* junk = NULL;
+    assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
+           "Offset of PromotedObject::_next is expected to align with "
+           "  the OopDesc::_mark within OopDesc");
+  )
+  // FIXME: guarantee????
+  guarantee(_spoolHead == NULL || _spoolTail != NULL ||
+            _splice_point != NULL, "list consistency");
+  guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
+  // count the number of objects with displaced headers
+  size_t numObjsWithDisplacedHdrs = 0;
+  for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
+    guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
+    // the last promoted object may fail the mark() != NULL test of is_oop().
+    guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
+    if (curObj->hasDisplacedMark()) {
+      numObjsWithDisplacedHdrs++;
+    }
+  }
+  // Count the number of displaced headers
+  size_t numDisplacedHdrs = 0;
+  for (SpoolBlock* curSpool = _spoolHead;
+       curSpool != _spoolTail && curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    // the first entry is just a self-pointer; indices 1 through
+    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
+    guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
+              "first entry of displacedHdr should be self-referential");
+    numDisplacedHdrs += curSpool->bufferSize - 1;
+  }
+  guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
+            "internal consistency");
+  guarantee(_spoolTail != NULL || _nextIndex == 1,
+            "Inconsistency between _spoolTail and _nextIndex");
+  // We overcounted (_firstIndex-1) worth of slots in block
+  // _spoolHead and we undercounted (_nextIndex-1) worth of
+  // slots in block _spoolTail. We make an appropriate
+  // adjustment by subtracting the first and adding the
+  // second:  - (_firstIndex - 1) + (_nextIndex - 1) 
+  numDisplacedHdrs += (_nextIndex - _firstIndex);
+  guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
+}
+
+
+// Construct a per-thread local allocation buffer over "cfls" for
+// parallel promotion: give each local indexed free list its size.
+// Only indices at multiples of IndexSetStride, starting at
+// IndexSetStart, are used.
+CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
+  _cfls(cfls)
+{
+  _blocks_to_claim = CMSParPromoteBlocksToClaim;
+  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+       i < CompactibleFreeListSpace::IndexSetSize;
+       i += CompactibleFreeListSpace::IndexSetStride) {
+    _indexedFreeList[i].set_size(i);
+  }
+}
+
+// Allocate a block of (adjusted) size "word_sz" from this LAB.
+// Small requests come from the thread-local indexed free lists,
+// refilled in bulk from the shared space when empty; large requests
+// go straight to the shared dictionary under its lock.
+// Returns NULL if no space could be obtained.
+HeapWord* CFLS_LAB::alloc(size_t word_sz) {
+  FreeChunk* res;
+  word_sz = _cfls->adjustObjectSize(word_sz);
+  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
+    // This locking manages sync with other large object allocations.
+    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
+                    Mutex::_no_safepoint_check_flag);
+    res = _cfls->getChunkFromDictionaryExact(word_sz);
+    if (res == NULL) return NULL;
+  } else {
+    FreeList* fl = &_indexedFreeList[word_sz];
+    // (A dead "filled" flag, declared twice with the inner declaration
+    // shadowing the outer and never read, has been removed here.)
+    if (fl->count() == 0) {
+      // Attempt to refill this local free list.
+      _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
+      // If it didn't work, give up.
+      if (fl->count() == 0) return NULL;
+    }
+    res = fl->getChunkAtHead();
+    assert(res != NULL, "Why was count non-zero?");
+  }
+  res->markNotFree();
+  assert(!res->isFree(), "shouldn't be marked free");
+  assert(oop(res)->klass() == NULL, "should look uninitialized");
+  // mangle a just allocated object with a distinct pattern.
+  debug_only(res->mangleAllocated(word_sz));
+  return (HeapWord*)res;
+}
+
+// Retire this LAB: return any chunks remaining in the local indexed
+// free lists to the shared space's corresponding lists (taking the
+// per-size-class parallel lock for each non-empty list), then reset
+// each local list to its initial empty state.
+void CFLS_LAB::retire() {
+  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+       i < CompactibleFreeListSpace::IndexSetSize;
+       i += CompactibleFreeListSpace::IndexSetStride) {
+    if (_indexedFreeList[i].count() > 0) {
+      MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+                      Mutex::_no_safepoint_check_flag);
+      _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
+      // Reset this list.
+      _indexedFreeList[i] = FreeList();
+      _indexedFreeList[i].set_size(i);
+    }
+  }
+}
+
+// Obtain up to "n" chunks of size "word_sz" for "fl" (which must be
+// empty): first try to grab chunks of that size from the shared
+// indexed free lists under the per-size lock; failing that, take a
+// large chunk from the dictionary under the dictionary lock and split
+// it up. Ordering of writes when splitting (free-block marking before
+// BOT update) is significant for other, concurrently-scanning GC
+// threads.
+void
+CompactibleFreeListSpace::
+par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+	 "Precondition");
+
+  // We'll try all multiples of word_sz in the indexed set (starting with
+  // word_sz itself), then try getting a big chunk and splitting it.
+  // NOTE(review): the "k == 1" term in the loop condition below makes
+  // the loop exit after the first iteration, so in fact only word_sz
+  // itself is tried, despite the comment above -- confirm intended.
+  int k = 1;
+  size_t cur_sz = k * word_sz;
+  bool found = false;
+  while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
+    FreeList* gfl = &_indexedFreeList[cur_sz];
+    FreeList fl_for_cur_sz;  // Empty.
+    fl_for_cur_sz.set_size(cur_sz);
+    {
+      MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
+                      Mutex::_no_safepoint_check_flag);
+      if (gfl->count() != 0) {
+	size_t nn = MAX2(n/k, (size_t)1);
+	gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
+	found = true;
+      }
+    }
+    // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
+    if (found) {
+      if (k == 1) {
+	fl->prepend(&fl_for_cur_sz);
+      } else {
+	// Divide each block on fl_for_cur_sz up k ways.
+	FreeChunk* fc;
+	while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
+	  // Must do this in reverse order, so that anybody attempting to
+	  // access the main chunk sees it as a single free block until we
+	  // change it.
+          size_t fc_size = fc->size();
+	  for (int i = k-1; i >= 0; i--) {
+	    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+	    ffc->setSize(word_sz);
+	    ffc->linkNext(NULL);
+	    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+            // Above must occur before BOT is updated below.
+            // splitting from the right, fc_size == (k - i + 1) * wordsize
+	    _bt.mark_block((HeapWord*)ffc, word_sz);
+            fc_size -= word_sz;
+            _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+            _bt.verify_single_block((HeapWord*)fc, fc_size);
+            _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+	    // Push this on "fl".
+	    fl->returnChunkAtHead(ffc);
+	  }
+	  // TRAP
+	  assert(fl->tail()->next() == NULL, "List invariant.");
+	}
+      }
+      return;
+    }
+    k++; cur_sz = k * word_sz;
+  }
+  // Otherwise, we'll split a block from the dictionary.
+  FreeChunk* fc = NULL;
+  FreeChunk* rem_fc = NULL;
+  size_t rem;
+  {
+    MutexLockerEx x(parDictionaryAllocLock(),
+                    Mutex::_no_safepoint_check_flag);
+    // Shrink the request until the dictionary can satisfy it.
+    while (n > 0) {
+      fc = dictionary()->getChunk(MAX2(n * word_sz, 
+				  _dictionary->minSize()),
+				  FreeBlockDictionary::atLeast);
+      if (fc != NULL) {
+        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
+        dictionary()->dictCensusUpdate(fc->size(),
+				       true /*split*/,
+				       false /*birth*/);
+        break;
+      } else {
+        n--;
+      }
+    }
+    if (fc == NULL) return;
+    // Otherwise, split up that block.
+    size_t nn = fc->size() / word_sz;
+    n = MIN2(nn, n);
+    rem = fc->size() - n * word_sz;
+    // If there is a remainder, and it's too small, allocate one fewer.
+    if (rem > 0 && rem < MinChunkSize) {
+      n--; rem += word_sz;
+    }
+    // First return the remainder, if any.
+    // Note that we hold the lock until we decide if we're going to give
+    // back the remainder to the dictionary, since a contending allocator
+    // may otherwise see the heap as empty.  (We're willing to take that
+    // hit if the block is a small block.)
+    if (rem > 0) {
+      size_t prefix_size = n * word_sz;
+      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
+      rem_fc->setSize(rem);
+      rem_fc->linkNext(NULL);
+      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+      // Above must occur before BOT is updated below.
+      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
+      if (rem >= IndexSetSize) {
+	returnChunkToDictionary(rem_fc);
+	dictionary()->dictCensusUpdate(fc->size(),
+				       true /*split*/,
+				       true /*birth*/);
+	rem_fc = NULL;
+      }
+      // Otherwise, return it to the small list below.
+    }
+  }
+  // Small remainder goes back on the appropriate indexed free list.
+  if (rem_fc != NULL) {
+    MutexLockerEx x(_indexedFreeListParLocks[rem],
+                    Mutex::_no_safepoint_check_flag);
+    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
+    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
+    smallSplitBirth(rem);
+  }
+
+  // Now do the splitting up.
+  // Must do this in reverse order, so that anybody attempting to
+  // access the main chunk sees it as a single free block until we
+  // change it.
+  size_t fc_size = n * word_sz;
+  // All but first chunk in this loop
+  for (ssize_t i = n-1; i > 0; i--) {
+    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+    ffc->setSize(word_sz);
+    ffc->linkNext(NULL);
+    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+    // Above must occur before BOT is updated below.
+    // splitting from the right, fc_size == (n - i + 1) * wordsize
+    _bt.mark_block((HeapWord*)ffc, word_sz);
+    fc_size -= word_sz;
+    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+    _bt.verify_single_block((HeapWord*)fc, fc_size);
+    // Push this on "fl".
+    fl->returnChunkAtHead(ffc);
+  }
+  // First chunk
+  fc->setSize(word_sz);
+  fc->linkNext(NULL);
+  fc->linkPrev(NULL);
+  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
+  _bt.verify_single_block((HeapWord*)fc, fc->size());
+  fl->returnChunkAtHead(fc);
+
+  // Update split-birth and surplus statistics for this size class.
+  {
+    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
+                    Mutex::_no_safepoint_check_flag);
+    ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
+    _indexedFreeList[word_sz].set_splitBirths(new_births);
+    ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
+    _indexedFreeList[word_sz].set_surplus(new_surplus);
+  }
+
+  // TRAP
+  assert(fl->tail()->next() == NULL, "List invariant.");
+}
+
+// Set up the space's par_seq_tasks structure for work claiming
+// for parallel rescan. See CMSParRemarkTask where this is currently used.
+// XXX Need to suitably abstract and generalize this and the next
+// method into one.
+void
+CompactibleFreeListSpace::
+initialize_sequential_subtasks_for_rescan(int n_threads) {
+  // The "size" of each task is fixed according to rescan_task_size.
+  assert(n_threads > 0, "Unexpected n_threads argument");
+  const size_t task_size = rescan_task_size();
+  // Number of tasks, rounding up so the whole used region is covered.
+  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
+  assert((used_region().start() + (n_tasks - 1)*task_size <
+          used_region().end()) &&
+         (used_region().start() + n_tasks*task_size >=
+          used_region().end()), "n_task calculation incorrect");
+  SequentialSubTasksDone* pst = conc_par_seq_tasks();
+  assert(!pst->valid(), "Clobbering existing data?");
+  pst->set_par_threads(n_threads);
+  pst->set_n_tasks((int)n_tasks);
+}
+
+// Set up the space's par_seq_tasks structure for work claiming
+// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
+// "low", if non-NULL, trims the span to start at (a card-aligned
+// address at or below) that point.
+void
+CompactibleFreeListSpace::
+initialize_sequential_subtasks_for_marking(int n_threads,
+                                           HeapWord* low) {
+  // The "size" of each task is fixed according to rescan_task_size.
+  assert(n_threads > 0, "Unexpected n_threads argument");
+  const size_t task_size = marking_task_size();
+  assert(task_size > CardTableModRefBS::card_size_in_words &&
+         (task_size %  CardTableModRefBS::card_size_in_words == 0),
+         "Otherwise arithmetic below would be incorrect");
+  MemRegion span = _gen->reserved();
+  if (low != NULL) {
+    if (span.contains(low)) {
+      // Align low down to  a card boundary so that
+      // we can use block_offset_careful() on span boundaries.
+      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
+                                 CardTableModRefBS::card_size);
+      // Clip span prefix at aligned_low
+      span = span.intersection(MemRegion(aligned_low, span.end()));
+    } else if (low > span.end()) {
+      span = MemRegion(low, low);  // Null region
+    } // else use entire span
+  }
+  assert(span.is_empty() || 
+         ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
+        "span should start at a card boundary");
+  // Number of tasks, rounding up so the whole span is covered.
+  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
+  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
+  assert(n_tasks == 0 ||
+         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
+          (span.start() + n_tasks*task_size >= span.end())),
+         "n_task calculation incorrect");
+  SequentialSubTasksDone* pst = conc_par_seq_tasks();
+  assert(!pst->valid(), "Clobbering existing data?");
+  pst->set_par_threads(n_threads);
+  pst->set_n_tasks((int)n_tasks);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,751 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)compactibleFreeListSpace.hpp	1.91 07/05/05 17:05:45 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// Classes in support of keeping track of promotions into a non-Contiguous
+// space, in this case a CompactibleFreeListSpace.
+
+#define CFLS_LAB_REFILL_STATS 0
+
+// Forward declarations
+class CompactibleFreeListSpace;
+class BlkClosure;
+class BlkClosureCareful;
+class UpwardsObjectClosure;
+class ObjectClosureCareful;
+class Klass;
+
+// View of a promoted object whose mark word is co-opted to thread it
+// onto PromotionInfo's list of promoted objects. The low three bits
+// of _next serve as flags (see the enum below); the remaining bits
+// hold the next-object pointer, which relies on objects being at
+// least 8-byte aligned. PromotionInfo::verify() checks that _next
+// overlays the oop's mark word (next_addr() == mark_addr()).
+class PromotedObject VALUE_OBJ_CLASS_SPEC {
+ private:
+  enum {
+    promoted_mask  = right_n_bits(2),   // i.e. 0x3
+    displaced_mark = nth_bit(2),        // i.e. 0x4
+    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
+  };
+  intptr_t _next;   // flag bits in the low 3 bits; next pointer above them
+ public:
+  // Next promoted object on the list (flag bits masked off).
+  inline PromotedObject* next() const {
+    return (PromotedObject*)(_next & next_mask);
+  }
+  // Install the next pointer; OR-ed in so existing flag bits survive.
+  inline void setNext(PromotedObject* x) { 
+    assert(((intptr_t)x & ~next_mask) == 0,
+           "Conflict in bit usage, "
+           " or insufficient alignment of objects");
+    _next |= (intptr_t)x;
+  }
+  inline void setPromotedMark() {
+    _next |= promoted_mask;
+  }
+  inline bool hasPromotedMark() const {
+    return (_next & promoted_mask) == promoted_mask;
+  }
+  inline void setDisplacedMark() {
+    _next |= displaced_mark;
+  }
+  inline bool hasDisplacedMark() const {
+    return (_next & displaced_mark) != 0;
+  }
+  // Clear pointer and all flag bits.
+  inline void clearNext()        { _next = 0; }
+  debug_only(void *next_addr() { return (void *) &_next; })
+};
+
+// A block of spooling storage for displaced mark words, overlaid on a
+// FreeChunk. Blocks are linked through nextSpoolBlock; slot 0 of
+// displacedHdr is a self-pointer (set by init()), so the usable slots
+// are indices 1 .. bufferSize-1.
+class SpoolBlock: public FreeChunk {
+  friend class PromotionInfo;
+ protected:
+  SpoolBlock*  nextSpoolBlock;
+  size_t       bufferSize;        // number of usable words in this block
+  markOop*     displacedHdr;      // the displaced headers start here
+
+  // Note about bufferSize: it denotes the number of entries available plus 1;
+  // legal indices range from 1 through BufferSize - 1.  See the verification
+  // code verify() that counts the number of displaced headers spooled.
+  size_t computeBufferSize() {
+    // Words left over after this block's own fields, measured in markOops.
+    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
+  }
+
+ public:
+  // Initialize a freshly carved block: empty, unlinked, with slot 0 of
+  // displacedHdr pointing at itself.
+  void init() {
+    bufferSize = computeBufferSize();
+    displacedHdr = (markOop*)&displacedHdr;
+    nextSpoolBlock = NULL;
+  }
+};
+
+// Bookkeeping for objects promoted into a CompactibleFreeListSpace
+// while tracking is on: a list of the promoted objects themselves
+// (threaded through their mark words, see PromotedObject) plus a
+// linked list of SpoolBlocks preserving the displaced mark words of
+// those promoted objects whose headers had to be saved.
+class PromotionInfo VALUE_OBJ_CLASS_SPEC {
+  bool            _tracking;      // set if tracking
+  CompactibleFreeListSpace* _space; // the space to which this belongs
+  PromotedObject* _promoHead;     // head of list of promoted objects
+  PromotedObject* _promoTail;     // tail of list of promoted objects
+  SpoolBlock*     _spoolHead;     // first spooling block
+  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
+  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
+  SpoolBlock*     _spareSpool;    // free spool buffer
+  size_t          _firstIndex;    // first active index in
+                                  // first spooling block (_spoolHead)
+  size_t          _nextIndex;     // last active index + 1 in last
+                                  // spooling block (_spoolTail)
+ private:
+  // ensure that spooling space exists; return true if there is spooling space
+  bool ensure_spooling_space_work();
+
+ public:
+  // FIX: _splice_point was previously missing from the initializer
+  // list and thus read uninitialized (e.g. by verify() and by
+  // ensure_spooling_space_work()); initialize it explicitly.
+  PromotionInfo() :
+    _tracking(false), _space(NULL),
+    _promoHead(NULL), _promoTail(NULL),
+    _spoolHead(NULL), _spoolTail(NULL),
+    _splice_point(NULL), _spareSpool(NULL),
+    _firstIndex(1), _nextIndex(1) {}
+
+  bool noPromotions() const {
+    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
+    return _promoHead == NULL;
+  }
+  void startTrackingPromotions();
+  void stopTrackingPromotions();
+  bool tracking() const          { return _tracking;  }
+  void track(PromotedObject* trackOop);      // keep track of a promoted oop
+  // The following variant must be used when trackOop is not fully
+  // initialized and has a NULL klass:
+  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
+  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
+  CompactibleFreeListSpace* space() const     { return _space; }
+  markOop nextDisplacedHeader(); // get next header & forward spool pointer
+  void    saveDisplacedHeader(markOop hdr);
+                                 // save header and forward spool
+
+  inline size_t refillSize() const;
+
+  SpoolBlock* getSpoolBlock();   // return a free spooling block
+  inline bool has_spooling_space() {
+    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
+  }
+  // ensure that spooling space exists
+  bool ensure_spooling_space() {
+    return has_spooling_space() || ensure_spooling_space_work();
+  }
+  #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
+    void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
+  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
+  #undef PROMOTED_OOPS_ITERATE_DECL
+  void promoted_oops_iterate(OopsInGenClosure* cl) {
+    promoted_oops_iterate_v(cl);
+  }
+  void verify()  const;
+  // Drop all bookkeeping state.
+  void reset() {
+    _promoHead = NULL;
+    _promoTail = NULL;
+    _spoolHead = NULL; 
+    _spoolTail = NULL;
+    _splice_point = NULL;  // FIX: was not cleared, leaving a stale pointer
+    _spareSpool = NULL;
+    _firstIndex = 0;
+    _nextIndex = 0;
+    // NOTE(review): indices are reset to 0 here but to 1 everywhere
+    // else (ctor, start/stopTrackingPromotions) -- confirm intended.
+  }
+};
+
+// Descriptor for a linear (bump-pointer) allocation block: a run of
+// _word_size words starting at _ptr from which allocations up to
+// _allocation_size_limit are carved.
+class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
+ public:
+  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), 
+    _allocation_size_limit(0) {}
+  // (Re)initialize the block to cover [ptr, ptr + word_size).
+  void set(HeapWord* ptr, size_t word_size, size_t refill_size, 
+    size_t allocation_size_limit) {
+    _ptr = ptr;
+    _word_size = word_size;
+    _refillSize = refill_size;
+    _allocation_size_limit = allocation_size_limit; 
+  }
+  HeapWord* _ptr;                    // start of the (remaining) block
+  size_t    _word_size;              // words in the block at _ptr
+  size_t    _refillSize;             // preferred size when refilling
+  size_t    _allocation_size_limit;  // largest size that will be allocated
+};
+
+// Concrete subclass of CompactibleSpace that implements
+// a free list space, such as used in the concurrent mark sweep
+// generation.
+
+class CompactibleFreeListSpace: public CompactibleSpace {
+  friend class VMStructs;
+  friend class ConcurrentMarkSweepGeneration;
+  friend class ASConcurrentMarkSweepGeneration;
+  friend class CMSCollector;
+  friend class CMSPermGenGen;
+  // Local alloc buffer for promotion into this space.
+  friend class CFLS_LAB;   
+
+  // "Size" of chunks of work (executed during parallel remark phases
+  // of CMS collection); this probably belongs in CMSCollector, although
+  // it's cached here because it's used in
+  // initialize_sequential_subtasks_for_rescan() which modifies
+  // par_seq_tasks which also lives in Space. XXX
+  const size_t _rescan_task_size;
+  const size_t _marking_task_size;
+
+  // Yet another sequential tasks done structure. This supports
+  // CMS GC, where we have threads dynamically
+  // claiming sub-tasks from a larger parallel task.
+  SequentialSubTasksDone _conc_par_seq_tasks;
+
+  BlockOffsetArrayNonContigSpace _bt;
+
+  CMSCollector* _collector;
+  ConcurrentMarkSweepGeneration* _gen;
+
+  // Data structures for free blocks (used during allocation/sweeping)
+
+  // Allocation is done linearly from two different blocks depending on
+  // whether the request is small or large, in an effort to reduce
+  // fragmentation. We assume that any locking for allocation is done
+  // by the containing generation. Thus, none of the methods in this
+  // space are re-entrant.
+  enum SomeConstants {
+    SmallForLinearAlloc = 16,        // size < this then use _sLAB
+    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
+    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
+    IndexSetStart       = MinObjAlignment,
+    IndexSetStride      = MinObjAlignment
+  };
+
+ private:
+  enum FitStrategyOptions {
+    FreeBlockStrategyNone = 0,
+    FreeBlockBestFitFirst
+  };
+
+  PromotionInfo _promoInfo;
+
+  // helps to impose a global total order on freelistLock ranks;
+  // assumes that CFLSpace's are allocated in global total order
+  static int   _lockRank;
+
+  // a lock protecting the free lists and free blocks;
+  // mutable because of ubiquity of locking even for otherwise const methods
+  mutable Mutex _freelistLock; 
+  // locking verifier convenience function
+  void assert_locked() const PRODUCT_RETURN;
+
+  // Linear allocation blocks
+  LinearAllocBlock _smallLinearAllocBlock;
+
+  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
+  FreeBlockDictionary* _dictionary;    // ptr to dictionary for large size blocks
+
+  FreeList _indexedFreeList[IndexSetSize];
+                                       // indexed array for small size blocks
+  // allocation strategy
+  bool       _fitStrategy;      // Use best fit strategy.
+  bool	     _adaptive_freelists; // Use adaptive freelists
+
+  // This is an address close to the largest free chunk in the heap.
+  // It is currently assumed to be at the end of the heap.  Free
+  // chunks with addresses greater than nearLargestChunk are coalesced
+  // in an effort to maintain a large chunk at the end of the heap.
+  HeapWord*  _nearLargestChunk;
+
+  // Used to keep track of limit of sweep for the space
+  HeapWord* _sweep_limit;
+
+  // Support for compacting cms
+  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
+  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
+
+  // Initialization helpers.
+  void initializeIndexedFreeListArray();
+
+  // Extra stuff to manage promotion parallelism.
+
+  // a lock protecting the dictionary during par promotion allocation.
+  mutable Mutex _parDictionaryAllocLock;
+  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
+
+  // Locks protecting the exact lists during par promotion allocation.
+  Mutex* _indexedFreeListParLocks[IndexSetSize];
+
+#if CFLS_LAB_REFILL_STATS
+  // Some statistics.
+  jint  _par_get_chunk_from_small;
+  jint  _par_get_chunk_from_large;
+#endif
+
+
+  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
+  // required to be smaller than "IndexSetSize".)  If successful,
+  // adds them to "fl", which is required to be an empty free list.
+  // If the count of "fl" is negative, it's absolute value indicates a
+  // number of free chunks that had been previously "borrowed" from global
+  // list of size "word_sz", and must now be decremented.
+  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
+
+  // Allocation helper functions
+  // Allocate using a strategy that takes from the indexed free lists
+  // first.  This allocation strategy assumes a companion sweeping
+  // strategy that attempts to keep the needed number of chunks in each
+  // indexed free lists.
+  HeapWord* allocate_adaptive_freelists(size_t size);
+  // Allocate from the linear allocation buffers first.  This allocation
+  // strategy assumes maximal coalescing can maintain chunks large enough
+  // to be used as linear allocation buffers.
+  HeapWord* allocate_non_adaptive_freelists(size_t size);
+
+  // Gets a chunk from the linear allocation block (LinAB).  If there 
+  // is not enough space in the LinAB, refills it.
+  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
+  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
+  // Get a chunk from the space remaining in the linear allocation block.  Do
+  // not attempt to refill if the space is not available, return NULL.  Do the
+  // repairs on the linear allocation block as appropriate.
+  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
+  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);
+
+  // Helper function for getChunkFromIndexedFreeList.
+  // Replenish the indexed free list for this "size".  Do not take from an
+  // underpopulated size.
+  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size);
+
+  // Get a chunk from the indexed free list.  If the indexed free list
+  // does not have a free chunk, try to replenish the indexed free list
+  // then get the free chunk from the replenished indexed free list.
+  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);
+
+  // The returned chunk may be larger than requested (or null).
+  FreeChunk* getChunkFromDictionary(size_t size);
+  // The returned chunk is the exact size requested (or null).
+  FreeChunk* getChunkFromDictionaryExact(size_t size);
+
+  // Find a chunk in the indexed free list that is the best
+  // fit for size "numWords".
+  FreeChunk* bestFitSmall(size_t numWords);
+  // For free list "fl" of chunks of size > numWords, 
+  // remove a chunk, split off a chunk of size numWords
+  // and return it.  The split off remainder is returned to
+  // the free lists.  The old name for getFromListGreater
+  // was lookInListGreater.
+  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
+  // Get a chunk in the indexed free list or dictionary, 
+  // by considering a larger chunk and splitting it.
+  FreeChunk* getChunkFromGreater(size_t numWords);
+  //  Verify that the given chunk is in the indexed free lists.
+  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
+  // Remove the specified chunk from the indexed free lists.
+  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
+  // Remove the specified chunk from the dictionary.
+  void       removeChunkFromDictionary(FreeChunk* fc);
+  // Split a free chunk into a smaller free chunk of size "new_size".
+  // Return the smaller free chunk and return the remainder to the 
+  // free lists.
+  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
+  // Add a chunk to the free lists.
+  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
+  // Add a chunk to the free lists, preferring to suffix it
+  // to the last free chunk at end of space if possible, and
+  // updating the block census stats as well as block offset table.
+  // Take any locks as appropriate if we are multithreaded.
+  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
+  // Add a free chunk to the indexed free lists.
+  void       returnChunkToFreeList(FreeChunk* chunk);
+  // Add a free chunk to the dictionary.
+  void       returnChunkToDictionary(FreeChunk* chunk);
+
+  // Functions for maintaining the linear allocation buffers (LinAB).
+  // Repairing a linear allocation block refers to operations
+  // performed on the remainder of a LinAB after an allocation
+  // has been made from it.
+  void       repairLinearAllocationBlocks();
+  void       repairLinearAllocBlock(LinearAllocBlock* blk);
+  void       refillLinearAllocBlock(LinearAllocBlock* blk);
+  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
+  void       refillLinearAllocBlocksIfNeeded();
+
+  void       verify_objects_initialized() const;
+  
+  // Statistics reporting helper functions
+  void       reportFreeListStatistics() const;
+  void       reportIndexedFreeListStatistics() const;
+  size_t     maxChunkSizeInIndexedFreeLists() const;
+  size_t     numFreeBlocksInIndexedFreeLists() const;
+  // Accessor
+  HeapWord* unallocated_block() const {
+    HeapWord* ub = _bt.unallocated_block();
+    // The block offset table's notion of the first unallocated word
+    // must lie within [bottom, end] of this space.
+    assert(ub >= bottom() &&
+           ub <= end(), "space invariant");
+    return ub;
+  }
+  // Notify the block offset table that [start, start+size) has been freed.
+  void freed(HeapWord* start, size_t size) {
+    _bt.freed(start, size);
+  }
+
+ protected:
+  // reset the indexed free list to its initial empty condition.
+  void resetIndexedFreeListArray();
+  // reset to an initial state with a single free block described
+  // by the MemRegion parameter.
+  void reset(MemRegion mr);
+  // Return the total number of words in the indexed free lists.
+  size_t     totalSizeInIndexedFreeLists() const;
+
+ public:
+  // Constructor...
+  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
+			   bool use_adaptive_freelists,
+                           FreeBlockDictionary::DictionaryChoice);
+  // accessors
+  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
+  FreeBlockDictionary* dictionary() const { return _dictionary; }
+  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
+  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
+
+  // Return the free chunk at the end of the space.  If no such
+  // chunk exists, return NULL.
+  FreeChunk* find_chunk_at_end();
+
+  bool adaptive_freelists() { return _adaptive_freelists; }
+
+  void set_collector(CMSCollector* collector) { _collector = collector; }
+
+  // Support for parallelization of rescan and marking
+  const size_t rescan_task_size()  const { return _rescan_task_size;  }
+  const size_t marking_task_size() const { return _marking_task_size; }
+  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
+  void initialize_sequential_subtasks_for_rescan(int n_threads);
+  void initialize_sequential_subtasks_for_marking(int n_threads,
+         HeapWord* low = NULL);
+
+#if CFLS_LAB_REFILL_STATS
+  void print_par_alloc_stats();
+#endif
+
+  // Space enquiries
+  size_t used() const;
+  size_t free() const;
+  size_t max_alloc_in_words() const;
+  // XXX: should have a less conservative used_region() than that of
+  // Space; we could consider keeping track of highest allocated
+  // address and correcting that at each sweep, as the sweeper
+  // goes through the entire allocated part of the generation. We
+  // could also use that information to keep the sweeper from
+  // sweeping more than is necessary. The allocator and sweeper will
+  // of course need to synchronize on this, since the sweeper will
+  // try to bump down the address and the allocator will try to bump it up.
+  // For now, however, we'll just use the default used_region()
+  // which overestimates the region by returning the entire
+  // committed region (this is safe, but inefficient).
+
+  // Returns a subregion of the space containing all the objects in
+  // the space.
+  MemRegion used_region() const {
+    // If the block offset table tracks the unallocated block, we can
+    // return a tighter region than the whole committed [bottom, end).
+    return MemRegion(bottom(),
+                     BlockOffsetArrayUseUnallocatedBlock ?
+                     unallocated_block() : end());
+  }
+
+  // This is needed because the default implementation uses block_start()
+  // which can't be used at certain times (for example phase 3 of mark-sweep).
+  // A better fix is to change the assertions in phase 3 of mark-sweep to
+  // use is_in_reserved(), but that is deferred since the is_in() assertions
+  // are buried through several layers of callers and are used elsewhere
+  // as well.
+  bool is_in(const void* p) const {
+    // Membership is defined w.r.t. the currently used region, not the
+    // entire committed space (see comment above).
+    return used_region().contains(p);
+  }
+    
+  virtual bool is_free_block(const HeapWord* p) const;
+
+  // Resizing support
+  void set_end(HeapWord* value);  // override
+
+  // mutual exclusion support
+  Mutex* freelistLock() const { return &_freelistLock; }
+
+  // Iteration support
+  void oop_iterate(MemRegion mr, OopClosure* cl);
+  void oop_iterate(OopClosure* cl);
+
+  void object_iterate(ObjectClosure* blk);
+  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
+
+  // Requires that "mr" be entirely within the space.
+  // Apply "cl->do_object" to all objects that intersect with "mr".
+  // If the iteration encounters an unparseable portion of the region,
+  // terminate the iteration and return the address of the start of the
+  // subregion that isn't done.  Return of "NULL" indicates that the
+  // iteration completed.
+  virtual HeapWord*
+       object_iterate_careful_m(MemRegion mr,
+                                ObjectClosureCareful* cl);
+  virtual HeapWord*
+       object_iterate_careful(ObjectClosureCareful* cl);
+
+  // Override: provides a DCTO_CL specific to this kind of space.
+  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
+				     CardTableModRefBS::PrecisionStyle precision,
+				     HeapWord* boundary);
+
+  void blk_iterate(BlkClosure* cl);
+  void blk_iterate_careful(BlkClosureCareful* cl);
+  HeapWord* block_start(const void* p) const;
+  HeapWord* block_start_careful(const void* p) const;
+  size_t block_size(const HeapWord* p) const;
+  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
+  bool block_is_obj(const HeapWord* p) const;
+  bool obj_is_alive(const HeapWord* p) const;
+  size_t block_size_nopar(const HeapWord* p) const;
+  bool block_is_obj_nopar(const HeapWord* p) const;
+
+  // iteration support for promotion
+  void save_marks();
+  bool no_allocs_since_save_marks();
+  void object_iterate_since_last_GC(ObjectClosure* cl);
+
+  // iteration support for sweeping
+  // Record the current allocation high-water mark as the limit for the
+  // upcoming sweep.
+  void save_sweep_limit() {
+    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
+                   unallocated_block() : end();
+  }
+  NOT_PRODUCT(
+    void clear_sweep_limit() { _sweep_limit = NULL; }
+  )
+  HeapWord* sweep_limit() { return _sweep_limit; }
+
+  // Apply "blk->do_oop" to the addresses of all reference fields in objects
+  // promoted into this generation since the most recent save_marks() call.
+  // Fields in objects allocated by applications of the closure
+  // *are* included in the iteration. Thus, when the iteration completes
+  // there should be no further such objects remaining.
+  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
+    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
+  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
+  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL
+
+  // Allocation support
+  HeapWord* allocate(size_t size);
+  HeapWord* par_allocate(size_t size);
+
+  oop       promote(oop obj, size_t obj_size, oop* ref);
+  void      gc_prologue();
+  void      gc_epilogue();
+
+  // This call is used by a containing CMS generation / collector
+  // to inform the CFLS space that a sweep has been completed
+  // and that the space can do any related house-keeping functions.
+  void      sweep_completed();
+
+  // For an object in this space, the mark-word's two
+  // LSB's having the value [11] indicates that it has been
+  // promoted since the most recent call to save_marks() on
+  // this generation and has not subsequently been iterated
+  // over (using oop_since_save_marks_iterate() above).
+  bool obj_allocated_since_save_marks(const oop obj) const {
+    assert(is_in_reserved(obj), "Wrong space?");
+    // Per the comment above: objects promoted since save_marks() carry
+    // the [11] promoted-mark in their mark word's two LSBs.
+    return ((PromotedObject*)obj)->hasPromotedMark();
+  }
+
+  // A worst-case estimate of the space required (in HeapWords) to expand the
+  // heap when promoting an obj of size obj_size.
+  size_t expansionSpaceRequired(size_t obj_size) const;
+
+  FreeChunk* allocateScratch(size_t size);
+
+  // returns true if either the small or large linear allocation buffer is empty.
+  bool       linearAllocationWouldFail();
+
+  // Adjust the chunk for the minimum size.  This version is called in
+  // most cases in CompactibleFreeListSpace methods.
+  inline static size_t adjustObjectSize(size_t size) {
+    // Enforce the minimum chunk size, then round up to object alignment.
+    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
+  }
+  // This is a virtual version of adjustObjectSize() that is called
+  // only occasionally when the compaction space changes and the type
+  // of the new compaction space is only known to be CompactibleSpace.
+  size_t adjust_object_size_v(size_t size) const {
+    // Virtual-dispatch wrapper around the static adjustObjectSize().
+    return adjustObjectSize(size);
+  }
+  // Minimum size of a free block.
+  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
+  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
+  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
+              bool coalesced);
+
+  // Support for compaction
+  void prepare_for_compaction(CompactPoint* cp);
+  void adjust_pointers();
+  void compact();
+  // reset the space to reflect the fact that a compaction of the
+  // space has been done.
+  virtual void reset_after_compaction();
+
+  // Debugging support
+  void print()                            const;
+  void prepare_for_verify();
+  void verify(bool allow_dirty)           const;
+  void verifyFreeLists()                  const PRODUCT_RETURN;
+  void verifyIndexedFreeLists()           const;
+  void verifyIndexedFreeList(size_t size) const;
+  // verify that the given chunk is in the free lists.
+  bool verifyChunkInFreeLists(FreeChunk* fc) const;
+  // Do some basic checks on the free lists.
+  void checkFreeListConsistency()	  const PRODUCT_RETURN;
+
+  NOT_PRODUCT (
+    void initializeIndexedFreeListArrayReturnedBytes();
+    size_t sumIndexedFreeListArrayReturnedBytes();
+    // Return the total number of chunks in the indexed free lists.
+    size_t totalCountInIndexedFreeLists() const;
+    // Return the total number of chunks in the space.
+    size_t totalCount();
+  )
+
+  // The census consists of counts of the quantities such as
+  // the current count of the free chunks, number of chunks
+  // created as a result of the split of a larger chunk or
+  // coalescing of smaller chunks, etc.  The counts in the 
+  // census are used to make decisions on splitting and
+  // coalescing of chunks during the sweep of garbage.
+
+  // Print the statistics for the free lists.
+  void printFLCensus(int sweepCt)	  const;
+
+  // Statistics functions
+  // Initialize census for lists before the sweep.
+  void beginSweepFLCensus(float sweep_current,
+                          float sweep_estimate);
+  // Set the surplus for each of the free lists.
+  void setFLSurplus();
+  // Set the hint for each of the free lists.
+  void setFLHints();
+  // Clear the census for each of the free lists.
+  void clearFLCensus();
+  // Perform functions for the census after the end of the sweep.
+  void endSweepFLCensus(int sweepCt);
+  // Return true if the count of free chunks is greater
+  // than the desired number of free chunks.
+  bool coalOverPopulated(size_t size);
+
+
+// Record (for each size):
+// 
+//   split-births = #chunks added due to splits in (prev-sweep-end, 
+// 	this-sweep-start)
+//   split-deaths = #chunks removed for splits in (prev-sweep-end, 
+// 	this-sweep-start)
+//   num-curr     = #chunks at start of this sweep
+//   num-prev     = #chunks at end of previous sweep
+// 
+// The above are quantities that are measured. Now define:
+// 
+//   num-desired := num-prev + split-births - split-deaths - num-curr
+// 
+// Roughly, num-prev + split-births is the supply,
+// split-deaths is demand due to other sizes
+// and num-curr is what we have left.
+// 
+// Thus, num-desired is roughly speaking the "legitimate demand"
+// for blocks of this size and what we are striving to reach at the
+// end of the current sweep.
+// 
+// For a given list, let num-len be its current population.
+// Define, for a free list of a given size:
+// 
+//   coal-overpopulated := num-len >= num-desired * coal-surplus
+// (coal-surplus is set to 1.05, i.e. we allow a little slop when
+// coalescing -- we do not coalesce unless we think that the current
+// supply has exceeded the estimated demand by more than 5%).
+// 
+// For the set of sizes in the binary tree, which is neither dense nor
+// closed, it may be the case that for a particular size we have never
+// had, or do not now have, or did not have at the previous sweep,
+// chunks of that size. We need to extend the definition of
+// coal-overpopulated to such sizes as well:
+// 
+//   For a chunk in/not in the binary tree, extend coal-overpopulated
+//   defined above to include all sizes as follows:
+// 
+//   . a size that is non-existent is coal-overpopulated
+//   . a size that has a num-desired <= 0 as defined above is
+//     coal-overpopulated.  
+// 
+// Also define, for a chunk heap-offset C and mountain heap-offset M:
+// 
+//   close-to-mountain := C >= 0.99 * M
+// 
+// Now, the coalescing strategy is:
+// 
+//    Coalesce left-hand chunk with right-hand chunk if and
+//    only if:
+// 
+//      EITHER
+//        . left-hand chunk is of a size that is coal-overpopulated
+//      OR
+//        . right-hand chunk is close-to-mountain
+  void smallCoalBirth(size_t size);
+  void smallCoalDeath(size_t size);
+  void coalBirth(size_t size);
+  void coalDeath(size_t size);
+  void smallSplitBirth(size_t size);
+  void smallSplitDeath(size_t size);
+  void splitBirth(size_t size);
+  void splitDeath(size_t size);
+  void split(size_t from, size_t to1);
+
+  double flsFrag() const;
+};
+
+// A parallel-GC-thread-local allocation buffer for allocation into a
+// CompactibleFreeListSpace.
+class CFLS_LAB : public CHeapObj {
+  // The space that this buffer allocates into.
+  CompactibleFreeListSpace* _cfls;
+
+  // Our local free lists, one per indexed (small) chunk size.
+  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
+
+  // Initialized from a command-line arg.
+  // NOTE(review): presumably the number of blocks claimed from the
+  // shared space on each refill -- confirm against the .cpp.
+  size_t _blocks_to_claim;
+
+#if CFLS_LAB_REFILL_STATS
+  // Some statistics.
+  int _refills;                 // per-LAB refill count
+  int _blocksTaken;             // per-LAB blocks taken
+  static int _tot_refills;      // aggregate across all LABs
+  static int _tot_blocksTaken;  // aggregate across all LABs
+  static int _next_threshold;
+#endif
+
+public:
+  CFLS_LAB(CompactibleFreeListSpace* cfls);
+
+  // Allocate and return a block of the given size, or else return NULL.
+  HeapWord* alloc(size_t word_sz);
+
+  // Return any unused portions of the buffer to the global pool.
+  void retire();
+};
+
+// Size (in HeapWords) used to refill the spool-block supply: one
+// SpoolBlock header plus CMSSpoolBlockSize displaced mark words, rounded
+// up to the space's minimum chunk size and object alignment.
+size_t PromotionInfo::refillSize() const {
+  const size_t CMSSpoolBlockSize = 256;
+  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
+                                   * CMSSpoolBlockSize);
+  return CompactibleFreeListSpace::adjustObjectSize(sz);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// CopyrightVersion 1.2
+
+# include "incls/_precompiled.incl"
+# include "incls/_concurrentGCThread.cpp.incl"
+
+bool ConcurrentGCThread::_should_terminate    = false;
+bool ConcurrentGCThread::_has_terminated      = false;
+int  ConcurrentGCThread::_CGC_flag            = CGC_nil;
+
+SuspendibleThreadSet ConcurrentGCThread::_sts;
+
+// Constructor: ensure the shared SuspendibleThreadSet is initialized
+// before any concurrent GC thread starts running.
+ConcurrentGCThread::ConcurrentGCThread() {
+  _sts.initialize();
+};
+
+// Bring all Java threads to a safepoint -- holding the Heap_lock so no
+// allocation can race the stop -- run "op", then resume the world.
+void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) {
+  MutexLockerEx x(Heap_lock,
+                  Mutex::_no_safepoint_check_flag);
+  // warning("CGC: about to try stopping world");
+  SafepointSynchronize::begin();
+  // warning("CGC: successfully stopped world");
+  op->do_void();
+  SafepointSynchronize::end();
+  // warning("CGC: successfully restarted world");
+}
+
+// Called by the VM thread when initiating a safepoint: block until all
+// threads in the suspendible set have yielded.
+void ConcurrentGCThread::safepoint_synchronize() {
+  _sts.suspend_all();
+}
+
+// Called by the VM thread when leaving a safepoint: allow suspended
+// concurrent GC threads to continue.
+void ConcurrentGCThread::safepoint_desynchronize() {
+  _sts.resume_all();
+}
+
+// Create the underlying OS thread and start it, unless termination was
+// already requested or DisableStartThread is set.  If OS thread
+// creation fails, this returns silently with no thread started.
+void ConcurrentGCThread::create_and_start() {
+  if (os::create_thread(this, os::cgc_thread)) {
+    // XXX: need to set this to low priority
+    // unless "aggressive mode" set; priority
+    // should be just less than that of VMThread.
+    os::set_priority(this, NearMaxPriority);
+    if (!_should_terminate && !DisableStartThread) {
+      os::start_thread(this);
+    }
+  }
+}
+
+// Per-thread setup executed on the new thread itself: record stack
+// bounds, set up thread-local storage and a JNI handle block.
+void ConcurrentGCThread::initialize_in_thread() {
+  this->record_stack_base_and_size();
+  this->initialize_thread_local_storage();
+  this->set_active_handles(JNIHandleBlock::allocate_block());
+  // From this time Thread::current() should be working.
+  assert(this == Thread::current(), "just checking");
+}
+
+// Block until VM bootstrap completes (or termination is requested),
+// polling every 200ms on CGC_lock.
+void ConcurrentGCThread::wait_for_universe_init() {
+  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+  while (!is_init_completed() && !_should_terminate) {
+    CGC_lock->wait(Mutex::_no_safepoint_check_flag, 200);
+  }
+}
+
+// Mark this thread as terminated and wake anyone waiting on
+// Terminator_lock for shutdown to complete.
+void ConcurrentGCThread::terminate() {
+  // Signal that it is terminated
+  {
+    MutexLockerEx mu(Terminator_lock,
+                     Mutex::_no_safepoint_check_flag);
+    _has_terminated = true;
+    Terminator_lock->notify();
+  }
+
+  // Thread destructor usually does this..
+  ThreadLocalStorage::set_thread(NULL);
+}
+
+
+// Slow path of initialize(): one-time setup under STS_init_lock.  (The
+// unsynchronized _initialized test in initialize() is the fast path of
+// a double-checked-locking idiom; the flag is re-checked here under the
+// lock.)
+void SuspendibleThreadSet::initialize_work() {
+  MutexLocker x(STS_init_lock);
+  if (!_initialized) {
+    _m             = new Monitor(Mutex::leaf,
+				 "SuspendibleThreadSetLock", true);
+    _async         = 0;
+    _async_stop    = false;
+    _async_stopped = 0;
+    _initialized   = true;
+  }
+}
+
+// Add the calling thread to the set.  Blocks while a suspension is in
+// progress so new members cannot slip past a pending suspend_all().
+void SuspendibleThreadSet::join() {
+  initialize();
+  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
+  while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
+  _async++;
+  assert(_async > 0, "Huh.");
+}
+
+// Remove the calling thread from the set; if a suspension is pending,
+// wake the suspender (this thread no longer counts toward _async).
+void SuspendibleThreadSet::leave() {
+  assert(_initialized, "Must be initialized.");
+  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
+  _async--;
+  assert(_async >= 0, "Huh.");
+  if (_async_stop) _m->notify_all();
+}
+
+// If a suspend_all() request is pending, park this thread until
+// resume_all().  The last member to stop notifies the suspender (which
+// waits for _async_stopped to reach _async) and, if ConcGCYieldTimeout
+// is set, checks that the stop did not take pathologically long.
+void SuspendibleThreadSet::yield(const char* id) {
+  assert(_initialized, "Must be initialized.");
+  if (_async_stop) {
+    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
+    if (_async_stop) {  // re-check under the monitor
+      _async_stopped++;
+      assert(_async_stopped > 0, "Huh.");
+      if (_async_stopped == _async) {
+	if (ConcGCYieldTimeout > 0) {
+	  double now = os::elapsedTime();
+	  guarantee((now - _suspend_all_start) * 1000.0 <
+		    (double)ConcGCYieldTimeout,
+		    "Long delay; whodunit?");
+	}
+      }
+      _m->notify_all();
+      while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
+      _async_stopped--;
+      assert(_async >= 0, "Huh");
+      _m->notify_all();
+    }
+  }
+}
+
+// Request that all members of the set stop: set _async_stop, then wait
+// until every joined thread has parked in yield().  Only one suspension
+// may be in progress at a time.
+void SuspendibleThreadSet::suspend_all() {
+  initialize();  // If necessary.
+  if (ConcGCYieldTimeout > 0) {
+    _suspend_all_start = os::elapsedTime();
+  }
+  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
+  assert(!_async_stop, "Only one at a time.");
+  _async_stop = true;
+  while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag);
+}
+
+// End the suspension: clear _async_stop and wake all threads parked in
+// yield() or join().
+void SuspendibleThreadSet::resume_all() {
+  assert(_initialized, "Must be initialized.");
+  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
+  assert(_async_stopped == _async, "Huh.");
+  _async_stop = false;
+  _m->notify_all();
+}
+
+// Thread entry point for the SurrogateLockerThread: runs its message loop.
+static void _sltLoop(JavaThread* thread, TRAPS) {
+  SurrogateLockerThread* slt = (SurrogateLockerThread*)thread;
+  slt->loop();
+}
+
+// Construct with an empty message buffer; the thread body is _sltLoop.
+SurrogateLockerThread::SurrogateLockerThread() :
+  JavaThread(&_sltLoop),
+  _monitor(Mutex::nonleaf, "SLTMonitor"),
+  _buffer(empty)
+{}
+
+// Create, register and start the Surrogate Locker Thread, giving it a
+// java.lang.Thread mirror in the system thread group.  Aborts the VM
+// (OutOfMemoryError message) if no OS thread could be created.
+SurrogateLockerThread* SurrogateLockerThread::make(TRAPS) {
+  klassOop k =
+    SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(),
+                                      true, CHECK_NULL);
+  instanceKlassHandle klass (THREAD, k);
+  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);
+
+  const char thread_name[] = "Surrogate Locker Thread (CMS)";
+  Handle string = java_lang_String::create_from_str(thread_name, CHECK_NULL);
+
+  // Initialize thread_oop to put it into the system threadGroup
+  Handle thread_group (THREAD, Universe::system_thread_group());
+  JavaValue result(T_VOID);
+  // Invoke java.lang.Thread.<init>(ThreadGroup, String) on the mirror.
+  JavaCalls::call_special(&result, thread_oop,
+			  klass,
+			  vmSymbolHandles::object_initializer_name(),
+			  vmSymbolHandles::threadgroup_string_void_signature(),
+			  thread_group,
+			  string,
+			  CHECK_NULL);
+
+  SurrogateLockerThread* res;
+  {
+    MutexLocker mu(Threads_lock);
+    res = new SurrogateLockerThread();
+
+    // At this point it may be possible that no osthread was created for the
+    // JavaThread due to lack of memory. We would have to throw an exception
+    // in that case. However, since this must work and we do not allow
+    // exceptions anyway, check and abort if this fails.
+    if (res == NULL || res->osthread() == NULL) {
+      vm_exit_during_initialization("java.lang.OutOfMemoryError",
+                                    "unable to create new native thread");
+    }
+    java_lang_Thread::set_thread(thread_oop(), res);
+    java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+    java_lang_Thread::set_daemon(thread_oop());
+
+    res->set_threadObj(thread_oop());
+    Threads::add(res);
+    Thread::start(res);
+  }
+  os::yield(); // This seems to help with initial start-up of SLT
+  return res;
+}
+
+// Post "msg" to the SLT and block until it has been processed, i.e.
+// until the SLT resets the buffer to empty and notifies us.
+void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
+  MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
+  assert(_buffer == empty, "Should be empty");
+  assert(msg != empty, "empty message");
+  _buffer = msg;
+  while (_buffer != empty) {
+    _monitor.notify();
+    _monitor.wait(Mutex::_no_safepoint_check_flag);
+  }
+}
+
+// ======= Surrogate Locker Thread =============
+
+// Main loop: wait for a message, acquire or release the pending-list
+// lock on behalf of the requester, then clear the buffer and notify the
+// requester.  Runs forever (the loop condition is constant).
+void SurrogateLockerThread::loop() {
+  BasicLock pll_basic_lock;
+  SLT_msg_type msg;
+  debug_only(unsigned int owned = 0;)  // debug count of PLL acquisitions
+
+  while (/* !isTerminated() */ 1) {
+    {
+      MutexLocker x(&_monitor);
+      // Since we are a JavaThread, we can't be here at a safepoint.
+      assert(!SafepointSynchronize::is_at_safepoint(),
+             "SLT is a JavaThread");
+      // wait for msg buffer to become non-empty
+      while (_buffer == empty) {
+        _monitor.notify();
+        _monitor.wait();
+      }
+      msg = _buffer;
+    }
+    // Act on the message outside the monitor (PLL operations may block).
+    switch(msg) {
+      case acquirePLL: {
+        instanceRefKlass::acquire_pending_list_lock(&pll_basic_lock);
+        debug_only(owned++;)
+        break;
+      }
+      case releaseAndNotifyPLL: {
+        assert(owned > 0, "Don't have PLL");
+        instanceRefKlass::release_and_notify_pending_list_lock(&pll_basic_lock);
+        debug_only(owned--;)
+        break;
+      }
+      case empty:
+      default: {
+        guarantee(false,"Unexpected message in _buffer");
+        break;
+      }
+    }
+    {
+      MutexLocker x(&_monitor);
+      // Since we are a JavaThread, we can't be here at a safepoint.
+      assert(!SafepointSynchronize::is_at_safepoint(),
+             "SLT is a JavaThread");
+      // Signal completion to the requester waiting in manipulatePLL().
+      _buffer = empty;
+      _monitor.notify();
+    }
+  }
+  assert(!_monitor.owned_by_self(), "Should unlock before exit.");
+}
+
+
+// ===== STS Access From Outside CGCT =====
+
+// Forward to the shared SuspendibleThreadSet; legal only from a
+// concurrent GC thread.
+void ConcurrentGCThread::stsYield(const char* id) {
+  assert( Thread::current()->is_ConcurrentGC_thread(),
+	  "only a conc GC thread can call this" );
+  _sts.yield(id);
+}
+
+// Forward to the shared SuspendibleThreadSet; legal only from a
+// concurrent GC thread.
+bool ConcurrentGCThread::stsShouldYield() {
+  assert( Thread::current()->is_ConcurrentGC_thread(),
+	  "only a conc GC thread can call this" );
+  return _sts.should_yield();
+}
+
+// Forward to the shared SuspendibleThreadSet; legal only from a
+// concurrent GC thread.
+void ConcurrentGCThread::stsJoin() {
+  assert( Thread::current()->is_ConcurrentGC_thread(),
+	  "only a conc GC thread can call this" );
+  _sts.join();
+}
+
+// Forward to the shared SuspendibleThreadSet; legal only from a
+// concurrent GC thread.
+void ConcurrentGCThread::stsLeave() {
+  assert( Thread::current()->is_ConcurrentGC_thread(),
+	  "only a conc GC thread can call this" );
+  _sts.leave();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentGCThread.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class VoidClosure;
+
+// A SuspendibleThreadSet is (obviously) a set of threads that can be
+// suspended.  A thread can join and later leave the set, and periodically
+// yield.  If some thread (not in the set) requests, via suspend_all, that
+// the threads be suspended, then the requesting thread is blocked until
+// all the threads in the set have yielded or left the set.  (Threads may
+// not enter the set when an attempted suspension is in progress.)  The
+// suspending thread later calls resume_all, allowing the suspended threads
+// to continue.
+
+class SuspendibleThreadSet {
+  Monitor* _m;                 // guards the fields below once initialized
+  int      _async;             // number of threads currently in the set
+  bool     _async_stop;        // true while a suspend_all request is pending
+  int      _async_stopped;     // members currently parked in yield()
+  bool     _initialized;       // set lazily by initialize_work()
+  double   _suspend_all_start; // time of last suspend_all (timeout checks)
+
+  void initialize_work();
+
+ public:
+  SuspendibleThreadSet() : _initialized(false) {}
+
+  // Add the current thread to the set.  May block if a suspension
+  // is in progress.
+  void join();
+  // Removes the current thread from the set.
+  void leave();
+  // Returns "true" iff a suspension is in progress.  NOTE: reads
+  // _async_stop without the monitor -- an advisory hint only.
+  bool should_yield() { return _async_stop; }
+  // Suspends the current thread if a suspension is in progress (for
+  // the duration of the suspension.)
+  void yield(const char* id);
+  // Return when all threads in the set are suspended.
+  void suspend_all();
+  // Allow suspended threads to resume.
+  void resume_all();
+  // Redundant initializations okay.
+  void initialize() {
+    // Double-check dirty read idiom.
+    if (!_initialized) initialize_work();
+  }
+};
+
+
+class ConcurrentGCThread: public NamedThread {
+  friend class VMStructs;
+
+protected:
+  static bool _should_terminate;  // set to request shutdown
+  static bool _has_terminated;    // set by terminate() once shutdown completes
+
+  // Flag bits used in CMS-thread/VM-thread safepoint coordination.
+  enum CGC_flag_type {
+    CGC_nil           = 0x0,
+    CGC_dont_suspend  = 0x1,
+    CGC_CGC_safepoint = 0x2,
+    CGC_VM_safepoint  = 0x4
+  };
+
+  static int _CGC_flag;
+
+  static bool CGC_flag_is_set(int b)       { return (_CGC_flag & b) != 0; }
+  static int set_CGC_flag(int b)           { return _CGC_flag |= b; }
+  static int reset_CGC_flag(int b)         { return _CGC_flag &= ~b; }
+
+  // Stop all Java threads at a safepoint, apply "op", resume them.
+  void stopWorldAndDo(VoidClosure* op);
+
+  // All instances share this one set.
+  static SuspendibleThreadSet _sts;
+
+  // Create and start the thread (setting its priority high.)
+  void create_and_start();
+
+  // Do initialization steps in the thread: record stack base and size,
+  // init thread local storage, set JNI handle block.
+  void initialize_in_thread();
+
+  // Wait until Universe::is_fully_initialized();
+  void wait_for_universe_init();
+
+  // Record that the current thread has terminated, notifying any
+  // waiter on Terminator_lock.
+  void terminate();
+
+public:
+  // Constructor
+
+  ConcurrentGCThread();
+  ~ConcurrentGCThread() {} // Exists to call NamedThread destructor.
+  
+  // Tester
+  bool is_ConcurrentGC_thread() const          { return true;       }
+
+  // Forwarded to the shared SuspendibleThreadSet; called by the VM
+  // thread around safepoints.
+  static void safepoint_synchronize();
+  static void safepoint_desynchronize();
+
+  // All overridings should probably do _sts::yield, but we allow
+  // overriding for distinguished debugging messages.  Default is to do
+  // nothing.
+  virtual void yield() {}
+
+  bool should_yield() { return _sts.should_yield(); }
+
+  // Static forwarders to the shared SuspendibleThreadSet.  They are
+  // prefixed by sts since there are already yield() and should_yield()
+  // (non-static) methods in this class and it was an easy way to
+  // differentiate them.
+  static void stsYield(const char* id);
+  static bool stsShouldYield();
+  static void stsJoin();
+  static void stsLeave();
+
+};
+
+// The SurrogateLockerThread is used by concurrent GC threads for
+// manipulating Java monitors, in particular, currently for
+// manipulating the pending_list_lock. XXX
+class SurrogateLockerThread: public JavaThread {
+  friend class VMStructs;
+ public:
+  enum SLT_msg_type {
+    empty = 0,           // no message
+    acquirePLL,          // acquire pending list lock
+    releaseAndNotifyPLL  // notify and release pending list lock
+  };
+ private:
+  // the following are shared with the CMSThread
+  SLT_msg_type  _buffer;  // communication buffer
+  Monitor       _monitor; // monitor controlling buffer
+  BasicLock     _basicLock; // used for PLL locking
+  
+ public:
+  // Factory: creates, registers and starts the SLT (aborts VM on failure).
+  static SurrogateLockerThread* make(TRAPS);
+
+  SurrogateLockerThread();
+
+  void loop(); // main method
+
+  // Post a message and block until the SLT has processed it.
+  void manipulatePLL(SLT_msg_type msg);
+
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,8718 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)concurrentMarkSweepGeneration.cpp	1.286 07/05/17 15:52:02 JVM"
+#endif
+/*
+ * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_concurrentMarkSweepGeneration.cpp.incl"
+
+// statics
+// Out-of-line definitions of static members declared in the headers.
+CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
+bool          CMSCollector::_full_gc_requested          = false;
+
+//////////////////////////////////////////////////////////////////
+// In support of CMS/VM thread synchronization
+//////////////////////////////////////////////////////////////////
+// We split use of the CGC_lock into 2 "levels".
+// The low-level locking is of the usual CGC_lock monitor. We introduce
+// a higher level "token" (hereafter "CMS token") built on top of the
+// low level monitor (hereafter "CGC lock").
+// The token-passing protocol gives priority to the VM thread. The
+// CMS-lock doesn't provide any fairness guarantees, but clients
+// should ensure that it is only held for very short, bounded
+// durations.
+// 
+// When either of the CMS thread or the VM thread is involved in
+// collection operations during which it does not want the other
+// thread to interfere, it obtains the CMS token.
+// 
+// If either thread tries to get the token while the other has
+// it, that thread waits. However, if the VM thread and CMS thread
+// both want the token, then the VM thread gets priority while the
+// CMS thread waits. This ensures, for instance, that the "concurrent"
+// phases of the CMS thread's work do not block out the VM thread
+// for long periods of time as the CMS thread continues to hog
+// the token. (See bug 4616232).
+// 
+// The baton-passing functions are, however, controlled by the
+// flags _foregroundGCShouldWait and _foregroundGCIsActive,
+// and here the low-level CMS lock, not the high level token,
+// ensures mutual exclusion.
+// 
+// Two important conditions that we have to satisfy:
+// 1. if a thread does a low-level wait on the CMS lock, then it
+//    relinquishes the CMS token if it were holding that token
+//    when it acquired the low-level CMS lock.
+// 2. any low-level notifications on the low-level lock
+//    should only be sent when a thread has relinquished the token.
+// 
+// In the absence of either property, we'd have potential deadlock.
+// 
+// We protect each of the CMS (concurrent and sequential) phases
+// with the CMS _token_, not the CMS _lock_.
+// 
+// The only code protected by CMS lock is the token acquisition code
+// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
+// baton-passing code.
+// 
+// Unfortunately, i couldn't come up with a good abstraction to factor and
+// hide the naked CGC_lock manipulation in the baton-passing code
+// further below. That's something we should try to do. Also, the proof
+// of correctness of this 2-level locking scheme is far from obvious,
+// and potentially quite slippery. We have an uneasy suspicion, for instance,
+// that there may be a theoretical possibility of delay/starvation in the
+// low-level lock/wait/notify scheme used for the baton-passing because of
+// potential interference with the priority scheme embodied in the
+// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
+// invocation further below and marked with "XXX 20011219YSR".
+// Indeed, as we note elsewhere, this may become yet more slippery
+// in the presence of multiple CMS and/or multiple VM threads. XXX
+
+// RAII helper: acquires the CMS "token" (see the protocol description
+// above) in its constructor and relinquishes it in its destructor.
+// Pass true iff the current thread is the ConcurrentGC (CMS) thread;
+// the constructor asserts this against Thread::current().
+class CMSTokenSync: public StackObj {
+ private:
+  bool _is_cms_thread;
+ public:
+  CMSTokenSync(bool is_cms_thread):
+    _is_cms_thread(is_cms_thread) {
+    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
+           "Incorrect argument to constructor");
+    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
+  }
+
+  ~CMSTokenSync() {
+    // Verify the token is still held by the expected party before release.
+    assert(_is_cms_thread ?
+             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
+             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
+          "Incorrect state");
+    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
+  }
+};
+
+// Convenience class that does a CMSTokenSync, and then acquires
+// up to three locks.
+class CMSTokenSyncWithLocks: public CMSTokenSync {
+ private:
+  // Note: locks are acquired in textual declaration order
+  // and released in the opposite order
+  MutexLockerEx _locker1, _locker2, _locker3;
+ public:
+  // mutex2 and mutex3 are optional (default NULL). All locks are taken
+  // with the no-safepoint-check flag.
+  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
+                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
+    CMSTokenSync(is_cms_thread),
+    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
+    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
+    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
+  { }
+};
+
+
+// Wrapper class to temporarily disable icms during a foreground cms collection.
+// Stack-allocated (RAII) so icms is re-enabled even on early return.
+class ICMSDisabler: public StackObj {
+ public:
+  // The ctor disables icms and wakes up the thread so it notices the change;
+  // the dtor re-enables icms.  Note that the CMSCollector methods will check
+  // CMSIncrementalMode.
+  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
+  ~ICMSDisabler() { CMSCollector::enable_icms(); }
+};
+
+//////////////////////////////////////////////////////////////////
+//  Concurrent Mark-Sweep Generation /////////////////////////////
+//////////////////////////////////////////////////////////////////
+
+NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
+
+// This struct contains per-thread things necessary to support parallel
+// young-gen collection.
+class CMSParGCThreadState: public CHeapObj {
+ public:
+  CFLS_LAB lab;        // local allocation buffer over the CMS space
+  PromotionInfo promo; // per-thread promotion tracking
+
+  // Constructor. Both members are pointed at the same CMS space.
+  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
+    promo.setSpace(cfls);
+  }
+};
+
+// Construct the CMS old generation: create its CompactibleFreeListSpace
+// over the virtual space, allocate GC stats, and (when ParallelGCThreads
+// > 0) per-thread parallel promotion state. Exits the VM if any of these
+// allocations fail.
+ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
+     ReservedSpace rs, size_t initial_byte_size, int level,
+     CardTableRS* ct, bool use_adaptive_freelists,
+     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
+  CardGeneration(rs, initial_byte_size, level, ct),
+  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
+  _debug_collection_type(Concurrent_collection_type)
+{
+  HeapWord* bottom = (HeapWord*) _virtual_space.low();
+  HeapWord* end    = (HeapWord*) _virtual_space.high();
+
+  _direct_allocated_words = 0;
+  NOT_PRODUCT(
+    _numObjectsPromoted = 0;
+    _numWordsPromoted = 0;
+    _numObjectsAllocated = 0;
+    _numWordsAllocated = 0;
+  )
+
+  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
+                                           use_adaptive_freelists,
+					   dictionaryChoice);
+  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
+  if (_cmsSpace == NULL) {
+    vm_exit_during_initialization(
+      "CompactibleFreeListSpace allocation failure");
+  }
+  _cmsSpace->_gen = this;
+
+  _gc_stats = new CMSGCStats();
+
+  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
+  // offsets match. The ability to tell free chunks from objects
+  // depends on this property.
+  debug_only(
+    FreeChunk* junk = NULL;
+    assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
+           "Offset of FreeChunk::_prev within FreeChunk must match"
+           "  that of OopDesc::_klass within OopDesc");
+  )
+  if (ParallelGCThreads > 0) {
+    // One CMSParGCThreadState (LAB + promotion info) per parallel GC thread.
+    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
+    _par_gc_thread_states =
+      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
+    if (_par_gc_thread_states == NULL) {
+      vm_exit_during_initialization("Could not allocate par gc structs");
+    }
+    for (uint i = 0; i < ParallelGCThreads; i++) {
+      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
+      if (_par_gc_thread_states[i] == NULL) { 
+        vm_exit_during_initialization("Could not allocate par gc structs");
+      }
+    }
+  } else {
+    _par_gc_thread_states = NULL;
+  }
+  _incremental_collection_failed = false;
+  // The "dilatation_factor" is the expansion that can occur on
+  // account of the fact that the minimum object size in the CMS
+  // generation may be larger than that in, say, a contiguous young
+  //  generation.
+  // Ideally, in the calculation below, we'd compute the dilatation
+  // factor as: MinChunkSize/(promoting_gen's min object size)
+  // Since we do not have such a general query interface for the
+  // promoting generation, we'll instead just use the minimum
+  // object size (which today is a header's worth of space);
+  // note that all arithmetic is in units of HeapWords.
+  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
+  assert(_dilatation_factor >= 1.0, "from previous assert");
+}
+
+// Delegate reference-processor setup to this generation's collector,
+// which must already have been created.
+void ConcurrentMarkSweepGeneration::ref_processor_init() {
+  CMSCollector* c = collector();
+  assert(c != NULL, "no collector");
+  c->ref_processor_init();
+}
+
+// Lazily create the reference processor shared by this collector and the
+// cms generation; the perm gen is given a separate dummy processor.
+// Idempotent: a second call is a no-op once _ref_processor is set.
+void CMSCollector::ref_processor_init() {
+  if (_ref_processor == NULL) {
+    // Allocate and initialize a reference processor
+    _ref_processor = ReferenceProcessor::create_ref_processor(
+        _span,                               // span
+        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
+        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
+        &_is_alive_closure,
+        ParallelGCThreads,
+        ParallelRefProcEnabled);
+    // Initialize the _ref_processor field of CMSGen
+    _cmsGen->set_ref_processor(_ref_processor);
+
+    // Allocate a dummy ref processor for perm gen.
+    ReferenceProcessor* rp2 = new ReferenceProcessor();
+    if (rp2 == NULL) {
+      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
+    }
+    _permGen->set_ref_processor(rp2);
+  }
+}
+
+// Fetch the adaptive size policy from the generational heap's policy,
+// asserting both the heap kind and the size-policy kind along the way.
+CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
+  GenCollectedHeap* heap = GenCollectedHeap::heap();
+  assert(heap->kind() == CollectedHeap::GenCollectedHeap,
+    "Wrong type of heap");
+  CMSAdaptiveSizePolicy* policy =
+    (CMSAdaptiveSizePolicy*) heap->gen_policy()->size_policy();
+  assert(policy->is_gc_cms_adaptive_size_policy(),
+    "Wrong type of size policy");
+  return policy;
+}
+
+// Return the collector policy's counters, downcast to the CMS adaptive
+// kind; asserts the kind tag matches.
+CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
+  CMSGCAdaptivePolicyCounters* counters =
+    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
+  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
+         "Wrong gc policy counter kind");
+  return counters;
+}
+
+
+// Create the perf counters for this (old) generation and its single space.
+void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
+  const char* const gen_name = "old";
+
+  // Generation Counters - generation 1, 1 subspace
+  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+
+  // Space counters cover the whole reserved size of the generation.
+  _space_counters = new GSpaceCounters(gen_name, 0,
+                                       _virtual_space.reserved_size(),
+                                       this, _gen_counters);
+}
+
+// Initialize CMS statistics. The caller-supplied alpha (<= 100) is saved
+// in _saved_alpha; the working alphas start at the bootstrap value 100 so
+// early samples dominate the exponential averages.
+CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
+  _cms_gen(cms_gen)
+{
+  assert(alpha <= 100, "bad value");
+  _saved_alpha = alpha;
+
+  // Initialize the alphas to the bootstrap value of 100.
+  _gc0_alpha = _cms_alpha = 100;
+
+  _cms_begin_time.update();
+  _cms_end_time.update();
+
+  _gc0_duration = 0.0;
+  _gc0_period = 0.0;
+  _gc0_promoted = 0;
+
+  _cms_duration = 0.0;
+  _cms_period = 0.0;
+  _cms_allocated = 0;
+
+  _cms_used_at_gc0_begin = 0;
+  _cms_used_at_gc0_end = 0;
+  _allow_duty_cycle_reduction = false;
+  _valid_bits = 0;
+  _icms_duty_cycle = CMSIncrementalDutyCycle;
+}
+
+// If promotion failure handling is on use
+// the padded average size of the promotion for each
+// young generation collection.
+// Returns the estimated seconds until the cms gen can no longer absorb
+// the next minor collection's promotion, or 0.0 if it already cannot.
+double CMSStats::time_until_cms_gen_full() const {
+  size_t cms_free = _cms_gen->cmsSpace()->free();
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  // Worst case: the entire young gen (gen 0) is promoted.
+  size_t expected_promotion = gch->get_gen(0)->capacity();
+  if (HandlePromotionFailure) {
+    expected_promotion = MIN2(
+	(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
+	expected_promotion);
+  }
+  if (cms_free > expected_promotion) {
+    // Start a cms collection if there isn't enough space to promote
+    // for the next minor collection.  Use the padded average as
+    // a safety factor.
+    cms_free -= expected_promotion;
+
+    // Adjust by the safety factor.
+    double cms_free_dbl = (double)cms_free;
+    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
+
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
+	SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
+	cms_free, expected_promotion);
+      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
+	cms_free_dbl, cms_consumption_rate() + 1.0);
+    }
+    // Add 1 in case the consumption rate goes to zero.
+    return cms_free_dbl / (cms_consumption_rate() + 1.0);
+  }
+  // Not enough headroom for the expected promotion: collect now.
+  return 0.0;
+}
+
+// Compare the duration of the cms collection to the
+// time remaining before the cms generation is empty.
+// Note that the time from the start of the cms collection
+// to the start of the cms sweep (less than the total
+// duration of the cms collection) can be used.  This
+// has been tried and some applications experienced
+// promotion failures early in execution.  This was
+// possibly because the averages were not accurate
+// enough at the beginning.
+double CMSStats::time_until_cms_start() const {
+  // We add "gc0_period" to the "work" calculation
+  // below because this query is done (mostly) at the
+  // end of a scavenge, so we need to conservatively
+  // account for that much possible delay
+  // in the query so as to avoid concurrent mode failures
+  // due to starting the collection just a wee bit too
+  // late.
+  double work = cms_duration() + gc0_period();
+  double deadline = time_until_cms_gen_full();
+  if (work > deadline) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(
+        " CMSCollector: collect because of anticipated promotion "
+        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
+        gc0_period(), time_until_cms_gen_full());
+    }
+    return 0.0;
+  }
+  // NOTE(review): since work <= deadline here, work - deadline is <= 0;
+  // "deadline - work" would be the positive remaining slack. TODO confirm
+  // the intended sign against the callers before changing anything.
+  return work - deadline;
+}
+
+// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
+// amount of change to prevent wild oscillation.
+unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
+                                              unsigned int new_duty_cycle) {
+  assert(old_duty_cycle <= 100, "bad input value");
+  assert(new_duty_cycle <= 100, "bad input value");
+
+  // Note:  use subtraction with caution since it may underflow (values are
+  // unsigned).  Addition is safe since we're in the range 0-100.
+  unsigned int result = new_duty_cycle;
+  if (new_duty_cycle < old_duty_cycle) {
+    // Shrinking: allow a drop of at most old/4 (but at least 5) per step.
+    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
+    if (new_duty_cycle + largest_delta < old_duty_cycle) {
+      result = old_duty_cycle - largest_delta;
+    }
+  } else if (new_duty_cycle > old_duty_cycle) {
+    // Growing: allow a rise of at most old/4 (but at least 15), capped at 100.
+    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
+    if (new_duty_cycle > old_duty_cycle + largest_delta) {
+      result = MIN2(old_duty_cycle + largest_delta, 100U);
+    }
+  }
+  assert(result <= 100, "invalid duty cycle computed");
+
+  if (CMSTraceIncrementalPacing) {
+    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
+                        old_duty_cycle, new_duty_cycle, result);
+  }
+  return result;
+}
+
+// Recompute the incremental-CMS duty cycle from the scaled time remaining
+// in the current cms cycle versus the time until the gen is full. Increases
+// are damped; decreases are damped and allowed only once per cms cycle
+// (gated by _allow_duty_cycle_reduction, which is cleared here).
+unsigned int CMSStats::icms_update_duty_cycle_impl() {
+  assert(CMSIncrementalPacing && valid(),
+	 "should be handled in icms_update_duty_cycle()");
+
+  double cms_time_so_far = cms_timer().seconds();
+  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
+  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
+
+  // Avoid division by 0.
+  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
+  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
+
+  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
+  if (new_duty_cycle > _icms_duty_cycle) {
+    // Avoid very small duty cycles (1 or 2); 0 is allowed.
+    if (new_duty_cycle > 2) {
+      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
+						new_duty_cycle);
+    }
+  } else if (_allow_duty_cycle_reduction) {
+    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
+    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
+    // Respect the minimum duty cycle.
+    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
+    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
+  }
+
+  if (PrintGCDetails || CMSTraceIncrementalPacing) {
+    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
+  }
+
+  _allow_duty_cycle_reduction = false;
+  return _icms_duty_cycle;
+}
+
+#ifndef PRODUCT
+// Debug-only dump of all collected statistics (alphas, durations, rates,
+// occupancy samples) on st, on a single line.
+void CMSStats::print_on(outputStream *st) const {
+  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
+  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
+	       gc0_duration(), gc0_period(), gc0_promoted());
+  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+	    cms_duration(), cms_duration_per_mb(),
+	    cms_period(), cms_allocated());
+  st->print(",cms_since_beg=%g,cms_since_end=%g",
+	    cms_time_since_begin(), cms_time_since_end());
+  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
+	    _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
+  if (CMSIncrementalMode) {
+    st->print(",dc=%d", icms_duty_cycle());
+  }
+
+  // Rates are only meaningful once enough samples have been recorded.
+  if (valid()) {
+    st->print(",promo_rate=%g,cms_alloc_rate=%g",
+	      promotion_rate(), cms_allocation_rate());
+    st->print(",cms_consumption_rate=%g,time_until_full=%g",
+	      cms_consumption_rate(), time_until_cms_gen_full());
+  }
+  st->print(" ");
+}
+#endif // #ifndef PRODUCT
+
+// Static collector state: the CMS state machine starts out Idling,
+// with no foreground GC active or waiting.
+CMSCollector::CollectorState CMSCollector::_collectorState =
+                             CMSCollector::Idling;
+bool CMSCollector::_foregroundGCIsActive = false;
+bool CMSCollector::_foregroundGCShouldWait = false;
+
+// CMSCollector constructor: binds the cms and perm generations to this
+// collector, allocates the marking bit map, mod-union table, mark/revisit
+// stacks and (optionally) parallel work gang and task queues, computes the
+// initiating occupancy, and starts the background CMS thread. On any
+// allocation failure it warns and returns early, leaving
+// _completed_initialization false.
+CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
+                           ConcurrentMarkSweepGeneration* permGen,
+                           CardTableRS*                   ct,
+			   ConcurrentMarkSweepPolicy*	  cp):
+  _cmsGen(cmsGen),
+  _permGen(permGen),
+  _ct(ct),
+  _ref_processor(NULL),    // will be set later
+  _conc_workers(NULL),     // may be set later
+  _abort_preclean(false),
+  _start_sampling(false),
+  _between_prologue_and_epilogue(false),
+  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
+  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
+  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
+                 -1 /* lock-free */, "No_lock" /* dummy */),
+  _modUnionClosure(&_modUnionTable),
+  _modUnionClosurePar(&_modUnionTable),
+  _is_alive_closure(&_markBitMap),
+  _restart_addr(NULL),
+  _overflow_list(NULL),
+  _preserved_oop_stack(NULL),
+  _preserved_mark_stack(NULL),
+  _stats(cmsGen),
+  _eden_chunk_array(NULL),     // may be set in ctor body
+  _eden_chunk_capacity(0),     // -- ditto --
+  _eden_chunk_index(0),        // -- ditto --
+  _survivor_plab_array(NULL),  // -- ditto --
+  _survivor_chunk_array(NULL), // -- ditto --
+  _survivor_chunk_capacity(0), // -- ditto --
+  _survivor_chunk_index(0),    // -- ditto --
+  _ser_pmc_preclean_ovflw(0),
+  _ser_pmc_remark_ovflw(0),
+  _par_pmc_remark_ovflw(0),
+  _ser_kac_ovflw(0),
+  _par_kac_ovflw(0),
+  _collection_count_start(0),
+  _verifying(false),
+  _icms_start_limit(NULL),
+  _icms_stop_limit(NULL),
+  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
+  _completed_initialization(false),
+  _collector_policy(cp),
+  _unload_classes(false),
+  _unloaded_classes_last_cycle(false),
+  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
+{
+  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
+    ExplicitGCInvokesConcurrent = true;
+  }
+  // Now expand the span and allocate the collection support structures
+  // (MUT, marking bit map etc.) to cover both generations subject to
+  // collection.
+
+  // First check that _permGen is adjacent to _cmsGen and above it.
+  assert(   _cmsGen->reserved().word_size()  > 0
+         && _permGen->reserved().word_size() > 0,
+         "generations should not be of zero size");
+  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
+         "_cmsGen and _permGen should not overlap");
+  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
+         "_cmsGen->end() different from _permGen->start()");
+
+  // For use by dirty card to oop closures.
+  _cmsGen->cmsSpace()->set_collector(this);
+  _permGen->cmsSpace()->set_collector(this);
+
+  // Adjust my span to cover old (cms) gen and perm gen
+  _span = _cmsGen->reserved()._union(_permGen->reserved());
+  // Initialize the span of is_alive_closure
+  _is_alive_closure.set_span(_span);
+
+  // Allocate MUT and marking bit map
+  {
+    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
+    if (!_markBitMap.allocate(_span)) {
+      warning("Failed to allocate CMS Bit Map");
+      return;
+    }
+    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
+  }
+  {
+    _modUnionTable.allocate(_span);
+    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
+  }
+
+  if (!_markStack.allocate(CMSMarkStackSize)) {
+    warning("Failed to allocate CMS Marking Stack");
+    return;
+  }
+  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
+    warning("Failed to allocate CMS Revisit Stack");
+    return;
+  }
+
+  // Support for multi-threaded concurrent phases
+  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
+    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
+      // just for now
+      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
+    }
+    if (ParallelCMSThreads > 1) {
+      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
+                                 ParallelCMSThreads, true);
+      if (_conc_workers == NULL) {
+        warning("GC/CMS: _conc_workers allocation failure: "
+              "forcing -CMSConcurrentMTEnabled");
+        CMSConcurrentMTEnabled = false;
+      }
+    } else {
+      CMSConcurrentMTEnabled = false;
+    }
+  }
+  if (!CMSConcurrentMTEnabled) {
+    ParallelCMSThreads = 0;
+  } else {
+    // Turn off CMSCleanOnEnter optimization temporarily for
+    // the MT case where it's not fixed yet; see 6178663.
+    CMSCleanOnEnter = false;
+  }
+  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1), 
+         "Inconsistency");
+
+  // Parallel task queues; these are shared for the
+  // concurrent and stop-world phases of CMS, but
+  // are not shared with parallel scavenge (ParNew).
+  {
+    uint i;
+    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
+  
+    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
+         || ParallelRefProcEnabled)
+        && num_queues > 0) {
+      _task_queues = new OopTaskQueueSet(num_queues);
+      if (_task_queues == NULL) {
+        warning("task_queues allocation failure.");
+        return;
+      }
+      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
+      if (_hash_seed == NULL) {
+        warning("_hash_seed array allocation failure");
+        return;
+      }
+
+      // XXX use a global constant instead of 64!
+      typedef struct OopTaskQueuePadded {
+        OopTaskQueue work_queue;
+        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
+      } OopTaskQueuePadded;
+    
+      for (i = 0; i < num_queues; i++) {
+        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
+        if (q_padded == NULL) {
+          warning("work_queue allocation failure.");
+          return;
+        }
+        _task_queues->register_queue(i, &q_padded->work_queue);
+      }
+      for (i = 0; i < num_queues; i++) {
+        _task_queues->queue(i)->initialize();
+        _hash_seed[i] = 17;  // copied from ParNew
+      }
+    }
+  }
+
+  // "initiatingOccupancy" is the occupancy ratio at which we trigger
+  // a new collection cycle.  Unless explicitly specified via
+  // CMSTriggerRatio, it is calculated by:
+  //   Let "f" be MinHeapFreeRatio in
+  //
+  //    initiatingOccupancy = 100-f +
+  //                         f * (CMSTriggerRatio/100)
+  // That is, if we assume the heap is at its desired maximum occupancy at the
+  // end of a collection, we let CMSTriggerRatio of the (purported) free
+  // space be allocated before initiating a new collection cycle.
+  if (CMSInitiatingOccupancyFraction > 0) {
+    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
+  } else {
+    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
+                           (double)(CMSTriggerRatio *
+                                    MinHeapFreeRatio) / 100.0)
+			   / 100.0;
+  }
+  // Clip CMSBootstrapOccupancy between 0 and 100.
+  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
+                         /(double)100;
+
+  _full_gcs_since_conc_gc = 0;
+
+  // Now tell CMS generations the identity of their collector
+  ConcurrentMarkSweepGeneration::set_collector(this);
+
+  // Create & start a CMS thread for this CMS collector
+  _cmsThread = ConcurrentMarkSweepThread::start(this);
+  assert(cmsThread() != NULL, "CMS Thread should have been created");
+  assert(cmsThread()->collector() == this,
+         "CMS Thread should refer to this gen");
+  assert(CGC_lock != NULL, "Where's the CGC_lock?");
+
+  // Support for parallelizing young gen rescan
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  _young_gen = gch->prev_gen(_cmsGen);
+  if (gch->supports_inline_contig_alloc()) {
+    _top_addr = gch->top_addr();
+    _end_addr = gch->end_addr();
+    assert(_young_gen != NULL, "no _young_gen");
+    _eden_chunk_index = 0;
+    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
+    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
+    if (_eden_chunk_array == NULL) {
+      _eden_chunk_capacity = 0;
+      warning("GC/CMS: _eden_chunk_array allocation failure");
+    }
+  }
+  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
+
+  // Support for parallelizing survivor space rescan
+  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
+    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
+    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
+    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
+    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
+    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
+        || _cursor == NULL) {
+      // Partial allocation failure: free whatever succeeded and disable
+      // parallel survivor rescan rather than aborting.
+      warning("Failed to allocate survivor plab/chunk array");
+      if (_survivor_plab_array  != NULL) {
+        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
+        _survivor_plab_array = NULL;
+      }
+      if (_survivor_chunk_array != NULL) {
+        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
+        _survivor_chunk_array = NULL;
+      }
+      if (_cursor != NULL) {
+        FREE_C_HEAP_ARRAY(size_t, _cursor);
+        _cursor = NULL;
+      }
+    } else {
+      _survivor_chunk_capacity = 2*max_plab_samples;
+      for (uint i = 0; i < ParallelGCThreads; i++) {
+        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
+        if (vec == NULL) {
+          warning("Failed to allocate survivor plab array");
+          for (int j = i; j > 0; j--) {
+            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
+          }
+          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
+          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
+          _survivor_plab_array = NULL;
+          _survivor_chunk_array = NULL;
+          _survivor_chunk_capacity = 0;
+          break;
+        } else {
+          ChunkArray* cur =
+            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
+                                                        max_plab_samples);
+          assert(cur->end() == 0, "Should be 0");
+          assert(cur->array() == vec, "Should be vec");
+          assert(cur->capacity() == max_plab_samples, "Error");
+        }
+      }
+    }
+  }
+  assert(   (   _survivor_plab_array  != NULL
+             && _survivor_chunk_array != NULL)
+         || (   _survivor_chunk_capacity == 0
+             && _survivor_chunk_index == 0),
+         "Error");
+
+  // Choose what strong roots should be scanned depending on verification options
+  // and perm gen collection mode.
+  if (!CMSClassUnloadingEnabled) {
+    // If class unloading is disabled we want to include all classes into the root set.
+    add_root_scanning_option(SharedHeap::SO_AllClasses);
+  } else {
+    add_root_scanning_option(SharedHeap::SO_SystemClasses);
+  }
+
+  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
+  _gc_counters = new CollectorCounters("CMS", 1);
+  _completed_initialization = true;
+  _sweep_timer.start();  // start of time
+}
+
+// Human-readable name of this generation, used in logging and tooling.
+const char* ConcurrentMarkSweepGeneration::name() const {
+  return "concurrent mark-sweep generation";
+}
+// Refresh all perf counters (space and generation) when perf data is enabled.
+void ConcurrentMarkSweepGeneration::update_counters() {
+  if (!UsePerfData) return;
+  _space_counters->update_all();
+  _gen_counters->update_all();
+}
+
+// this is an optimized version of update_counters(). it takes the
+// used value as a parameter rather than computing it. 
+//
+void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
+  if (!UsePerfData) return;
+  _space_counters->update_used(used);
+  _space_counters->update_capacity();
+  _gen_counters->update_all();
+}
+
+// Print the superclass generation summary followed by the CMS space.
+void ConcurrentMarkSweepGeneration::print() const {
+  Generation::print();
+  cmsSpace()->print();
+}
+
+#ifndef PRODUCT
+// Debug-only: print the free-list census of the CMS space.
+void ConcurrentMarkSweepGeneration::print_statistics() {
+  cmsSpace()->printFLCensus(0);
+}
+#endif
+
+// Print "[level name-s: used(capacity)]" for this generation (only with
+// PrintGCDetails) followed by heap-wide used(capacity); raw byte values
+// when Verbose, otherwise in KB.
+void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  if (PrintGCDetails) {
+    if (Verbose) {
+      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]", 
+	level(), short_name(), s, used(), capacity());
+    } else {
+      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]", 
+	level(), short_name(), s, used() / K, capacity() / K);
+    }
+  }
+  if (Verbose) {
+    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
+              gch->used(), gch->capacity());
+  } else {
+    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
+              gch->used() / K, gch->capacity() / K);
+  }
+}
+
+// Approximation of the largest contiguous chunk available for allocation:
+// the larger of the uncommitted part of the virtual space and the largest
+// block allocatable without a GC.
+size_t
+ConcurrentMarkSweepGeneration::contiguous_available() const {
+  // dld proposes an improvement in precision here. If the committed
+  // part of the space ends in a free block we should add that to
+  // uncommitted size in the calculation below. Will make this
+  // change later, staying with the approximation below for the
+  // time being. -- ysr.
+  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
+}
+
+// Largest allocation (in bytes) possible without triggering a GC.
+size_t
+ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
+  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
+}
+
+// Total bytes potentially available: current free space plus what could
+// still be committed from the reserved virtual space.
+size_t ConcurrentMarkSweepGeneration::max_available() const {
+  return free() + _virtual_space.uncommitted_size();
+}
+
+// Decide whether promoting up to max_promotion_in_bytes into this generation
+// is safe. The requirement is relaxed progressively:
+//   1. contiguous space covers the (dilatation-adjusted) worst case, else
+//   2. if the younger gen can handle promotion failure, total available
+//      space covers the adjusted worst case, else
+//   3. total available space covers the padded average of past promotions.
+bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
+    size_t max_promotion_in_bytes,
+    bool younger_handles_promotion_failure) const {
+
+  // This is the most conservative test.  Full promotion is 
+  // guaranteed if this is used. The multiplicative factor is to
+  // account for the worst case "dilatation".
+  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
+  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
+    // Clamp before the narrowing cast below; otherwise the conversion
+    // from an out-of-range double would be undefined.
+    adjusted_max_promo_bytes = (double)max_uintx;
+  }
+  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
+
+  if (younger_handles_promotion_failure && !result) {
+    // Full promotion is not guaranteed because fragmentation
+    // of the cms generation can prevent the full promotion.
+    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
+
+    if (!result) {
+      // With promotion failure handling the test for the ability
+      // to support the promotion does not have to be guaranteed.
+      // Use an average of the amount promoted.
+      result = max_available() >= (size_t) 
+	gc_stats()->avg_promoted()->padded_average();
+      if (PrintGC && Verbose && result) {
+        gclog_or_tty->print_cr(
+	  "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
+          " max_available: " SIZE_FORMAT
+          " avg_promoted: " SIZE_FORMAT,
+          max_available(), (size_t)
+          gc_stats()->avg_promoted()->padded_average());
+      }
+    } else {
+      if (PrintGC && Verbose) {
+        gclog_or_tty->print_cr(
+          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
+          " max_available: " SIZE_FORMAT
+          " adj_max_promo_bytes: " SIZE_FORMAT,
+          max_available(), (size_t)adjusted_max_promo_bytes);
+      }
+    }
+  } else {
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr(
+        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
+        " contiguous_available: " SIZE_FORMAT
+        " adj_max_promo_bytes: " SIZE_FORMAT,
+        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
+    }
+  }
+  return result;
+}
+
+// The CMS generation has a single space; it is also the first (and only)
+// compaction space.
+CompactibleSpace*
+ConcurrentMarkSweepGeneration::first_compaction_space() const {
+  return _cmsSpace;
+}
+
+void ConcurrentMarkSweepGeneration::reset_after_compaction() {
+  // Clear the promotion information.  These pointers can be adjusted
+  // along with all the other pointers into the heap but
+  // compaction is expected to be a rare event with 
+  // a heap using cms so don't do it without seeing the need.
+  if (ParallelGCThreads > 0) {
+    for (uint i = 0; i < ParallelGCThreads; i++) {
+      _par_gc_thread_states[i]->promo.reset();
+    }
+  }
+}
+
+// Apply the closure to the single CMS space; usedOnly is ignored since
+// there is only one space.
+void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
+  blk->do_space(_cmsSpace);
+}
+
+// Resize policy for the CMS generation, called with the Heap_lock held or
+// at a safepoint. If a preceding incremental collection failed, grow all
+// the way to the reserved limit. Otherwise expand just enough to bring the
+// free fraction up to MinHeapFreeRatio (never less than MinHeapDeltaBytes).
+// Note: this method only expands; shrinking toward MaxHeapFreeRatio is not
+// attempted here (maximum_free_percentage is computed for logging only).
+void ConcurrentMarkSweepGeneration::compute_new_size() {
+  assert_locked_or_safepoint(Heap_lock);
+
+  // If incremental collection failed, we just want to expand
+  // to the limit.
+  if (incremental_collection_failed()) {
+    clear_incremental_collection_failed();
+    grow_to_reserved();
+    return;
+  }
+
+  size_t expand_bytes = 0;
+  double free_percentage = ((double) free()) / capacity();
+  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
+  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
+
+  // compute expansion delta needed for reaching desired free percentage
+  if (free_percentage < desired_free_percentage) {
+    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
+    assert(desired_capacity >= capacity(), "invalid expansion size");
+    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
+  }
+  if (expand_bytes > 0) {
+    if (PrintGCDetails && Verbose) {
+      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
+      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
+      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
+      gclog_or_tty->print_cr("  Desired free fraction %f", 
+        desired_free_percentage);
+      gclog_or_tty->print_cr("  Maximum free fraction %f", 
+        maximum_free_percentage);
+      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
+      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT, 
+        desired_capacity/1000);
+      int prev_level = level() - 1;
+      if (prev_level >= 0) {
+        size_t prev_size = 0;
+        GenCollectedHeap* gch = GenCollectedHeap::heap();
+        Generation* prev_gen = gch->_gens[prev_level];
+        prev_size = prev_gen->capacity();
+        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
+                               prev_size/1000);
+      }
+      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
+        unsafe_max_alloc_nogc()/1000);
+      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT, 
+        contiguous_available()/1000);
+      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
+        expand_bytes);
+    }
+    // safe if expansion fails
+    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); 
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("  Expanded free fraction %f", 
+        ((double) free()) / capacity());
+    }
+  }
+}
+
+// Accessor for the free-list lock owned by the underlying CMS space.
+Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
+  return cmsSpace()->freelistLock();
+}
+
+// Mutator allocation entry point: takes the free-list lock (without a
+// safepoint check) and delegates to have_lock_and_allocate().
+// NOTE(review): CMSSynchronousYieldRequest presumably asks a concurrent
+// collection to yield while this allocation holds the lock — confirm.
+HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
+                                                  bool   tlab) {
+  CMSSynchronousYieldRequest yr;
+  MutexLockerEx x(freelistLock(),
+                  Mutex::_no_safepoint_check_flag);
+  return have_lock_and_allocate(size, tlab);
+}
+
+// Allocate `size` words from the CMS space; caller must already hold the
+// free-list lock. Returns NULL on failure. On success the collector is
+// informed via direct_allocated() so the object is marked live if a
+// concurrent marking cycle is in progress.
+HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
+                                                  bool   tlab) {
+  assert_lock_strong(freelistLock());
+  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
+  HeapWord* res = cmsSpace()->allocate(adjustedSize);
+  // Allocate the object live (grey) if the background collector has
+  // started marking. This is necessary because the marker may
+  // have passed this address and consequently this object will
+  // not otherwise be greyed and would be incorrectly swept up.
+  // Note that if this object contains references, the writing
+  // of those references will dirty the card containing this object
+  // allowing the object to be blackened (and its references scanned)
+  // either during a preclean phase or at the final checkpoint.
+  if (res != NULL) {
+    collector()->direct_allocated(res, adjustedSize);
+    _direct_allocated_words += adjustedSize;
+    // allocation counters (non-product builds only)
+    NOT_PRODUCT(
+      _numObjectsAllocated++;
+      _numWordsAllocated += (int)adjustedSize;
+    )
+  }
+  return res;
+}
+
+// In the case of direct allocation by mutators in a generation that
+// is being concurrently collected, the object must be allocated
+// live (grey) if the background collector has started marking.
+// This is necessary because the marker may
+// have passed this address and consequently this object will
+// not otherwise be greyed and would be incorrectly swept up.
+// Note that if this object contains references, the writing
+// of those references will dirty the card containing this object
+// allowing the object to be blackened (and its references scanned)
+// either during a preclean phase or at the final checkpoint.
+// `start` is the object's address; `size` is its length in HeapWords.
+void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
+  assert(_markBitMap.covers(start, size), "Out of bounds");
+  if (_collectorState >= Marking) {
+    MutexLockerEx y(_markBitMap.lock(),
+                    Mutex::_no_safepoint_check_flag);
+    // [see comments preceding SweepClosure::do_blk() below for details]
+    // 1. need to mark the object as live so it isn't collected
+    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
+    // 3. need to mark the end of the object so sweeper can skip over it
+    //    if it's uninitialized when the sweeper reaches it.
+    _markBitMap.mark(start);          // object is live
+    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
+    _markBitMap.mark(start + size - 1);
+                                      // mark end of object
+  }
+  // check that oop looks uninitialized: klass is installed last by callers
+  assert(oop(start)->klass() == NULL, "_klass should be NULL");
+}
+
+// Notify the collector that an object was promoted into a CMS generation
+// at `start` (size `obj_size` in words). `par` selects the parallel-safe
+// bitmap marking primitive. Promotion happens only at safepoints, so unlike
+// direct_allocated() no "uninitialized" marking is needed.
+void CMSCollector::promoted(bool par, HeapWord* start,
+                            bool is_obj_array, size_t obj_size) {
+  assert(_markBitMap.covers(start), "Out of bounds");
+  // See comment in direct_allocated() about when objects should
+  // be allocated live.
+  if (_collectorState >= Marking) {
+    // we already hold the marking bit map lock, taken in
+    // the prologue
+    if (par) {
+      _markBitMap.par_mark(start);
+    } else {
+      _markBitMap.mark(start);
+    }
+    // We don't need to mark the object as uninitialized (as
+    // in direct_allocated above) because this is being done with the
+    // world stopped and the object will be initialized by the
+    // time the sweeper gets to look at it.
+    assert(SafepointSynchronize::is_at_safepoint(),
+           "expect promotion only at safepoints");
+
+    if (_collectorState < Sweeping) {
+      // Mark the appropriate cards in the modUnionTable, so that
+      // this object gets scanned before the sweep. If this is
+      // not done, CMS generation references in the object might
+      // not get marked.
+      // For the case of arrays, which are otherwise precisely
+      // marked, we need to dirty the entire array, not just its head.
+      if (is_obj_array) {
+        // The [par_]mark_range() method expects mr.end() below to
+        // be aligned to the granularity of a bit's representation
+        // in the heap. In the case of the MUT below, that's a
+        // card size.
+        MemRegion mr(start,
+                     (HeapWord*)round_to((intptr_t)(start + obj_size),
+                        CardTableModRefBS::card_size /* bytes */));
+        if (par) {
+          _modUnionTable.par_mark_range(mr);
+        } else {
+	  _modUnionTable.mark_range(mr);
+        }
+      } else {  // not an obj array; we can just mark the head
+        if (par) {
+	  _modUnionTable.par_mark(start);
+        } else {
+	  _modUnionTable.mark(start);
+        }
+      }
+    }
+  }
+}
+
+// Return addr's offset from the bottom of `space` as a percentage of the
+// space's capacity. pointer_delta yields words, so capacity (bytes) is
+// divided by HeapWordSize to match units.
+static inline size_t percent_of_space(Space* space, HeapWord* addr)
+{
+  size_t delta = pointer_delta(addr, space->bottom());
+  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
+}
+
+// Incremental-mode pacing: recompute the eden allocation limits that bound
+// the window during which the incremental CMS collector runs. The duty
+// cycle (0..100) is converted to a word count centered in eden's free
+// space; a duty cycle of 0 (or a degenerate window) disables icms by
+// setting both limits to eden->end().
+void CMSCollector::icms_update_allocation_limits()
+{
+  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
+  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
+
+  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
+  if (CMSTraceIncrementalPacing) {
+    stats().print();
+  }
+
+  assert(duty_cycle <= 100, "invalid duty cycle");
+  if (duty_cycle != 0) {
+    // The duty_cycle is a percentage between 0 and 100; convert to words and
+    // then compute the offset from the endpoints of the space.
+    size_t free_words = eden->free() / HeapWordSize;
+    double free_words_dbl = (double)free_words;
+    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
+    size_t offset_words = (free_words - duty_cycle_words) / 2;
+
+    _icms_start_limit = eden->top() + offset_words;
+    _icms_stop_limit = eden->end() - offset_words;
+
+    // The limits may be adjusted (shifted to the right) by
+    // CMSIncrementalOffset, to allow the application more mutator time after a
+    // young gen gc (when all mutators were stopped) and before CMS starts and
+    // takes away one or more cpus.
+    if (CMSIncrementalOffset != 0) {
+      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
+      size_t adjustment = (size_t)adjustment_dbl;
+      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
+      // Only shift if the stop limit stays inside eden (no overflow past end).
+      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
+	_icms_start_limit += adjustment;
+	_icms_stop_limit = tmp_stop;
+      }
+    }
+  }
+  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
+    _icms_start_limit = _icms_stop_limit = eden->end();
+  }
+
+  // Install the new start limit.
+  eden->set_soft_end(_icms_start_limit);
+
+  if (CMSTraceIncrementalMode) {
+    gclog_or_tty->print(" icms alloc limits:  "
+			   PTR_FORMAT "," PTR_FORMAT
+			   " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
+			   _icms_start_limit, _icms_stop_limit,
+			   percent_of_space(eden, _icms_start_limit),
+			   percent_of_space(eden, _icms_stop_limit));
+    if (Verbose) {
+      gclog_or_tty->print("eden:  ");
+      eden->print_on(gclog_or_tty);
+    }
+  }
+}
+
+// Any changes here should try to maintain the invariant
+// that if this method is called with _icms_start_limit
+// and _icms_stop_limit both NULL, then it should return NULL
+// and not notify the icms thread.
+//
+// Called when a mutator allocation crosses the current soft limit in
+// `space`. Returns the next soft limit to install (or NULL when icms is
+// off / past all limits). Crossing the start limit starts the icms thread;
+// crossing the stop limit (or both at once) stops it.
+HeapWord* 
+CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
+				       size_t word_size)
+{
+  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
+  // nop.
+  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
+    if (top <= _icms_start_limit) {
+      if (CMSTraceIncrementalMode) {
+	space->print_on(gclog_or_tty);
+	gclog_or_tty->stamp();
+	gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
+			       ", new limit=" PTR_FORMAT
+			       " (" SIZE_FORMAT "%%)",
+			       top, _icms_stop_limit,
+			       percent_of_space(space, _icms_stop_limit));
+      }
+      ConcurrentMarkSweepThread::start_icms();
+      assert(top < _icms_stop_limit, "Tautology"); 
+      if (word_size < pointer_delta(_icms_stop_limit, top)) { 
+	return _icms_stop_limit;
+      }
+
+      // The allocation will cross both the _start and _stop limits, so do the
+      // stop notification also and return end().
+      if (CMSTraceIncrementalMode) {
+	space->print_on(gclog_or_tty);
+	gclog_or_tty->stamp();
+	gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
+			       ", new limit=" PTR_FORMAT
+			       " (" SIZE_FORMAT "%%)",
+			       top, space->end(),
+			       percent_of_space(space, space->end()));
+      }
+      ConcurrentMarkSweepThread::stop_icms();
+      return space->end();
+    }
+
+    if (top <= _icms_stop_limit) {
+      if (CMSTraceIncrementalMode) {
+	space->print_on(gclog_or_tty);
+	gclog_or_tty->stamp();
+	gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
+			       ", new limit=" PTR_FORMAT
+			       " (" SIZE_FORMAT "%%)",
+			       top, space->end(),
+			       percent_of_space(space, space->end()));
+      }
+      ConcurrentMarkSweepThread::stop_icms();
+      return space->end();
+    }
+
+    if (CMSTraceIncrementalMode) {
+      space->print_on(gclog_or_tty);
+      gclog_or_tty->stamp();
+      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
+			     ", new limit=" PTR_FORMAT,
+			     top, NULL);
+    }
+  }
+
+  return NULL;
+}
+
+// Promote `obj` (of `obj_size` words) into this generation. Caller must
+// hold the free-list lock. On first failure the generation is expanded and
+// the promotion retried once; returns NULL if it still fails. On success
+// the collector is informed so the copy is marked live if marking is active.
+oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
+  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
+  // allocate, copy and if necessary update promoinfo --
+  // delegate to underlying space.
+  assert_lock_strong(freelistLock());
+
+#ifndef	PRODUCT
+  // Test hook: simulate promotion failure in debug builds.
+  if (Universe::heap()->promotion_should_fail()) {
+    return NULL;
+  }
+#endif	// #ifndef PRODUCT
+
+  oop res = _cmsSpace->promote(obj, obj_size, ref);
+  if (res == NULL) {
+    // expand and retry
+    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
+    expand(s*HeapWordSize, MinHeapDeltaBytes, 
+      CMSExpansionCause::_satisfy_promotion);
+    // Since there's currently no next generation, we don't try to promote
+    // into a more senior generation.
+    assert(next_gen() == NULL, "assumption, based upon which no attempt "
+                               "is made to pass on a possibly failing "
+                               "promotion to next generation");
+    res = _cmsSpace->promote(obj, obj_size, ref);
+  }
+  if (res != NULL) {
+    // See comment in allocate() about when objects should
+    // be allocated live.
+    assert(obj->is_oop(), "Will dereference klass pointer below");
+    collector()->promoted(false,           // Not parallel
+                          (HeapWord*)res, obj->is_objArray(), obj_size);
+    // promotion counters (non-product builds only)
+    NOT_PRODUCT(
+      _numObjectsPromoted++;
+      _numWordsPromoted +=
+        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
+    )
+  }
+  return res;
+}
+
+
+// Thin forwarding wrapper: icms allocation-limit handling lives on the
+// collector (see CMSCollector::allocation_limit_reached above).
+HeapWord*
+ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
+					     HeapWord* top,
+					     size_t word_sz)
+{
+  return collector()->allocation_limit_reached(space, top, word_sz);
+}
+
+// Things to support parallel young-gen collection.
+// Per-thread promotion path: allocate from the thread's PLAB (expanding the
+// generation if needed), copy the object body, restore the saved mark word,
+// and install the klass pointer last so the copy atomically becomes a
+// fully-formed object. Returns NULL if space cannot be obtained.
+oop
+ConcurrentMarkSweepGeneration::par_promote(int thread_num,
+					   oop old, markOop m,
+					   size_t word_sz) {
+#ifndef	PRODUCT
+  // Test hook: simulate promotion failure in debug builds.
+  if (Universe::heap()->promotion_should_fail()) {
+    return NULL;
+  }
+#endif	// #ifndef PRODUCT
+
+  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
+  PromotionInfo* promoInfo = &ps->promo;
+  // if we are tracking promotions, then first ensure space for
+  // promotion (including spooling space for saving header if necessary).
+  // then allocate and copy, then track promoted info if needed.
+  // When tracking (see PromotionInfo::track()), the mark word may
+  // be displaced and in this case restoration of the mark word
+  // occurs in the (oop_since_save_marks_)iterate phase.
+  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
+    // Out of space for allocating spooling buffers;
+    // try expanding and allocating spooling buffers.
+    if (!expand_and_ensure_spooling_space(promoInfo)) {
+      return NULL;
+    }
+  }
+  assert(promoInfo->has_spooling_space(), "Control point invariant");
+  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
+  if (obj_ptr == NULL) {
+     obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
+     if (obj_ptr == NULL) {
+       return NULL;
+     }
+  }
+  oop obj = oop(obj_ptr);
+  assert(obj->klass() == NULL, "Object should be uninitialized here.");
+  // Otherwise, copy the object.  Here we must be careful to insert the
+  // klass pointer last, since this marks the block as an allocated object.
+  HeapWord* old_ptr = (HeapWord*)old;
+  if (word_sz > (size_t)oopDesc::header_size()) {
+    // Copy everything past the header; the header is handled separately
+    // (mark word restored below, klass installed last).
+    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
+				 obj_ptr + oopDesc::header_size(),
+				 word_sz - oopDesc::header_size());
+  }
+  // Restore the mark word copied above.
+  obj->set_mark(m);
+  // Now we can track the promoted object, if necessary.  We take care 
+  // To delay the transition from uninitialized to full object
+  // (i.e., insertion of klass pointer) until after, so that it
+  // atomically becomes a promoted object.
+  if (promoInfo->tracking()) {
+    promoInfo->track((PromotedObject*)obj, old->klass());
+  }
+  // Finally, install the klass pointer.
+  obj->set_klass(old->klass());
+
+  assert(old->is_oop(), "Will dereference klass ptr below");
+  collector()->promoted(true,          // parallel
+                        obj_ptr, old->is_objArray(), word_sz);
+  
+  // Counters updated atomically since multiple GC threads promote in parallel.
+  NOT_PRODUCT(
+    Atomic::inc(&_numObjectsPromoted);
+    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
+                &_numWordsPromoted);
+  )
+
+  return obj; 
+}
+
+// Promotion undo is unsupported for CMS; reaching here is a bug.
+void
+ConcurrentMarkSweepGeneration::
+par_promote_alloc_undo(int thread_num,
+		       HeapWord* obj, size_t word_sz) {
+  // CMS does not support promotion undo.
+  ShouldNotReachHere();
+}
+
+// End-of-GC hook for a worker: retire its PLAB; thread 0 optionally dumps
+// per-thread allocation stats when refill-stats tracking is compiled in.
+void
+ConcurrentMarkSweepGeneration::
+par_promote_alloc_done(int thread_num) {
+  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
+  ps->lab.retire();
+#if CFLS_LAB_REFILL_STATS
+  if (thread_num == 0) {
+    _cmsSpace->print_par_alloc_stats();
+  }
+#endif
+}
+
+// Iterate the thread's promoted objects with a NULL closure — done for the
+// iteration's side effects (e.g. restoring displaced headers), not scanning.
+void
+ConcurrentMarkSweepGeneration::
+par_oop_since_save_marks_iterate_done(int thread_num) {
+  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
+  ParScanWithoutBarrierClosure* dummy_cl = NULL;
+  ps->promo.promoted_oops_iterate_nv(dummy_cl);
+}
+
+// XXXPERM
+// Should a stop-the-world collection of this generation be done now?
+bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
+                                                   size_t size,
+                                                   bool   tlab)
+{
+  // We allow a STW collection only if a full
+  // collection was requested.
+  return full || should_allocate(size, tlab); // FIX ME !!!
+  // This and promotion failure handling are connected at the
+  // hip and should be fixed by untying them.
+}
+
+// Decide whether to start a concurrent CMS cycle now. Triggers, in order:
+// an explicit GC request (System.gc with ExplicitGCInvokesConcurrent),
+// the statistical pacing estimate (unless UseCMSInitiatingOccupancyOnly),
+// a bootstrap collection while stats are not yet valid, and finally the
+// per-generation occupancy criteria for the old gen and the perm gen.
+bool CMSCollector::shouldConcurrentCollect() {
+  if (_full_gc_requested) {
+    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
+                             " gc request");
+    }
+    return true;
+  }
+
+  // For debugging purposes, change the type of collection.
+  // If the rotation is not on the concurrent collection
+  // type, don't start a concurrent collection.
+  NOT_PRODUCT(
+    if (RotateCMSCollectionTypes && 
+	(_cmsGen->debug_collection_type() != 
+	  ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
+      assert(_cmsGen->debug_collection_type() != 
+	ConcurrentMarkSweepGeneration::Unknown_collection_type,
+	"Bad cms collection type");
+      return false;
+    }
+  )
+
+  FreelistLocker x(this);
+  // ------------------------------------------------------------------
+  // Print out lots of information which affects the initiation of
+  // a collection.
+  if (PrintCMSInitiationStatistics && stats().valid()) {
+    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
+    gclog_or_tty->stamp();
+    gclog_or_tty->print_cr("");
+    stats().print_on(gclog_or_tty);
+    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
+      stats().time_until_cms_gen_full());
+    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
+    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
+                           _cmsGen->contiguous_available());
+    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
+    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
+    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
+    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
+  }
+  // ------------------------------------------------------------------
+
+  // If the estimated time to complete a cms collection (cms_duration())
+  // is less than the estimated time remaining until the cms generation
+  // is full, start a collection.
+  if (!UseCMSInitiatingOccupancyOnly) {
+    if (stats().valid()) {
+      if (stats().time_until_cms_start() == 0.0) {
+        return true;
+      }
+    } else {
+      // We want to conservatively collect somewhat early in order
+      // to try and "bootstrap" our CMS/promotion statistics;
+      // this branch will not fire after the first successful CMS
+      // collection because the stats should then be valid.
+      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
+        if (Verbose && PrintGCDetails) {
+          gclog_or_tty->print_cr(
+            " CMSCollector: collect for bootstrapping statistics:"
+            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
+            _bootstrap_occupancy);
+        }
+        return true;
+      }
+    }
+  }
+
+  // Otherwise, we start a collection cycle if either the perm gen or
+  // old gen want a collection cycle started. Each may use
+  // an appropriate criterion for making this decision.
+  // XXX We need to make sure that the gen expansion
+  // criterion dovetails well with this.
+  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print_cr("CMS old gen initiated");
+    }
+    return true;
+  }
+
+  // Perm gen occupancy only matters when class unloading is enabled.
+  if (cms_should_unload_classes() &&
+      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
+    if (Verbose && PrintGCDetails) {
+     gclog_or_tty->print_cr("CMS perm gen initiated");
+    }
+    return true;
+  }
+
+  return false;
+}
+
+// Clear _expansion_cause fields of constituent generations (cms and perm).
+void CMSCollector::clear_expansion_cause() {
+  _cmsGen->clear_expansion_cause();
+  _permGen->clear_expansion_cause();
+}
+
+// Per-generation concurrent-collection trigger, evaluated with the
+// free-list lock held. `initiatingOccupancy` is the fraction (0..1)
+// above which a cycle should start.
+bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
+  double initiatingOccupancy) {
+  // We should be conservative in starting a collection cycle.  To
+  // start too eagerly runs the risk of collecting too often in the
+  // extreme.  To collect too rarely falls back on full collections,
+  // which works, even if not optimum in terms of concurrent work.
+  // As a work around for too eagerly collecting, use the flag
+  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
+  // giving the user an easily understandable way of controlling the
+  // collections.
+  // We want to start a new collection cycle if any of the following
+  // conditions hold:
+  // . our current occupancy exceeds the initiating occupancy, or
+  // . we recently needed to expand and have not since that expansion,
+  //   collected, or
+  // . we are not using adaptive free lists and linear allocation is
+  //   going to fail, or
+  // . (for old gen) incremental collection has already failed or
+  //   may soon fail in the near future as we may not be able to absorb
+  //   promotions.
+  assert_lock_strong(freelistLock());
+
+  if (occupancy() > initiatingOccupancy) {
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
+	short_name(), occupancy(), initiatingOccupancy);
+    }
+    return true;
+  }
+  // With this flag, occupancy is the *only* criterion considered.
+  if (UseCMSInitiatingOccupancyOnly) {
+    return false;
+  }
+  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print(" %s: collect because expanded for allocation ",
+	short_name());
+    }
+    return true;
+  }
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->collector_policy()->is_two_generation_policy(),
+         "You may want to check the correctness of the following");
+  if (gch->incremental_collection_will_fail()) {
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
+	short_name());
+    }
+    return true;
+  }
+  if (!_cmsSpace->adaptive_freelists() && 
+      _cmsSpace->linearAllocationWouldFail()) {
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print(" %s: collect because of linAB ",
+	short_name());
+    }
+    return true;
+  }
+  return false;
+}
+
+// Forward a (foreground) collection request to the shared CMS collector.
+void ConcurrentMarkSweepGeneration::collect(bool   full,
+                                            bool   clear_all_soft_refs,
+                                            size_t size,
+                                            bool   tlab)
+{
+  collector()->collect(full, clear_all_soft_refs, size, tlab);
+}
+
+// Foreground (stop-the-world) collection entry point. May be skipped for
+// debugging (UseCMSCollectionPassing off while a cycle is in progress) or
+// deferred when the GC locker is active, in which case the heap is simply
+// resized. Otherwise takes the baton from the background collector and
+// performs the collection synchronously.
+void CMSCollector::collect(bool   full,
+                           bool   clear_all_soft_refs,
+                           size_t size,
+                           bool   tlab)
+{
+  if (!UseCMSCollectionPassing && _collectorState > Idling) {
+    // For debugging purposes skip the collection if the state
+    // is not currently idle
+    if (TraceCMSState) {
+      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", 
+	Thread::current(), full, _collectorState);
+    }
+    return;
+  }
+
+  // The following "if" branch is present for defensive reasons.
+  // In the current uses of this interface, it can be replaced with:
+  // assert(!GC_locker.is_active(), "Can't be called otherwise");
+  // But I am not placing that assert here to allow future
+  // generality in invoking this interface.
+  if (GC_locker::is_active()) {
+    // A consistency test for GC_locker
+    assert(GC_locker::needs_gc(), "Should have been set already");
+    // Skip this foreground collection, instead
+    // expanding the heap if necessary.
+    // Need the free list locks for the call to free() in compute_new_size()
+    compute_new_size();
+    return;
+  }
+  acquire_control_and_collect(full, clear_all_soft_refs);
+  // Count foreground collections since the last concurrent cycle.
+  _full_gcs_since_conc_gc++;
+
+}
+
+// Request a (concurrent) full GC. The count comparison ensures the request
+// is recorded only if no full collection has completed since the caller
+// observed `full_gc_count`, avoiding a redundant back-to-back cycle.
+void CMSCollector::request_full_gc(unsigned int full_gc_count) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  unsigned int gc_count = gch->total_full_collections();
+  if (gc_count == full_gc_count) {
+    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
+    _full_gc_requested = true;
+    CGC_lock->notify();   // nudge CMS thread
+  }
+}
+  
+
+// The foreground and background collectors need to coordinate in order
+// to make sure that they do not mutually interfere with CMS collections.
+// When a background collection is active,
+// the foreground collector may need to take over (preempt) and
+// synchronously complete an ongoing collection. Depending on the 
+// frequency of the background collections and the heap usage
+// of the application, this preemption can be seldom or frequent.
+// There are only certain
+// points in the background collection that the "collection-baton"
+// can be passed to the foreground collector.
+//
+// The foreground collector will wait for the baton before
+// starting any part of the collection.  The foreground collector
+// will only wait at one location.
+//
+// The background collector will yield the baton before starting a new
+// phase of the collection (e.g., before initial marking, marking from roots,
+// precleaning, final re-mark, sweep etc.)  This is normally done at the head
+// of the loop which switches the phases. The background collector does some
+// of the phases (initial mark, final re-mark) with the world stopped.
+// Because of locking involved in stopping the world,
+// the foreground collector should not block waiting for the background
+// collector when it is doing a stop-the-world phase.  The background
+// collector will yield the baton at an additional point just before
+// it enters a stop-the-world phase.  Once the world is stopped, the
+// background collector checks the phase of the collection.  If the
+// phase has not changed, it proceeds with the collection.  If the
+// phase has changed, it skips that phase of the collection.  See
+// the comments on the use of the Heap_lock in collect_in_background().
+//
+// Variable used in baton passing.
+//   _foregroundGCIsActive - Set to true by the foreground collector when
+//	it wants the baton.  The foreground clears it when it has finished
+//	the collection.
+//   _foregroundGCShouldWait - Set to true by the background collector
+//        when it is running.  The foreground collector waits while
+//	_foregroundGCShouldWait is true.
+//  CGC_lock - monitor used to protect access to the above variables
+//	and to notify the foreground and background collectors.
+//  _collectorState - current state of the CMS collection.
+// 
+// The foreground collector 
+//   acquires the CGC_lock
+//   sets _foregroundGCIsActive
+//   waits on the CGC_lock for _foregroundGCShouldWait to be false
+//     various locks acquired in preparation for the collection
+//     are released so as not to block the background collector
+//     that is in the midst of a collection
+//   proceeds with the collection
+//   clears _foregroundGCIsActive
+//   returns
+//
+// The background collector in a loop iterating on the phases of the
+//	collection
+//   acquires the CGC_lock
+//   sets _foregroundGCShouldWait
+//   if _foregroundGCIsActive is set
+//     clears _foregroundGCShouldWait, notifies _CGC_lock
+//     waits on _CGC_lock for _foregroundGCIsActive to become false
+//     and exits the loop.
+//   otherwise
+//     proceed with that phase of the collection
+//     if the phase is a stop-the-world phase,
+//	 yield the baton once more just before enqueueing
+//	 the stop-world CMS operation (executed by the VM thread).
+//   returns after all phases of the collection are done
+//   
+
+void CMSCollector::acquire_control_and_collect(bool full,
+	bool clear_all_soft_refs) {
+  // Foreground (stop-the-world) entry point: executes the baton-passing
+  // protocol described above to take control from a possibly in-progress
+  // background collection, then performs either a compacting collection
+  // or a (possibly resumed) mark-sweep collection.
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+  assert(!Thread::current()->is_ConcurrentGC_thread(),
+         "shouldn't try to acquire control from self!");
+
+  // Start the protocol for acquiring control of the
+  // collection from the background collector (aka CMS thread).
+  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
+         "VM thread should have CMS token");
+  // Remember the possibly interrupted state of an ongoing
+  // concurrent collection
+  CollectorState first_state = _collectorState;
+
+  // Signal to a possibly ongoing concurrent collection that
+  // we want to do a foreground collection.
+  _foregroundGCIsActive = true;
+
+  // Disable incremental mode during a foreground collection.
+  ICMSDisabler icms_disabler;
+
+  // release locks and wait for a notify from the background collector
+  // releasing the locks is only necessary for phases which
+  // do yields to improve the granularity of the collection.
+  assert_lock_strong(bitMapLock());
+  // We need to lock the Free list lock for the space that we are
+  // currently collecting.
+  assert(haveFreelistLocks(), "Must be holding free list locks");
+  bitMapLock()->unlock();
+  releaseFreelistLocks();
+  {
+    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+    if (_foregroundGCShouldWait) {
+      // We are going to be waiting for action for the CMS thread;
+      // it had better not be gone (for instance at shutdown)!
+      assert(ConcurrentMarkSweepThread::cmst() != NULL,
+             "CMS thread must be running");
+      // Wait here until the background collector gives us the go-ahead
+      ConcurrentMarkSweepThread::clear_CMS_flag(
+        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
+      // Get a possibly blocked CMS thread going:
+      //   Note that we set _foregroundGCIsActive true above,
+      //   without protection of the CGC_lock.
+      CGC_lock->notify();
+      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
+             "Possible deadlock");
+      while (_foregroundGCShouldWait) {
+        // wait for notification
+        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
+        // Possibility of delay/starvation here, since CMS token does
+        // not know to give priority to VM thread? Actually, i think
+        // there wouldn't be any delay/starvation, but the proof of
+        // that "fact" (?) appears non-trivial. XXX 20011219YSR
+      }
+      ConcurrentMarkSweepThread::set_CMS_flag(
+        ConcurrentMarkSweepThread::CMS_vm_has_token);
+    }
+  }
+  // The CMS_token is already held.  Get back the other locks.
+  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
+         "VM thread should have CMS token");
+  getFreelistLocks();
+  bitMapLock()->lock_without_safepoint_check();
+  if (TraceCMSState) {
+    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
+      INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
+    gclog_or_tty->print_cr("	gets control with state %d", _collectorState);
+  }
+
+  // Check if we need to do a compaction, or if not, whether
+  // we need to start the mark-sweep from scratch.
+  bool should_compact    = false;
+  bool should_start_over = false;
+  decide_foreground_collection_type(clear_all_soft_refs,
+    &should_compact, &should_start_over);
+
+// Debug-build only: RotateCMSCollectionTypes lets tests force the
+// foreground collection type regardless of the policy decision above.
+NOT_PRODUCT(
+  if (RotateCMSCollectionTypes) {
+    if (_cmsGen->debug_collection_type() == 
+	ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
+      should_compact = true;
+    } else if (_cmsGen->debug_collection_type() == 
+	       ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
+      should_compact = false;
+    }
+  }
+)
+
+  if (PrintGCDetails && first_state > Idling) {
+    GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+    if (GCCause::is_user_requested_gc(cause) ||
+	GCCause::is_serviceability_requested_gc(cause)) {
+      gclog_or_tty->print(" (concurrent mode interrupted)");
+    } else {
+      gclog_or_tty->print(" (concurrent mode failure)");
+    }
+  }
+
+  if (should_compact) {
+    // If the collection is being acquired from the background
+    // collector, there may be references on the discovered
+    // references lists that have NULL referents (being those
+    // that were concurrently cleared by a mutator) or
+    // that are no longer active (having been enqueued concurrently
+    // by the mutator).
+    // Scrub the list of those references because Mark-Sweep-Compact
+    // code assumes referents are not NULL and that all discovered
+    // Reference objects are active.
+    ref_processor()->clean_up_discovered_references();
+
+    do_compaction_work(clear_all_soft_refs);
+
+    // Has the GC time limit been exceeded?
+    check_gc_time_limit();
+
+  } else {
+    do_mark_sweep_work(clear_all_soft_refs, first_state,
+      should_start_over);
+  }
+  // Reset the expansion cause, now that we just completed
+  // a collection cycle.
+  clear_expansion_cause();
+  _foregroundGCIsActive = false;
+  return;
+}
+
+void CMSCollector::check_gc_time_limit() {
+  // Checks whether the GC cost exceeds GCTimeLimit while the heap stays
+  // more than (100 - GCHeapFreeLimit)% full; after
+  // AdaptiveSizePolicyGCTimeLimitThreshold consecutive occurrences the
+  // size policy is flagged (set_gc_time_limit_exceeded), which callers
+  // use to report GC-overhead-limit failures.
+
+  // Ignore explicit GC's.  Exiting here does not set the flag and
+  // does not reset the count.  Updating of the averages for system
+  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
+  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
+  if (GCCause::is_user_requested_gc(gc_cause) ||
+      GCCause::is_serviceability_requested_gc(gc_cause)) {
+    return;
+  }
+
+  // Calculate the fraction of the CMS generation that was freed during
+  // the last collection. 
+  // Only consider the STW compacting cost for now.
+  //
+  // Note that the gc time limit test only works for the collections
+  // of the young gen + tenured gen and not for collections of the
+  // permanent gen.  That is because the calculation of the space
+  // freed by the collection is the free space in the young gen +
+  // tenured gen.
+
+  double fraction_free = 
+    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
+  if ((100.0 * size_policy()->compacting_gc_cost()) > 
+	 ((double) GCTimeLimit) &&
+	((fraction_free * 100) < GCHeapFreeLimit)) {
+    size_policy()->inc_gc_time_limit_count();
+    if (UseGCOverheadLimit && 
+	(size_policy()->gc_time_limit_count() > 
+	 AdaptiveSizePolicyGCTimeLimitThreshold)) {
+      size_policy()->set_gc_time_limit_exceeded(true);
+      // Avoid consecutive OOM due to the gc time limit by resetting
+      // the counter.
+      size_policy()->reset_gc_time_limit_count();
+      if (PrintGCDetails) {
+        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
+          "of %d%%", GCTimeLimit);
+      }
+    } else {
+      if (PrintGCDetails) {
+        gclog_or_tty->print_cr("      GC would exceed overhead limit "
+          "of %d%%", GCTimeLimit);
+      }
+    }
+  } else {
+    // Limit not currently exceeded: any accumulated streak is broken.
+    size_policy()->reset_gc_time_limit_count();
+  }
+}
+
+// Resize the perm generation and the tenured generation
+// after obtaining the free list locks for the
+// two generations.
+void CMSCollector::compute_new_size() {
+  assert_locked_or_safepoint(Heap_lock);
+  // FreelistLocker acquires the free list locks for the generations this
+  // collector manages and releases them when it goes out of scope.
+  FreelistLocker z(this);
+  _permGen->compute_new_size();
+  _cmsGen->compute_new_size();
+}
+
+// A work method used by foreground collection to determine
+// what type of collection (compacting or not, continuing or fresh)
+// it should do.
+// Out-parameters:
+//   *should_compact    - true if a mark-sweep-compact should be done.
+//   *should_start_over - true if the mark-sweep must restart from
+//                        scratch (refs discovery must be redone).
+// NOTE: the intent is to make UseCMSCompactAtFullCollection
+// and CMSCompactWhenClearAllSoftRefs the default in the future
+// and do away with the flags after a suitable period.
+void CMSCollector::decide_foreground_collection_type(
+  bool clear_all_soft_refs, bool* should_compact,
+  bool* should_start_over) {
+  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
+  // flag is set, and we have either requested a System.gc() or
+  // the number of full gc's since the last concurrent cycle
+  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
+  // or if an incremental collection has failed
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->collector_policy()->is_two_generation_policy(),
+         "You may want to check the correctness of the following");
+  // Inform cms gen if this was due to partial collection failing.
+  // The CMS gen may use this fact to determine its expansion policy.
+  if (gch->incremental_collection_will_fail()) {
+    assert(!_cmsGen->incremental_collection_failed(),
+           "Should have been noticed, reacted to and cleared");
+    _cmsGen->set_incremental_collection_failed();
+  }
+  *should_compact =
+    UseCMSCompactAtFullCollection &&
+    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
+     GCCause::is_user_requested_gc(gch->gc_cause()) ||
+     gch->incremental_collection_will_fail());
+  *should_start_over = false;
+  if (clear_all_soft_refs && !*should_compact) {
+    // We are about to do a last ditch collection attempt
+    // so it would normally make sense to do a compaction
+    // to reclaim as much space as possible.
+    if (CMSCompactWhenClearAllSoftRefs) {
+      // Default: The rationale is that in this case either
+      // we are past the final marking phase, in which case
+      // we'd have to start over, or so little has been done
+      // that there's little point in saving that work. Compaction
+      // appears to be the sensible choice in either case.
+      *should_compact = true;
+    } else {
+      // We have been asked to clear all soft refs, but not to
+      // compact. Make sure that we aren't past the final checkpoint
+      // phase, for that is where we process soft refs. If we are already
+      // past that phase, we'll need to redo the refs discovery phase and
+      // if necessary clear soft refs that weren't previously
+      // cleared. We do so by remembering the phase in which
+      // we came in, and if we are past the refs processing
+      // phase, we'll choose to just redo the mark-sweep
+      // collection from scratch.
+      if (_collectorState > FinalMarking) {
+        // We are past the refs processing phase;
+        // start over and do a fresh synchronous CMS cycle
+        _collectorState = Resetting; // skip to reset to start new cycle
+        reset(false /* == !asynch */);
+        *should_start_over = true;
+      } // else we can continue a possibly ongoing current cycle
+    }
+  }
+}
+
+// A work method used by the foreground collector to do
+// a mark-sweep-compact.  Temporarily reconfigures the reference
+// processor for single-threaded, atomic, whole-heap discovery, invokes
+// GenMarkSweep at a safepoint, and then resets CMS bookkeeping
+// (collector state, sweep census/timer, PLAB stats) for the next epoch.
+void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
+  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
+    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
+      "collections passed to foreground collector", _full_gcs_since_conc_gc);
+  }
+
+  // Sample collection interval time and reset for collection pause.
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->msc_collection_begin();
+  }
+
+  // Temporarily widen the span of the weak reference processing to
+  // the entire heap.
+  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
+  ReferenceProcessorSpanMutator x(ref_processor(), new_span);
+
+  // Temporarily, clear the "is_alive_non_header" field of the
+  // reference processor.
+  ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
+
+  // Temporarily make reference _processing_ single threaded (non-MT).
+  ReferenceProcessorMTProcMutator z(ref_processor(), false);
+
+  // Temporarily make refs discovery atomic
+  ReferenceProcessorAtomicMutator w(ref_processor(), true);
+
+  ref_processor()->set_enqueuing_is_done(false);
+  ref_processor()->enable_discovery();
+  // If an asynchronous collection finishes, the _modUnionTable is
+  // all clear.  If we are taking over the collection from an asynchronous
+  // collection that was interrupted, clear the _modUnionTable.
+  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
+    "_modUnionTable should be clear if the baton was not passed");
+  _modUnionTable.clear_all();
+
+  // We must adjust the allocation statistics being maintained
+  // in the free list space. We do so by reading and clearing
+  // the sweep timer and updating the block flux rate estimates below.
+  assert(_sweep_timer.is_active(), "We should never see the timer inactive");
+  _sweep_timer.stop();
+  // Note that we do not use this sample to update the _sweep_estimate.
+  _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
+                                          _sweep_estimate.padded_average());
+  
+  GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
+    ref_processor(), clear_all_soft_refs);
+  #ifdef ASSERT
+    // After compaction all the free space should have been coalesced
+    // into a single chunk at the top of the space; verify that here.
+    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
+    size_t free_size = cms_space->free();
+    assert(free_size ==
+           pointer_delta(cms_space->end(), cms_space->compaction_top())
+           * HeapWordSize,
+      "All the free space should be compacted into one chunk at top");
+    assert(cms_space->dictionary()->totalChunkSize(
+                                      debug_only(cms_space->freelistLock())) == 0 ||
+           cms_space->totalSizeInIndexedFreeLists() == 0,
+      "All the free space should be in a single chunk");
+    size_t num = cms_space->totalCount();
+    assert((free_size == 0 && num == 0) ||
+           (free_size > 0  && (num == 1 || num == 2)),
+         "There should be at most 2 free chunks after compaction");
+  #endif // ASSERT
+  _collectorState = Resetting;
+  assert(_restart_addr == NULL,
+         "Should have been NULL'd before baton was passed");
+  reset(false /* == !asynch */);
+  _cmsGen->reset_after_compaction();
+
+  if (verifying() && !cms_should_unload_classes()) {
+    perm_gen_verify_bit_map()->clear_all();
+  }
+
+  // Clear any data recorded in the PLAB chunk arrays.
+  if (_survivor_plab_array != NULL) {
+    reset_survivor_plab_arrays();
+  }
+
+  // Adjust the per-size allocation stats for the next epoch.
+  _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
+  // Restart the "sweep timer" for next epoch.
+  _sweep_timer.reset();
+  _sweep_timer.start();
+  
+  // Sample collection pause time and reset for collection interval.
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->msc_collection_end(gch->gc_cause());
+  }
+
+  // For a mark-sweep-compact, compute_new_size() will be called
+  // in the heap's do_collection() method.
+}
+
+// A work method used by the foreground collector to do
+// a mark-sweep, after taking over from a possibly on-going
+// concurrent mark-sweep collection.  first_state is the collector
+// state observed when the baton was acquired; should_start_over
+// indicates the cycle was reset and must begin from scratch.
+void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
+  CollectorState first_state, bool should_start_over) {
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
+      "collector with count %d",
+      _full_gcs_since_conc_gc);
+  }
+  switch (_collectorState) {
+    case Idling:
+      if (first_state == Idling || should_start_over) {
+        // The background GC was not active, or should be
+        // restarted from scratch;  start the cycle.
+        _collectorState = InitialMarking;
+      }
+      // If first_state was not Idling, then a background GC
+      // was in progress and has now finished.  No need to do it
+      // again.  Leave the state as Idling.
+      break;
+    case Precleaning:
+      // In the foreground case don't do the precleaning since
+      // it is not done concurrently and there is extra work
+      // required.
+      _collectorState = FinalMarking;
+      // (Any other state is left untouched; collect_in_foreground()
+      // below resumes the state machine from wherever it stands.)
+  }
+  if (PrintGCDetails &&
+      (_collectorState > Idling ||
+       !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
+    gclog_or_tty->print(" (concurrent mode failure)");
+  }
+  collect_in_foreground(clear_all_soft_refs);
+
+  // For a mark-sweep, compute_new_size() will be called
+  // in the heap's do_collection() method.
+}
+
+
+void CMSCollector::getFreelistLocks() const {
+  // Get locks for all free lists in all generations that this
+  // collector is responsible for
+  // (acquired in a fixed order: cms gen first, then perm gen).
+  _cmsGen->freelistLock()->lock_without_safepoint_check();
+  _permGen->freelistLock()->lock_without_safepoint_check();
+}
+
+void CMSCollector::releaseFreelistLocks() const {
+  // Release locks for all free lists in all generations that this
+  // collector is responsible for
+  // (counterpart of getFreelistLocks()).
+  _cmsGen->freelistLock()->unlock();
+  _permGen->freelistLock()->unlock();
+}
+
+bool CMSCollector::haveFreelistLocks() const {
+  // Check locks for all free lists in all generations that this
+  // collector is responsible for
+  assert_lock_strong(_cmsGen->freelistLock());
+  assert_lock_strong(_permGen->freelistLock());
+  // Intended for use inside assert() only: in product builds the checks
+  // above compile away, so reaching this code in product is an error.
+  PRODUCT_ONLY(ShouldNotReachHere());
+  return true;
+}
+
+// A utility class that is used by the CMS collector to
+// temporarily "release" the foreground collector from its
+// usual obligation to wait for the background collector to
+// complete an ongoing phase before proceeding.
+// Constructed around the background collector's stop-the-world
+// phases; the destructor re-establishes the wait obligation.
+class ReleaseForegroundGC: public StackObj {
+ private:
+  CMSCollector* _c;
+ public:
+  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
+    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
+    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+    // allow a potentially blocked foreground collector to proceed
+    _c->_foregroundGCShouldWait = false;
+    if (_c->_foregroundGCIsActive) {
+      CGC_lock->notify();
+    }
+    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+           "Possible deadlock");
+  }
+
+  ~ReleaseForegroundGC() {
+    // Restore the invariant: the foreground collector must once again
+    // wait for the background collector.
+    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
+    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+    _c->_foregroundGCShouldWait = true;
+  }
+};
+
+// There are separate collect_in_background and collect_in_foreground because of
+// the different locking requirements of the background collector and the
+// foreground collector.  There was originally an attempt to share
+// one "collect" method between the background collector and the foreground
+// collector but the if-then-else required made it cleaner to have
+// separate methods.
+// Runs the full CMS phase state machine on the CMS thread, yielding the
+// baton to a foreground collection at phase boundaries (see the
+// baton-passing protocol comments earlier in this file).
+void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
+  assert(Thread::current()->is_ConcurrentGC_thread(),
+    "A CMS asynchronous collection is only allowed on a CMS thread.");
+    
+  GenCollectedHeap* gch = GenCollectedHeap::heap(); 
+  {
+    bool safepoint_check = Mutex::_no_safepoint_check_flag;
+    MutexLockerEx hl(Heap_lock, safepoint_check);
+    MutexLockerEx x(CGC_lock, safepoint_check);
+    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
+      // The foreground collector is active or we're
+      // not using asynchronous collections.  Skip this
+      // background collection.
+      assert(!_foregroundGCShouldWait, "Should be clear");
+      return;
+    } else {
+      assert(_collectorState == Idling, "Should be idling before start.");
+      _collectorState = InitialMarking;
+      // Reset the expansion cause, now that we are about to begin
+      // a new cycle.
+      clear_expansion_cause();
+    }
+    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
+    // This controls class unloading in response to an explicit gc request.
+    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
+    // we will unload classes even if CMSClassUnloadingEnabled is not set.
+    // See CR 6541037 and related CRs.
+    _unload_classes = _full_gc_requested                      // ... for this cycle
+                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
+    _full_gc_requested = false;           // acks all outstanding full gc requests
+    // Signal that we are about to start a collection
+    gch->increment_total_full_collections();  // ... starting a collection cycle
+    _collection_count_start = gch->total_full_collections();
+  }
+
+  // Used for PrintGC
+  size_t prev_used;
+  if (PrintGC && Verbose) {
+    prev_used = _cmsGen->used(); // XXXPERM
+  }
+
+  // The change of the collection state is normally done at this level;
+  // the exceptions are phases that are executed while the world is
+  // stopped.  For those phases the change of state is done while the
+  // world is stopped.  For baton passing purposes this allows the 
+  // background collector to finish the phase and change state atomically.
+  // The foreground collector cannot wait on a phase that is done
+  // while the world is stopped because the foreground collector already
+  // has the world stopped and would deadlock.
+  while (_collectorState != Idling) {
+    if (TraceCMSState) {
+      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 
+	Thread::current(), _collectorState);
+    }
+    // The foreground collector 
+    //   holds the Heap_lock throughout its collection.
+    //	 holds the CMS token (but not the lock)
+    //     except while it is waiting for the background collector to yield.
+    //
+    // The foreground collector should be blocked (not for long)
+    //   if the background collector is about to start a phase
+    //   executed with world stopped.  If the background
+    //   collector has already started such a phase, the
+    //   foreground collector is blocked waiting for the
+    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
+    //   are executed in the VM thread.
+    //
+    // The locking order is
+    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
+    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
+    //   CMS token  (claimed in
+    //                stop_world_and_do() -->
+    //                  safepoint_synchronize() -->
+    //                    CMSThread::synchronize())
+
+    {
+      // Check if the FG collector wants us to yield.
+      CMSTokenSync x(true); // is cms thread
+      if (waitForForegroundGC()) {
+        // We yielded to a foreground GC, nothing more to be
+        // done this round.
+        assert(_foregroundGCShouldWait == false, "We set it to false in "
+               "waitForForegroundGC()");
+        if (TraceCMSState) {
+          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
+            " exiting collection CMS state %d", 
+            Thread::current(), _collectorState);
+        }
+        return;
+      } else {
+        // The background collector can run but check to see if the
+        // foreground collector has done a collection while the
+        // background collector was waiting to get the CGC_lock
+        // above.  If yes, break so that _foregroundGCShouldWait
+        // is cleared before returning.
+        if (_collectorState == Idling) {
+          break;
+        }
+      }
+    }
+
+    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
+      "should be waiting");
+
+    switch (_collectorState) {
+      case InitialMarking:
+        {
+          // Release the foreground collector while the stop-world
+          // initial mark runs in the VM thread (avoids deadlock).
+          ReleaseForegroundGC x(this);
+	  stats().record_cms_begin();
+
+          VM_CMS_Initial_Mark_Operation initial_mark_op(this);
+	  VMThread::execute(&initial_mark_op);
+        }
+	// The collector state may be any legal state at this point
+	// since the background collector may have yielded to the
+	// foreground collector.
+	break;
+      case Marking:
+	// initial marking in checkpointRootsInitialWork has been completed
+        if (markFromRoots(true)) { // we were successful
+	  assert(_collectorState == Precleaning, "Collector state should "
+	    "have changed");
+        } else {
+          assert(_foregroundGCIsActive, "Internal state inconsistency");
+        }
+	break;
+      case Precleaning:
+	if (UseAdaptiveSizePolicy) {
+          size_policy()->concurrent_precleaning_begin();
+	}
+	// marking from roots in markFromRoots has been completed
+	preclean();
+	if (UseAdaptiveSizePolicy) {
+          size_policy()->concurrent_precleaning_end();
+	}
+	assert(_collectorState == AbortablePreclean ||
+               _collectorState == FinalMarking,
+               "Collector state should have changed");
+	break;
+      case AbortablePreclean:
+	if (UseAdaptiveSizePolicy) {
+        size_policy()->concurrent_phases_resume();
+	}
+        abortable_preclean();
+	if (UseAdaptiveSizePolicy) {
+          size_policy()->concurrent_precleaning_end();
+	}
+        assert(_collectorState == FinalMarking, "Collector state should "
+          "have changed");
+        break;
+      case FinalMarking:
+        {
+          // Release the foreground collector while the stop-world
+          // final re-mark runs in the VM thread.
+          ReleaseForegroundGC x(this);
+
+          VM_CMS_Final_Remark_Operation final_remark_op(this);
+          VMThread::execute(&final_remark_op);
+	  }
+        assert(_foregroundGCShouldWait, "block post-condition");
+	break;
+      case Sweeping:
+	if (UseAdaptiveSizePolicy) {
+          size_policy()->concurrent_sweeping_begin();
+	}
+	// final marking in checkpointRootsFinal has been completed
+        sweep(true);
+	assert(_collectorState == Resizing, "Collector state change "
+	  "to Resizing must be done under the free_list_lock");
+        _full_gcs_since_conc_gc = 0;
+
+        // Stop the timers for adaptive size policy for the concurrent phases
+        if (UseAdaptiveSizePolicy) {
+          size_policy()->concurrent_sweeping_end();
+          size_policy()->concurrent_phases_end(gch->gc_cause(),
+					     gch->prev_gen(_cmsGen)->capacity(),
+                                             _cmsGen->free());
+	}
+
+      // NOTE: no break above -- sweep() has moved _collectorState to
+      // Resizing (see the assert), and control falls through into the
+      // Resizing case below.
+      case Resizing: {
+        // Sweeping has been completed...
+        // At this point the background collection has completed.
+        // Don't move the call to compute_new_size() down
+        // into code that might be executed if the background
+        // collection was preempted.
+        {
+          ReleaseForegroundGC x(this);   // unblock FG collection
+          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
+          CMSTokenSync        z(true);   // not strictly needed.
+          if (_collectorState == Resizing) {
+            compute_new_size();
+            _collectorState = Resetting;
+          } else {
+            assert(_collectorState == Idling, "The state should only change"
+                   " because the foreground collector has finished the collection");
+          }
+        }
+        break;
+      }
+      case Resetting:
+	// CMS heap resizing has been completed
+        reset(true);
+	assert(_collectorState == Idling, "Collector state should "
+	  "have changed");
+	stats().record_cms_end();
+	// Don't move the concurrent_phases_end() and compute_new_size()
+	// calls to here because a preempted background collection
+	// has it's state set to "Resetting".
+	break;
+      case Idling:
+      default:
+	ShouldNotReachHere();
+	break;
+    }
+    if (TraceCMSState) {
+      gclog_or_tty->print_cr("	Thread " INTPTR_FORMAT " done - next CMS state %d", 
+	Thread::current(), _collectorState);
+    }
+    assert(_foregroundGCShouldWait, "block post-condition");
+  }
+
+  // Should this be in gc_epilogue? 
+  collector_policy()->counters()->update_counters();
+
+  {
+    // Clear _foregroundGCShouldWait and, in the event that the
+    // foreground collector is waiting, notify it, before
+    // returning.
+    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+    _foregroundGCShouldWait = false;
+    if (_foregroundGCIsActive) {
+      CGC_lock->notify();
+    }
+    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+           "Possible deadlock");
+  }
+  if (TraceCMSState) {
+    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
+      " exiting collection CMS state %d", 
+      Thread::current(), _collectorState);
+  }
+  if (PrintGC && Verbose) {
+    _cmsGen->print_heap_change(prev_used);
+  }
+}
+
+// Synchronous version of the CMS state machine, run by the VM thread
+// with the world stopped after it has acquired the baton: completes the
+// remaining phases without yielding; precleaning phases are elided.
+void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
+  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
+         "Foreground collector should be waiting, not executing");
+  assert(Thread::current()->is_VM_thread(), "A foreground collection" 
+    "may only be done by the VM Thread with the world stopped");
+  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
+         "VM thread should have CMS token");
+
+  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, 
+    true, gclog_or_tty);)
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->ms_collection_begin();
+  }
+  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
+
+  HandleMark hm;  // Discard invalid handles created during verification
+
+  if (VerifyBeforeGC &&
+      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+    Universe::verify(true);
+  }
+
+  bool init_mark_was_synchronous = false; // until proven otherwise
+  while (_collectorState != Idling) {
+    if (TraceCMSState) {
+      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 
+	Thread::current(), _collectorState);
+    }
+    switch (_collectorState) {
+      case InitialMarking:
+        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
+        checkpointRootsInitial(false);
+	assert(_collectorState == Marking, "Collector state should have changed"
+	  " within checkpointRootsInitial()");
+	break;
+      case Marking:
+	// initial marking in checkpointRootsInitialWork has been completed
+        if (VerifyDuringGC &&
+            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+          // NOTE(review): message label says "initial mark" although this
+          // verification runs before marking from roots -- confirm intent.
+          gclog_or_tty->print("Verify before initial mark: ");
+          Universe::verify(true);
+        }
+        { 
+          bool res = markFromRoots(false);
+	  assert(res && _collectorState == FinalMarking, "Collector state should "
+	    "have changed");
+	  break;
+        }
+      case FinalMarking:
+        if (VerifyDuringGC &&
+            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+          gclog_or_tty->print("Verify before re-mark: ");
+          Universe::verify(true);
+        }
+        checkpointRootsFinal(false, clear_all_soft_refs,
+                             init_mark_was_synchronous);
+	assert(_collectorState == Sweeping, "Collector state should not "
+	  "have changed within checkpointRootsFinal()");
+	break;
+      case Sweeping:
+	// final marking in checkpointRootsFinal has been completed
+        if (VerifyDuringGC &&
+            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+          gclog_or_tty->print("Verify before sweep: ");
+          Universe::verify(true);
+        }
+        sweep(false);
+	assert(_collectorState == Resizing, "Incorrect state");
+	break;
+      case Resizing: {
+        // Sweeping has been completed; the actual resize in this case
+        // is done separately; nothing to be done in this state.
+        _collectorState = Resetting;
+        break;
+      }
+      case Resetting:
+	// The heap has been resized.
+        if (VerifyDuringGC &&
+            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+          gclog_or_tty->print("Verify before reset: ");
+          Universe::verify(true);
+        }
+        reset(false);
+	assert(_collectorState == Idling, "Collector state should "
+	  "have changed");
+	break;
+      case Precleaning:
+      case AbortablePreclean:
+        // Elide the preclean phase
+        _collectorState = FinalMarking;
+        break;
+      default:
+	ShouldNotReachHere();
+    }
+    if (TraceCMSState) {
+      gclog_or_tty->print_cr("	Thread " INTPTR_FORMAT " done - next CMS state %d", 
+	Thread::current(), _collectorState);
+    }
+  }
+
+  if (UseAdaptiveSizePolicy) {
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    size_policy()->ms_collection_end(gch->gc_cause());
+  }
+
+  if (VerifyAfterGC &&
+      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+    Universe::verify(true);
+  }
+  if (TraceCMSState) {
+    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
+      " exiting collection CMS state %d", 
+      Thread::current(), _collectorState);
+  }
+}
+
+// Called by the background (CMS) collector, which must hold the CMS
+// token on entry. If a foreground (stop-world) collection is active,
+// this hands the CMS token to it, blocks until the foreground
+// collection completes, and then re-acquires the token.
+// Returns true iff the background collector yielded.
+bool CMSCollector::waitForForegroundGC() {
+  bool res = false;
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should have CMS token");
+  // Block the foreground collector until the
+  // background collector decides whether to
+  // yield.
+  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+  _foregroundGCShouldWait = true;
+  if (_foregroundGCIsActive) {
+    // The background collector yields to the
+    // foreground collector and returns a value
+    // indicating that it has yielded.  The foreground
+    // collector can proceed.
+    res = true;
+    _foregroundGCShouldWait = false;
+    // Hand over the CMS token: clear "has", set "wants", so the
+    // foreground collector may take it while we wait.
+    ConcurrentMarkSweepThread::clear_CMS_flag(
+      ConcurrentMarkSweepThread::CMS_cms_has_token);
+    ConcurrentMarkSweepThread::set_CMS_flag(
+      ConcurrentMarkSweepThread::CMS_cms_wants_token);
+    // Get a possibly blocked foreground thread going
+    CGC_lock->notify();
+    if (TraceCMSState) {
+      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
+        Thread::current(), _collectorState);
+    }
+    // Wait (releasing CGC_lock) until the foreground collection is done.
+    while (_foregroundGCIsActive) {
+      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
+    }
+    // Foreground collection finished; reclaim the CMS token.
+    ConcurrentMarkSweepThread::set_CMS_flag(
+      ConcurrentMarkSweepThread::CMS_cms_has_token);
+    ConcurrentMarkSweepThread::clear_CMS_flag(
+      ConcurrentMarkSweepThread::CMS_cms_wants_token);
+  }
+  if (TraceCMSState) {
+    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
+      Thread::current(), _collectorState);
+  }
+  return res;
+}
+
+// Because of the need to lock the free lists and other structures in
+// the collector, common to all the generations that the collector is
+// collecting, we need the gc_prologues of individual CMS generations
+// delegate to their collector. It may have been simpler had the
+// current infrastructure allowed one to call a prologue on a
+// collector. In the absence of that we have the generation's
+// prologue delegate to the collector, which delegates back
+// some "local" work to a worker method in the individual generations
+// that it's responsible for collecting, while itself doing any
+// work common to all generations it's responsible for. A similar
+// comment applies to the  gc_epilogue()'s.
+// The role of the variable _between_prologue_and_epilogue is to
+// enforce the invocation protocol.
+// Acquire the locks common to all CMS "generations" (free list locks
+// and the bit map lock), then run gc_prologue_work() on each of the
+// CMS and perm generations this collector is responsible for.
+// _between_prologue_and_epilogue ensures only the first delegating
+// generation does the work; the flag is cleared in gc_epilogue().
+void CMSCollector::gc_prologue(bool full) {
+  // Call gc_prologue_work() for each CMSGen and PermGen that
+  // we are responsible for.
+
+  // The following locking discipline assumes that we are only called
+  // when the world is stopped.
+  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
+
+  // The CMSCollector prologue must call the gc_prologues for the
+  // "generations" (including PermGen if any) that it's responsible
+  // for.
+
+  assert(   Thread::current()->is_VM_thread()
+         || (   CMSScavengeBeforeRemark
+             && Thread::current()->is_ConcurrentGC_thread()),
+         "Incorrect thread type for prologue execution");
+
+  if (_between_prologue_and_epilogue) {
+    // We have already been invoked; this is a gc_prologue delegation
+    // from yet another CMS generation that we are responsible for, just
+    // ignore it since all relevant work has already been done.
+    return;
+  }
+  
+  // set a bit saying prologue has been called; cleared in epilogue
+  _between_prologue_and_epilogue = true;
+  // Claim locks for common data structures, then call gc_prologue_work()
+  // for each CMSGen and PermGen that we are responsible for.
+
+  getFreelistLocks();   // gets free list locks on constituent spaces
+  bitMapLock()->lock_without_safepoint_check();
+
+  // Should call gc_prologue_work() for all cms gens we are responsible for
+  // Register the mod-union closure only while concurrent marking is in
+  // progress, i.e. collector state in [Marking, Sweeping).
+  bool registerClosure =    _collectorState >= Marking
+                         && _collectorState < Sweeping;
+  // Use the parallel variant of the closure when parallel GC threads exist.
+  ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
+                                               : &_modUnionClosure;
+  _cmsGen->gc_prologue_work(full, registerClosure, muc);
+  _permGen->gc_prologue_work(full, registerClosure, muc);
+
+  if (!full) {
+    stats().record_gc0_begin();
+  }
+}
+
+// Public prologue entry point for this generation (called by the heap).
+void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
+  // Delegate to CMScollector which knows how to coordinate between
+  // this and any other CMS generations that it is responsible for
+  // collecting.
+  collector()->gc_prologue(full);
+}
+
+// This is a "private" interface for use by this generation's CMSCollector.
+// Not to be called directly by any other entity (for instance,
+// GenCollectedHeap, which calls the "public" gc_prologue method above).
+// Per-generation prologue work, invoked (with the common locks already
+// held) by CMSCollector::gc_prologue(). Optionally registers the given
+// mod-union closure as the space's pre-consumption dirty-card closure,
+// delegates to the space's own prologue, and (debug builds only)
+// reports and resets the concurrent-allocation statistics counters.
+void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
+  bool registerClosure, ModUnionClosure* modUnionClosure) {
+  assert(!incremental_collection_failed(), "Shouldn't be set yet");
+  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
+    "Should be NULL");
+  if (registerClosure) {
+    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
+  }
+  cmsSpace()->gc_prologue();
+  // Clear stat counters
+  NOT_PRODUCT(
+    assert(_numObjectsPromoted == 0, "check");
+    assert(_numWordsPromoted   == 0, "check");
+    if (Verbose && PrintGC) {
+      gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
+                          SIZE_FORMAT" bytes concurrently",
+      _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
+    }
+    _numObjectsAllocated = 0;
+    _numWordsAllocated   = 0;
+  )
+}
+
+// Counterpart of gc_prologue(): runs gc_epilogue_work() on each CMS
+// generation, updates counters and sampling state, then releases the
+// bit map lock and the free list locks acquired in the prologue and
+// clears _between_prologue_and_epilogue for the next cycle.
+void CMSCollector::gc_epilogue(bool full) {
+  // The following locking discipline assumes that we are only called
+  // when the world is stopped.
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "world is stopped assumption");
+
+  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
+  // if linear allocation blocks need to be appropriately marked to allow the
+  // the blocks to be parsable. We also check here whether we need to nudge the
+  // CMS collector thread to start a new cycle (if it's not already active).
+  assert(   Thread::current()->is_VM_thread()
+         || (   CMSScavengeBeforeRemark
+             && Thread::current()->is_ConcurrentGC_thread()),
+         "Incorrect thread type for epilogue execution");
+  
+  if (!_between_prologue_and_epilogue) {
+    // We have already been invoked; this is a gc_epilogue delegation
+    // from yet another CMS generation that we are responsible for, just
+    // ignore it since all relevant work has already been done.
+    return;
+  }
+  assert(haveFreelistLocks(), "must have freelist locks");
+  assert_lock_strong(bitMapLock());
+
+  _cmsGen->gc_epilogue_work(full);
+  _permGen->gc_epilogue_work(full);
+
+  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
+    // in case sampling was not already enabled, enable it
+    _start_sampling = true;
+  }
+  // reset _eden_chunk_array so sampling starts afresh
+  _eden_chunk_index = 0;
+
+  size_t cms_used   = _cmsGen->cmsSpace()->used();
+  size_t perm_used  = _permGen->cmsSpace()->used();
+
+  // update performance counters - this uses a special version of
+  // update_counters() that allows the utilization to be passed as a
+  // parameter, avoiding multiple calls to used().
+  //
+  _cmsGen->update_counters(cms_used);
+  _permGen->update_counters(perm_used);
+
+  if (CMSIncrementalMode) {
+    icms_update_allocation_limits();
+  }
+
+  // Release locks in the reverse of prologue acquisition order.
+  bitMapLock()->unlock();
+  releaseFreelistLocks();
+
+  _between_prologue_and_epilogue = false;  // ready for next cycle
+}
+
+// Public epilogue entry point for this generation: delegates to the
+// collector, then stops promotion tracking in the per-thread parallel
+// GC states (tracking is restarted in save_marks()).
+void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
+  collector()->gc_epilogue(full);
+
+  // Also reset promotion tracking in par gc thread states.
+  if (ParallelGCThreads > 0) {
+    for (uint i = 0; i < ParallelGCThreads; i++) {
+      _par_gc_thread_states[i]->promo.stopTrackingPromotions();
+    }
+  }
+}
+
+// Per-generation epilogue work, invoked (with the common locks still
+// held) by CMSCollector::gc_epilogue(). Unregisters the dirty-card
+// closure, delegates to the space's epilogue, and (debug builds only)
+// reports and resets promotion statistics counters.
+void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
+  assert(!incremental_collection_failed(), "Should have been cleared");
+  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
+  cmsSpace()->gc_epilogue();
+  // Print stat counters
+  NOT_PRODUCT(
+    assert(_numObjectsAllocated == 0, "check");
+    assert(_numWordsAllocated == 0, "check");
+    if (Verbose && PrintGC) {
+      gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
+                          SIZE_FORMAT" bytes",
+                 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
+    }
+    _numObjectsPromoted = 0;
+    _numWordsPromoted   = 0;
+  )
+
+  if (PrintGC && Verbose) {
+    // Call down the chain in contiguous_available needs the freelistLock
+    // so print this out before releasing the freeListLock.
+    gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
+                        contiguous_available());
+  }
+}
+
+#ifndef PRODUCT
+// Debug-only helper: returns true if the current thread holds the CMS
+// token in the sense appropriate to its thread type (VM thread, CMS
+// thread, or a GC worker acting on the VM thread's behalf while
+// holding ParGCRareEvent_lock). Any other thread type returns false.
+bool CMSCollector::have_cms_token() {
+  Thread* thr = Thread::current();
+  if (thr->is_VM_thread()) {
+    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
+  } else if (thr->is_ConcurrentGC_thread()) {
+    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
+  } else if (thr->is_GC_task_thread()) {
+    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
+           ParGCRareEvent_lock->owned_by_self();
+  }
+  return false;
+}
+#endif
+
+// Check reachability of the given heap address in CMS generation,
+// treating all other generations as roots. Reports progress on the GC
+// log and returns the verification bit map's verdict; returns false
+// (after logging) if the verification marking could not be performed.
+bool CMSCollector::is_cms_reachable(HeapWord* addr) {
+  // We could "guarantee" below, rather than assert, but i'll
+  // leave these as "asserts" so that an adventurous debugger
+  // could try this in the product build provided some subset of
+  // the conditions were met, provided they were interested in the
+  // results and knew that the computation below wouldn't interfere
+  // with other concurrent computations mutating the structures
+  // being read or written.
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "Else mutations in object graph will make answer suspect");
+  assert(have_cms_token(), "Should hold cms token");
+  assert(haveFreelistLocks(), "must hold free list locks");
+  assert_lock_strong(bitMapLock());
+
+  // Clear the marking bit map array before starting, but, just
+  // for kicks, first report if the given address is already marked
+  // NOTE(review): "0x%x" with a pointer argument truncates on LP64
+  // platforms; INTPTR_FORMAT would be the portable choice -- confirm.
+  gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
+                _markBitMap.isMarked(addr) ? "" : " not");
+
+  if (verify_after_remark()) {
+    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
+    bool result = verification_mark_bm()->isMarked(addr);
+    gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
+                           result ? "IS" : "is NOT");
+    return result;
+  } else {
+    gclog_or_tty->print_cr("Could not compute result");
+    return false;
+  }
+}
+
+////////////////////////////////////////////////////////
+// CMS Verification Support
+////////////////////////////////////////////////////////
+// Following the remark phase, the following invariant
+// should hold -- each object in the CMS heap which is
+// marked in the verification_mark_bm() should also be marked in markBitMap().
+
+// Bit map closure used by the remark verification: for every bit set
+// in the iterated (verification) bit map, checks that the reference
+// bit map supplied at construction has the corresponding bit set too.
+// Any uncovered object is printed and recorded via the failure flag.
+class VerifyMarkedClosure: public BitMapClosure {
+  CMSBitMap* _ref_marks;   // map that must cover every iterated bit
+  bool       _mismatch;    // latched once any uncovered bit is found
+
+ public:
+  VerifyMarkedClosure(CMSBitMap* bm): _ref_marks(bm), _mismatch(false) {}
+
+  void do_bit(size_t offset) {
+    HeapWord* addr = _ref_marks->offsetToHeapWord(offset);
+    if (_ref_marks->isMarked(addr)) {
+      return;  // covered -- nothing to report
+    }
+    oop(addr)->print();
+    gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+    _mismatch = true;
+  }
+
+  bool failed() { return _mismatch; }
+};
+
+// Re-marks the heap into a separate verification bit map and checks it
+// against the CMS marking information (see verify_after_remark_work_*).
+// Returns true if verification ran, false if the verification bit map
+// could not be allocated on first use.
+bool CMSCollector::verify_after_remark() {
+  gclog_or_tty->print(" [Verifying CMS Marking... ");
+  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
+  static bool init = false;  // lazy one-time allocation of the bit map
+
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "Else mutations in object graph will make answer suspect");
+  assert(have_cms_token(),
+         "Else there may be mutual interference in use of "
+         " verification data structures");
+  assert(_collectorState > Marking && _collectorState <= Sweeping,
+         "Else marking info checked here may be obsolete");
+  assert(haveFreelistLocks(), "must hold free list locks");
+  assert_lock_strong(bitMapLock());
+
+
+  // Allocate marking bit map if not already allocated
+  if (!init) { // first time
+    if (!verification_mark_bm()->allocate(_span)) {
+      return false;
+    }
+    init = true;
+  }
+
+  assert(verification_mark_stack()->isEmpty(), "Should be empty");
+
+  // Turn off refs discovery -- so we will be tracing through refs.
+  // This is as intended, because by this time
+  // GC must already have cleared any refs that need to be cleared,
+  // and traced those that need to be marked; moreover,
+  // the marking done here is not going to interfere in any
+  // way with the marking information used by GC.
+  NoRefDiscovery no_discovery(ref_processor());
+
+  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+
+  // Clear any marks from a previous round
+  verification_mark_bm()->clear_all();
+  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
+  assert(overflow_list_is_empty(), "overflow list should be empty");
+
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  // Update the saved marks which may affect the root scans.
+  gch->save_marks();
+  
+  if (CMSRemarkVerifyVariant == 1) {
+    // In this first variant of verification, we complete
+    // all marking, then check if the new marks-vector is
+    // a subset of the CMS marks-vector.
+    verify_after_remark_work_1();
+  } else if (CMSRemarkVerifyVariant == 2) {
+    // In this second variant of verification, we flag an error
+    // (i.e. an object reachable in the new marks-vector not reachable
+    // in the CMS marks-vector) immediately, also indicating the
+    // identity of an object (A) that references the unmarked object (B) --
+    // presumably, a mutation to A failed to be picked up by preclean/remark?
+    verify_after_remark_work_2();
+  } else {
+    warning("Unrecognized value %d for CMSRemarkVerifyVariant",
+            CMSRemarkVerifyVariant);
+  }
+  gclog_or_tty->print(" done] ");
+  return true;
+}
+
+// Verification variant 1: perform a complete transitive marking into
+// the verification bit map, then check that every bit set there is
+// also set in the CMS markBitMap(); on any discrepancy, print the
+// offending objects and abort with a fatal error.
+void CMSCollector::verify_after_remark_work_1() {
+  ResourceMark rm;
+  HandleMark  hm;
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  // Mark from roots one level into CMS
+  MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
+  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  
+  gch->gen_process_strong_roots(_cmsGen->level(),
+                                true,   // younger gens are roots
+                                true,   // collecting perm gen
+                                SharedHeap::ScanningOption(roots_scanning_options()),
+                                NULL, &notOlder);
+
+  // Now mark from the roots
+  assert(_revisitStack.isEmpty(), "Should be empty");
+  MarkFromRootsClosure markFromRootsClosure(this, _span,
+    verification_mark_bm(), verification_mark_stack(), &_revisitStack,
+    false /* don't yield */, true /* verifying */);
+  assert(_restart_addr == NULL, "Expected pre-condition");
+  verification_mark_bm()->iterate(&markFromRootsClosure);
+  while (_restart_addr != NULL) {
+    // Deal with stack overflow: by restarting at the indicated
+    // address.
+    HeapWord* ra = _restart_addr;
+    markFromRootsClosure.reset(ra);
+    _restart_addr = NULL;
+    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
+  }
+  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
+  // Should reset the revisit stack above, since no class tree
+  // surgery is forthcoming.
+  _revisitStack.reset(); // throwing away all contents
+
+  // Marking completed -- now verify that each bit marked in
+  // verification_mark_bm() is also marked in markBitMap(); flag all
+  // errors by printing corresponding objects.
+  VerifyMarkedClosure vcl(markBitMap());
+  verification_mark_bm()->iterate(&vcl);
+  if (vcl.failed()) {
+    gclog_or_tty->print("Verification failed");
+    Universe::heap()->print();
+    fatal(" ... aborting");
+  }
+}
+
+// Verification variant 2: like variant 1, but uses the "verify"
+// closures which flag a missing CMS mark as soon as it is discovered
+// during the transitive marking (so the referencing object is known),
+// rather than only after marking completes.
+void CMSCollector::verify_after_remark_work_2() {
+  ResourceMark rm;
+  HandleMark  hm;
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  // Mark from roots one level into CMS
+  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
+                                     markBitMap(), true /* nmethods */);
+  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  gch->gen_process_strong_roots(_cmsGen->level(),
+				true,   // younger gens are roots
+				true,   // collecting perm gen
+                                SharedHeap::ScanningOption(roots_scanning_options()),
+				NULL, &notOlder);
+
+  // Now mark from the roots
+  assert(_revisitStack.isEmpty(), "Should be empty");
+  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
+    verification_mark_bm(), markBitMap(), verification_mark_stack());
+  assert(_restart_addr == NULL, "Expected pre-condition");
+  verification_mark_bm()->iterate(&markFromRootsClosure);
+  while (_restart_addr != NULL) {
+    // Deal with stack overflow: by restarting at the indicated
+    // address.
+    HeapWord* ra = _restart_addr;
+    markFromRootsClosure.reset(ra);
+    _restart_addr = NULL;
+    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
+  }
+  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
+  // Should reset the revisit stack above, since no class tree
+  // surgery is forthcoming.
+  _revisitStack.reset(); // throwing away all contents
+
+  // Marking completed -- now verify that each bit marked in
+  // verification_mark_bm() is also marked in markBitMap(); flag all
+  // errors by printing corresponding objects.
+  VerifyMarkedClosure vcl(markBitMap());
+  verification_mark_bm()->iterate(&vcl);
+  assert(!vcl.failed(), "Else verification above should not have succeeded");
+}
+
+// Record the current allocation point in the CMS space and restart
+// promotion tracking in every per-thread parallel GC state (stopped
+// again in gc_epilogue()).
+void ConcurrentMarkSweepGeneration::save_marks() {
+  // delegate to CMS space
+  cmsSpace()->save_marks();
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    _par_gc_thread_states[i]->promo.startTrackingPromotions();
+  }
+}
+
+// True iff no allocation has occurred in the CMS space since the last
+// save_marks(); delegates to the space.
+bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
+  return cmsSpace()->no_allocs_since_save_marks();
+}
+
+// For each (closure-type, nv-suffix) pair, define
+// oop_since_save_marks_iterate##nv_suffix: point the closure at this
+// generation, delegate the iteration to the CMS space, reset the
+// closure's generation, and re-save marks for the next round.
+#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
+                                                                \
+void ConcurrentMarkSweepGeneration::                            \
+oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
+  cl->set_generation(this);                                     \
+  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
+  cl->reset_generation();                                       \
+  save_marks();                                                 \
+}
+
+// Instantiate the definition above for all "since save marks" closures.
+ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
+
+// Iterate over objects allocated since the last GC; currently just
+// delegates to the CMS space (see the discussion below about what a
+// proper implementation would entail).
+void
+ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
+{
+  // Not currently implemented; need to do the following. -- ysr.
+  // dld -- I think that is used for some sort of allocation profiler.  So it
+  // really means the objects allocated by the mutator since the last
+  // GC.  We could potentially implement this cheaply by recording only
+  // the direct allocations in a side data structure.
+  //
+  // I think we probably ought not to be required to support these
+  // iterations at any arbitrary point; I think there ought to be some
+  // call to enable/disable allocation profiling in a generation/space,
+  // and the iterator ought to return the objects allocated in the
+  // gen/space since the enable call, or the last iterator call (which
+  // will probably be at a GC.)  That way, for gens like CM&S that would
+  // require some extra data structure to support this, we only pay the
+  // cost when it's in use...
+  cmsSpace()->object_iterate_since_last_GC(blk);
+}
+
+// Apply the closure to references from younger generations into this
+// generation's space, bracketing the iteration with set/reset of the
+// closure's generation.
+void
+ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
+  cl->set_generation(this);
+  younger_refs_in_space_iterate(_cmsSpace, cl);
+  cl->reset_generation();
+}
+
+// Oop-iterate over the given region, acquiring the free list lock
+// first unless the calling thread already holds it.
+void
+ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::oop_iterate(mr, cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::oop_iterate(mr, cl);
+  }
+}
+
+// Oop-iterate over the whole generation, acquiring the free list lock
+// first unless the calling thread already holds it.
+void
+ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::oop_iterate(cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::oop_iterate(cl);
+  }
+}
+
+// Object-iterate over the whole generation, acquiring the free list
+// lock first unless the calling thread already holds it.
+void
+ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::object_iterate(cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::object_iterate(cl);
+  }
+}
+
+// Intentionally a no-op for this generation.
+void
+ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
+}
+
+// Intentionally a no-op for this generation.
+void
+ConcurrentMarkSweepGeneration::post_compact() {
+}
+
+void
+ConcurrentMarkSweepGeneration::prepare_for_verify() {
+  // Fix the linear allocation blocks to look like free blocks.
+
+  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
+  // are not called when the heap is verified during universe initialization and
+  // at vm shutdown.
+  if (freelistLock()->owned_by_self()) {
+    cmsSpace()->prepare_for_verify();
+  } else {
+    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
+    cmsSpace()->prepare_for_verify();
+  }
+}
+
+// Verify the generation by delegating to the CMS space, acquiring the
+// free list lock first unless the calling thread already holds it.
+void
+ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
+  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
+  // are not called when the heap is verified during universe initialization and
+  // at vm shutdown.
+  if (freelistLock()->owned_by_self()) {
+    cmsSpace()->verify(false /* ignored */);
+  } else {
+    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
+    cmsSpace()->verify(false /* ignored */);
+  }
+}
+
+// Verify both generations this collector is responsible for.
+void CMSCollector::verify(bool allow_dirty /* ignored */) {
+  _cmsGen->verify(allow_dirty);
+  _permGen->verify(allow_dirty);
+}
+
+// Decide, for the cycle about to start, whether symbol/string/code
+// cache roots are scanned (they must be when classes are NOT being
+// unloaded and verification is on, to prevent resurrection) and
+// whether the perm-gen verification bit map must be (re)initialized.
+void CMSCollector::setup_cms_unloading_and_verification_state() {
+  const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
+                             || VerifyBeforeExit;
+  const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
+                             |   SharedHeap::SO_CodeCache;
+
+  if (cms_should_unload_classes()) {   // Should unload classes this cycle
+    remove_root_scanning_option(rso);  // Shrink the root set appropriately
+    set_verifying(should_verify);      // Set verification state for this cycle
+    return;                            // Nothing else needs to be done at this time
+  }
+
+  // Not unloading classes this cycle
+  assert(!cms_should_unload_classes(), "Inconsistency!");
+  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
+    // We were not verifying, or we _were_ unloading classes in the last cycle,
+    // AND some verification options are enabled this cycle; in this case,
+    // we must make sure that the deadness map is allocated if not already so,
+    // and cleared (if already allocated previously --
+    // CMSBitMap::sizeInBits() is used to determine if it's allocated).
+    if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
+      if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
+        warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
+                "permanent generation verification disabled");
+        return;  // Note that we leave verification disabled, so we'll retry this
+                 // allocation next cycle. We _could_ remember this failure
+                 // and skip further attempts and permanently disable verification
+                 // attempts if that is considered more desirable.
+      }
+      assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
+              "_perm_gen_ver_bit_map inconsistency?");
+    } else {
+      perm_gen_verify_bit_map()->clear_all();
+    }
+    // Include symbols, strings and code cache elements to prevent their resurrection.
+    add_root_scanning_option(rso);
+    set_verifying(true);
+  } else if (verifying() && !should_verify) {
+    // We were verifying, but some verification flags got disabled.
+    set_verifying(false);
+    // Exclude symbols, strings and code cache elements from root scanning to
+    // reduce IM and RM pauses.
+    remove_root_scanning_option(rso);
+  }
+}
+
+
+#ifndef PRODUCT
+// Debug-only: return the start of the block containing p, if p lies in
+// the span covered by this collector (CMS or perm space); NULL otherwise.
+HeapWord* CMSCollector::block_start(const void* p) const {
+  const HeapWord* addr = (HeapWord*)p;
+  if (_span.contains(p)) {
+    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
+      return _cmsGen->cmsSpace()->block_start(p);
+    } else {
+      // Within _span but not in the CMS space: must be in the perm space.
+      assert(_permGen->cmsSpace()->is_in_reserved(addr),
+	     "Inconsistent _span?");
+      return _permGen->cmsSpace()->block_start(p);
+    }
+  }
+  return NULL;
+}
+#endif
+
+// Expand the generation (under the free list lock) to satisfy an
+// allocation of word_size words, then attempt the allocation (parallel
+// or serial variant as requested). TLAB allocation is not supported.
+// May return NULL if the allocation still fails after expansion.
+HeapWord*
+ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
+                                                   bool   tlab,
+						   bool   parallel) {
+  assert(!tlab, "Can't deal with TLAB allocation");
+  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
+    CMSExpansionCause::_satisfy_allocation);
+  // Optional artificial delay between expansion and allocation
+  // (testing hook controlled by GCExpandToAllocateDelayMillis).
+  if (GCExpandToAllocateDelayMillis > 0) {
+    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
+  }
+  size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size);
+  if (parallel) {
+    return cmsSpace()->par_allocate(adj_word_sz);
+  } else {
+    return cmsSpace()->allocate(adj_word_sz);
+  }
+}
+
+// YSR: All of this generation expansion/shrinking stuff is an exact copy of
+// OneContigSpaceCardGeneration, which makes me wonder if we should move this
+// to CardGeneration and share it...
+//
+// Grow the generation by at least `bytes` (page-aligned up), preferring
+// the larger of that and `expand_bytes`; as a last resort, grow by
+// whatever uncommitted space remains. On success, records `cause` for
+// later consultation by shouldConcurrentCollect().
+void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
+  CMSExpansionCause::Cause cause)
+{
+  assert_locked_or_safepoint(Heap_lock);
+
+  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
+  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
+  bool success = false;
+  if (aligned_expand_bytes > aligned_bytes) {
+    success = grow_by(aligned_expand_bytes);
+  }
+  if (!success) {
+    success = grow_by(aligned_bytes);
+  }
+  if (!success) {
+    // Fall back to whatever uncommitted space is left.
+    size_t remaining_bytes = _virtual_space.uncommitted_size();
+    if (remaining_bytes > 0) {
+      success = grow_by(remaining_bytes);
+    }
+  }
+  if (GC_locker::is_active()) {
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
+    }
+  }
+  // remember why we expanded; this information is used
+  // by shouldConcurrentCollect() when making decisions on whether to start
+  // a new CMS cycle.
+  if (success) {
+    set_expansion_cause(cause);
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("Expanded CMS gen for %s", 
+	CMSExpansionCause::to_string(cause));
+    }
+  }
+}
+
+// Under ParGCRareEvent_lock, repeatedly try to allocate word_sz words
+// from the thread's parallel LAB, expanding the generation between
+// attempts. Returns NULL only when no uncommitted space remains.
+HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
+  HeapWord* res = NULL;
+  MutexLocker x(ParGCRareEvent_lock);
+  while (true) {
+    // Expansion by some other thread might make alloc OK now:
+    res = ps->lab.alloc(word_sz);
+    if (res != NULL) return res;
+    // If there's not enough expansion space available, give up.
+    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
+      return NULL;
+    }
+    // Otherwise, we try expansion.
+    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
+      CMSExpansionCause::_allocate_par_lab);
+    // Now go around the loop and try alloc again;
+    // A competing par_promote might beat us to the expansion space,
+    // so we may go around the loop again if promotion fails again.
+    if (GCExpandToAllocateDelayMillis > 0) {
+      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
+    }
+  }
+}
+
+
+// Under ParGCRareEvent_lock, repeatedly try to secure spooling space
+// for the given promotion info, expanding the generation between
+// attempts. Returns false only when no uncommitted space remains.
+bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
+  PromotionInfo* promo) {
+  MutexLocker x(ParGCRareEvent_lock);
+  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
+  while (true) {
+    // Expansion by some other thread might make alloc OK now:
+    if (promo->ensure_spooling_space()) {
+      assert(promo->has_spooling_space(),
+             "Post-condition of successful ensure_spooling_space()");
+      return true;
+    }
+    // If there's not enough expansion space available, give up.
+    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
+      return false;
+    }
+    // Otherwise, we try expansion.
+    expand(refill_size_bytes, MinHeapDeltaBytes,
+      CMSExpansionCause::_allocate_par_spooling_space);
+    // Now go around the loop and try alloc again;
+    // A competing allocation might beat us to the expansion space,
+    // so we may go around the loop again if allocation fails again.
+    if (GCExpandToAllocateDelayMillis > 0) {
+      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
+    }
+  }
+}
+
+
+
+// Shrink the generation by (at most) the requested number of bytes,
+// rounded down to a whole number of pages; a sub-page request is a no-op.
+void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
+  assert_locked_or_safepoint(Heap_lock);
+  const size_t aligned_size = ReservedSpace::page_align_size_down(bytes);
+  if (aligned_size == 0) {
+    return;  // request rounds down to nothing
+  }
+  shrink_by(aligned_size);
+}
+
+// Commit `bytes` more of the reserved virtual space and resize the
+// dependent data structures (block offset table, barrier set coverage,
+// space end, perf counters) to match. Returns false if the commit fails.
+bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
+  assert_locked_or_safepoint(Heap_lock);
+  bool result = _virtual_space.expand_by(bytes);
+  if (result) {
+    HeapWord* old_end = _cmsSpace->end();
+    size_t new_word_size = 
+      heap_word_size(_virtual_space.committed_size());
+    MemRegion mr(_cmsSpace->bottom(), new_word_size);
+    _bts->resize(new_word_size);  // resize the block offset shared array
+    Universe::heap()->barrier_set()->resize_covered_region(mr);
+    // Hmmmm... why doesn't CFLS::set_end verify locking?
+    // This is quite ugly; FIX ME XXX
+    _cmsSpace->assert_locked();
+    _cmsSpace->set_end((HeapWord*)_virtual_space.high());
+
+    // update the space and generation capacity counters
+    if (UsePerfData) {
+      _space_counters->update_capacity();
+      _gen_counters->update_all();
+    }
+
+    if (Verbose && PrintGC) {
+      size_t new_mem_size = _virtual_space.committed_size();
+      size_t old_mem_size = new_mem_size - bytes;
+      gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
+                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
+    }
+  }
+  return result;
+}
+
+// Commit whatever remains of this generation's reserved-but-uncommitted
+// virtual space.  Returns true if nothing remained to commit or the
+// expansion succeeded.
+bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
+  assert_locked_or_safepoint(Heap_lock);
+  const size_t uncommitted = _virtual_space.uncommitted_size();
+  if (uncommitted == 0) {
+    return true;  // already fully grown
+  }
+  bool success = grow_by(uncommitted);
+  DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+  return success;
+}
+
+// Shrinking requires compaction, which CMS does not (yet) implement;
+// emit a warning and do nothing.
+void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
+  assert_locked_or_safepoint(Heap_lock);
+  assert_lock_strong(freelistLock());
+  // XXX Fix when compaction is implemented.
+  warning("Shrinking of CMS not yet implemented");
+}
+
+
+// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
+// phases.
+class CMSPhaseAccounting: public StackObj {
+ public:
+  // "collector" is the collector being timed; "phase" is a short phase
+  // name used in log output; "print_cr" controls whether the summary
+  // line printed by the destructor is terminated with a newline.
+  CMSPhaseAccounting(CMSCollector *collector, 
+		     const char *phase, 
+		     bool print_cr = true);
+  ~CMSPhaseAccounting();
+
+ private:
+  CMSCollector *_collector;   // collector whose timer/yield stats we drive
+  const char *_phase;         // phase name for log messages
+  elapsedTimer _wallclock;    // wallclock timer covering this phase
+  bool _print_cr;             // emit trailing newline in dtor summary?
+
+ public:
+  // Not MT-safe; so do not pass around these StackObj's
+  // where they may be accessed by other threads.
+  // Returns wallclock time accumulated so far by momentarily stopping
+  // the timer (to record) and restarting it.
+  jlong wallclock_millis() {
+    assert(_wallclock.is_active(), "Wall clock should not stop")
;
+    _wallclock.stop();  // to record time
+    jlong ret = _wallclock.milliseconds();
+    _wallclock.start(); // restart
+    return ret;
+  }
+};
+
+// Prints the "[<gen>-concurrent-<phase>-start]" banner (when requested),
+// clears the collector's yield count, and starts both the collector's
+// timer and our wallclock timer.  Note the timers are started only
+// after any banner printing.
+CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
+				       const char *phase,
+				       bool print_cr) :
+  _collector(collector), _phase(phase), _print_cr(print_cr) {
+
+  if (PrintCMSStatistics != 0) {
+    _collector->resetYields();
+  }
+  if (PrintGCDetails && PrintGCTimeStamps) {
+    gclog_or_tty->stamp();
+    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]", 
+      _collector->cmsGen()->short_name(), _phase);
+  }
+  _collector->resetTimer();
+  _wallclock.start();
+  _collector->startTimer();
+}
+
+// Stops both timers and, with PrintGCDetails, prints the
+// "[<gen>-concurrent-<phase>: <cpu>/<wall> secs]" summary (plus the
+// phase's yield count when PrintCMSStatistics is enabled).
+CMSPhaseAccounting::~CMSPhaseAccounting() {
+  assert(_wallclock.is_active(), "Wall clock should not have stopped");
+  _collector->stopTimer();
+  _wallclock.stop();
+  if (PrintGCDetails) {
+    if (PrintGCTimeStamps) {
+      gclog_or_tty->stamp();
+      gclog_or_tty->print(": ");
+    }
+    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", 
+		 _collector->cmsGen()->short_name(),
+		 _phase, _collector->timerValue(), _wallclock.seconds());
+    if (_print_cr) {
+      gclog_or_tty->print_cr("");
+    }
+    if (PrintCMSStatistics != 0) {
+      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
+		    _collector->yields());
+    }
+  }
+}
+
+// CMS work
+
+// Checkpoint the roots into this generation from outside
+// this generation. [Note this initial checkpoint need only
+// be approximate -- we'll do a catch up phase subsequently.]
+// "asynch" distinguishes the concurrent (background) case, where we must
+// take the bit map lock ourselves, from the synchronous (foreground)
+// case, where the caller already holds the needed locks.  In both cases
+// we do the initial-mark work, enable weak refs discovery, and advance
+// _collectorState to Marking.
+void CMSCollector::checkpointRootsInitial(bool asynch) {
+  assert(_collectorState == InitialMarking, "Wrong collector state");
+  check_correct_thread_executing();
+  ReferenceProcessor* rp = ref_processor();
+  SpecializationStats::clear();
+  assert(_restart_addr == NULL, "Control point invariant");
+  if (asynch) {
+    // acquire locks for subsequent manipulations
+    MutexLockerEx x(bitMapLock(),
+                    Mutex::_no_safepoint_check_flag);
+    checkpointRootsInitialWork(asynch);
+    rp->verify_no_references_recorded();
+    rp->enable_discovery(); // enable ("weak") refs discovery
+    _collectorState = Marking;
+  } else {
+    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
+    // which recognizes if we are a CMS generation, and doesn't try to turn on
+    // discovery; verify that they aren't meddling.
+    assert(!rp->discovery_is_atomic(),
+           "incorrect setting of discovery predicate");
+    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
+           "ref discovery for this generation kind");
+    // already have locks
+    checkpointRootsInitialWork(asynch);
+    rp->enable_discovery(); // now enable ("weak") refs discovery
+    _collectorState = Marking;
+  }
+  SpecializationStats::print();
+}
+
+// STW work of the initial-mark checkpoint: with the world stopped and
+// the bit map lock held, process all strong roots (younger gens and the
+// perm gen included) marking the directly-referenced objects into
+// _markBitMap via the MarkRefsIntoClosure below.  The mod-union table
+// is expected to already be clear at this point.
+void CMSCollector::checkpointRootsInitialWork(bool asynch) {
+  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
+  assert(_collectorState == InitialMarking, "just checking");
+
+  // If there has not been a GC[n-1] since last GC[n] cycle completed,
+  // precede our marking with a collection of all
+  // younger generations to keep floating garbage to a minimum.
+  // XXX: we won't do this for now -- it's an optimization to be done later.
+
+  // already have locks
+  assert_lock_strong(bitMapLock());
+  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
+
+  // Setup the verification and class unloading state for this
+  // CMS collection cycle.
+  setup_cms_unloading_and_verification_state();
+
+  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", 
+    PrintGCDetails && Verbose, true, gclog_or_tty);)
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->checkpoint_roots_initial_begin();
+  }
+
+  // Reset all the PLAB chunk arrays if necessary.
+  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
+    reset_survivor_plab_arrays();
+  }
+
+  ResourceMark rm;
+  HandleMark  hm;
+
+  FalseClosure falseClosure;
+  // In the case of a synchronous collection, we will elide the
+  // remark step, so it's important to catch all the nmethod oops
+  // in this step; hence the last argument to the constructor below.
+  MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  assert(_markStack.isEmpty(), "markStack should be empty");
+  assert(overflow_list_is_empty(), "overflow list should be empty");
+  assert(no_preserved_marks(), "no preserved marks");
+  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  // Update the saved marks which may affect the root scans.
+  gch->save_marks();
+
+  // weak reference processing has not started yet.
+  ref_processor()->set_enqueuing_is_done(false);
+
+  {
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+    gch->gen_process_strong_roots(_cmsGen->level(),
+				  true,   // younger gens are roots
+				  true,   // collecting perm gen
+                                  SharedHeap::ScanningOption(roots_scanning_options()),
+				  NULL, &notOlder);
+  }
+
+  // Clear mod-union table; it will be dirtied in the prologue of
+  // CMS generation per each younger generation collection.
+
+  assert(_modUnionTable.isAllClear(),
+       "Was cleared in most recent final checkpoint phase"
+       " or no bits are set in the gc_prologue before the start of the next "
+       "subsequent marking phase.");
+
+  // Temporarily disabled, since pre/post-consumption closures don't
+  // care about precleaned cards
+  #if 0
+  {
+    MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
+			     (HeapWord*)_virtual_space.high());
+    _ct->ct_bs()->preclean_dirty_cards(mr);
+  }
+  #endif
+
+  // Save the end of the used_region of the constituent generations
+  // to be used to limit the extent of sweep in each generation.
+  save_sweep_limits();
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
+  }
+}
+
+// Drive the concurrent marking phase.  In the asynchronous case we take
+// the CMS token and bit map lock, time/account the phase, and advance
+// _collectorState to Precleaning on success; a false return means a
+// foreground collection is taking over.  In the synchronous case the
+// locks are already held and we advance to FinalMarking.
+bool CMSCollector::markFromRoots(bool asynch) {
+  // we might be tempted to assert that:
+  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
+  //        "inconsistent argument?");
+  // However that wouldn't be right, because it's possible that
+  // a safepoint is indeed in progress as a younger generation
+  // stop-the-world GC happens even as we mark in this generation.
+  assert(_collectorState == Marking, "inconsistent state?");
+  check_correct_thread_executing();
+
+  bool res;
+  if (asynch) {
+
+    // Start the timers for adaptive size policy for the concurrent phases
+    // Do it here so that the foreground MS can use the concurrent
+    // timer since a foreground MS might have the sweep done concurrently
+    // or STW.
+    if (UseAdaptiveSizePolicy) {
+      size_policy()->concurrent_marking_begin();
+    }
+
+    // Weak ref discovery note: We may be discovering weak
+    // refs in this generation concurrent (but interleaved) with
+    // weak ref discovery by a younger generation collector.
+
+    CMSTokenSyncWithLocks ts(true, bitMapLock());
+    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
+    res = markFromRootsWork(asynch);
+    if (res) {
+      _collectorState = Precleaning;
+    } else { // We failed and a foreground collection wants to take over
+      assert(_foregroundGCIsActive, "internal state inconsistency");
+      assert(_restart_addr == NULL,  "foreground will restart from scratch");
+      if (PrintGCDetails) {
+        gclog_or_tty->print_cr("bailing out to foreground collection");
+      }
+    }
+    if (UseAdaptiveSizePolicy) {
+      size_policy()->concurrent_marking_end();
+    }
+  } else {
+    assert(SafepointSynchronize::is_at_safepoint(),
+           "inconsistent with asynch == false");
+    if (UseAdaptiveSizePolicy) {
+      size_policy()->ms_collection_marking_begin();
+    }
+    // already have locks
+    res = markFromRootsWork(asynch);
+    _collectorState = FinalMarking;
+    if (UseAdaptiveSizePolicy) {
+      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      size_policy()->ms_collection_marking_end(gch->gc_cause());
+    }
+  }
+  return res;
+}
+
+// Concurrent marking proper: after resetting the revisit stack,
+// dispatch to the multi-threaded or single-threaded marker depending on
+// CMSConcurrentMTEnabled and ParallelCMSThreads, returning the marker's
+// result (false indicates marking was abandoned; see markFromRoots).
+bool CMSCollector::markFromRootsWork(bool asynch) {
+  // iterate over marked bits in bit map, doing a full scan and mark
+  // from these roots using the following algorithm:
+  // . if oop is to the right of the current scan pointer,
+  //   mark corresponding bit (we'll process it later)
+  // . else (oop is to left of current scan pointer)
+  //   push oop on marking stack
+  // . drain the marking stack
+
+  // Note that when we do a marking step we need to hold the
+  // bit map lock -- recall that direct allocation (by mutators)
+  // and promotion (by younger generation collectors) is also
+  // marking the bit map. [the so-called allocate live policy.]
+  // Because the implementation of bit map marking is not
+  // robust wrt simultaneous marking of bits in the same word,
+  // we need to make sure that there is no such interference
+  // between concurrent such updates.
+
+  // already have locks
+  assert_lock_strong(bitMapLock());
+
+  // Clear the revisit stack, just in case there are any
+  // obsolete contents from a short-circuited previous CMS cycle.
+  _revisitStack.reset();
+  assert(_revisitStack.isEmpty(), "tabula rasa");
+  assert(_markStack.isEmpty(),    "tabula rasa");
+  assert(overflow_list_is_empty(), "tabula rasa");
+  assert(no_preserved_marks(), "no preserved marks");
+
+  bool result = false;
+  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
+    result = do_marking_mt(asynch);
+  } else {
+    result = do_marking_st(asynch);
+  }
+  return result;
+}
+
+// Forward decl
+class CMSConcMarkingTask;
+
+class CMSConcMarkingTerminator: public ParallelTaskTerminator {
+  CMSCollector*       _collector;
+  CMSConcMarkingTask* _task;   // set via set_task(); used by yield()
+  bool _yield;                 // should the gang as a whole yield?
+ protected:
+  // Overridden to yield the whole gang (via _task) when appropriate.
+  virtual void yield();
+ public:
+  // "n_threads" is the number of threads to be terminated.
+  // "queue_set" is a set of work queues of other threads.
+  // "collector" is the CMS collector associated with this task terminator.
+  // "yield" indicates whether we need the gang as a whole to yield.
+  // NOTE(review): _task is not initialized here; set_task() must be
+  // called before the terminator's yield() can be used.
+  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
+                           CMSCollector* collector, bool yield) :
+    ParallelTaskTerminator(n_threads, queue_set),
+    _collector(collector),
+    _yield(yield) { }
+ 
+  void set_task(CMSConcMarkingTask* task) {
+    _task = task;
+  }
+};
+
+// MT Concurrent Marking Task
+class CMSConcMarkingTask: public YieldingFlexibleGangTask {
+  CMSCollector* _collector;
+  YieldingFlexibleWorkGang* _workers;        // the whole gang
+  int           _n_workers;                  // requested/desired # workers
+  bool          _asynch;                     // concurrent (vs foreground) mode
+  bool          _result;                     // overall task result
+  CompactibleFreeListSpace*  _cms_space;
+  CompactibleFreeListSpace* _perm_space;
+  HeapWord*     _global_finger;              // shared scan finger over both spaces
+
+  //  Exposed here for yielding support
+  Mutex* const _bit_map_lock;
+
+  // The per thread work queues, available here for stealing
+  OopTaskQueueSet*  _task_queues;
+  CMSConcMarkingTerminator _term;
+
+ public:
+  CMSConcMarkingTask(CMSCollector* collector, 
+                 CompactibleFreeListSpace* cms_space,
+                 CompactibleFreeListSpace* perm_space,
+                 bool asynch, int n_workers,
+                 YieldingFlexibleWorkGang* workers,
+                 OopTaskQueueSet* task_queues):
+    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
+    _collector(collector),
+    _cms_space(cms_space),
+    _perm_space(perm_space),
+    _asynch(asynch), _n_workers(n_workers), _result(true),
+    _workers(workers), _task_queues(task_queues),
+    _term(n_workers, task_queues, _collector, asynch),
+    _bit_map_lock(collector->bitMapLock())
+  {
+    assert(n_workers <= workers->total_workers(),
+           "Else termination won't work correctly today"); // XXX FIX ME!
+    _requested_size = n_workers;
+    _term.set_task(this);
+    // The finger starts at the bottom of the lower (CMS) space and is
+    // bumped monotonically upward through both spaces (see work()).
+    assert(_cms_space->bottom() < _perm_space->bottom(),
+           "Finger incorrectly initialized below");
+    _global_finger = _cms_space->bottom();
+  }
+
+
+  OopTaskQueueSet* task_queues()  { return _task_queues; }
+
+  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
+
+  HeapWord** global_finger_addr() { return &_global_finger; }
+
+  CMSConcMarkingTerminator* terminator() { return &_term; }
+
+  // Work method executed by each gang worker; "i" is the worker id.
+  void work(int i);
+    
+  virtual void coordinator_yield();  // stuff done by coordinator
+  bool result() { return _result; }
+
+  void reset(HeapWord* ra) {
+    // NOTE(review): the restart address "ra" is unused here; callers
+    // re-initialize the per-space subtask state separately.  Only the
+    // terminator is reset for reuse.
+    _term.reset_for_reuse();
+  }
+
+  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
+                                           OopTaskQueue* work_q);
+
+ private:
+  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
+  void do_work_steal(int i);
+  void bump_global_finger(HeapWord* f);
+};
+
+// Route a yield request through the task (so the whole gang yields)
+// when a yield is pending, no foreground GC is active, and gang-wide
+// yielding was requested; otherwise use the default termination-
+// protocol yield.  (Evaluation order of the conditions is preserved.)
+void CMSConcMarkingTerminator::yield() {
+  bool yield_via_task = ConcurrentMarkSweepThread::should_yield() &&
+                        !_collector->foregroundGCIsActive() &&
+                        _yield;
+  if (yield_via_task) {
+    _task->yield();
+  } else {
+    ParallelTaskTerminator::yield();
+  }
+}
+
+////////////////////////////////////////////////////////////////
+// Concurrent Marking Algorithm Sketch
+////////////////////////////////////////////////////////////////
+// Until all tasks exhausted (both spaces):
+// -- claim next available chunk
+// -- bump global finger via CAS
+// -- find first object that starts in this chunk
+//    and start scanning bitmap from that position
+// -- scan marked objects for oops
+// -- CAS-mark target, and if successful:
+//    . if target oop is above global finger (volatile read)
+//      nothing to do
+//    . if target oop is in chunk and above local finger
+//        then nothing to do
+//    . else push on work-queue
+// -- Deal with possible overflow issues:
+//    . local work-queue overflow causes stuff to be pushed on
+//      global (common) overflow queue
+//    . always first empty local work queue
+//    . then get a batch of oops from global work queue if any
+//    . then do work stealing
+// -- When all tasks claimed (both spaces)
+//    and local work queue empty, 
+//    then in a loop do:
+//    . check global overflow stack; steal a batch of oops and trace
+//    . try to steal from other threads if GOS is empty
+//    . if neither is available, offer termination
+// -- Terminate and return result
+//
+// Body executed by each worker thread "i": scan-and-mark the CMS space,
+// then the perm space, then enter the work-stealing/termination phase.
+// Per-phase wall times are reported under PrintCMSStatistics.
+void CMSConcMarkingTask::work(int i) {
+  elapsedTimer _timer;
+  ResourceMark rm;
+  HandleMark hm;
+
+  // Before we begin work, our work queue should be empty
+  assert(work_queue(i)->size() == 0, "Expected to be empty");
+  // Scan the bitmap covering _cms_space, tracing through grey objects.
+  _timer.start();
+  do_scan_and_mark(i, _cms_space);
+  _timer.stop();
+  if (PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
+      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+  }
+
+  // ... do the same for the _perm_space
+  _timer.reset();
+  _timer.start();
+  do_scan_and_mark(i, _perm_space);
+  _timer.stop();
+  if (PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
+      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+  }
+
+  // ... do work stealing
+  _timer.reset();
+  _timer.start();
+  do_work_steal(i);
+  _timer.stop();
+  if (PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
+      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+  }
+  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
+  assert(work_queue(i)->size() == 0, "Should have been emptied");
+  // Note that under the current task protocol, the
+  // following assertion is true even of the spaces
+  // expanded since the completion of the concurrent
+  // marking. XXX This will likely change under a strict
+  // ABORT semantics.
+  assert(_global_finger >  _cms_space->end() &&
+         _global_finger >= _perm_space->end(),
+         "All tasks have been completed");
+}
+
+// Atomically advance the shared _global_finger to "f" via a CAS loop.
+// If a competing thread has already pushed the finger to or beyond "f",
+// the loop exits without updating it.
+void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
+  HeapWord* read = _global_finger;
+  HeapWord* cur  = read;
+  while (f > read) {
+    cur = read;
+    // cmpxchg returns the value observed at _global_finger; equality
+    // with "cur" means our update won the race.
+    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
+    if (cur == read) {
+      // our cas succeeded
+      assert(_global_finger >= f, "protocol consistency");
+      break;
+    }
+  }
+}
+
+// This is really inefficient, and should be redone by
+// using (not yet available) block-read and -write interfaces to the
+// stack and the work_queue. XXX FIX ME !!!
+// Transfer up to 1/4 of work_q's capacity (further bounded by
+// ParGCDesiredObjsFromOverflowList and the stack's current length) from
+// the shared overflow stack into the (empty) local work queue, under
+// the stack's par_lock.  Returns true if any oops were transferred.
+bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
+                                                      OopTaskQueue* work_q) {
+  // Fast lock-free check
+  if (ovflw_stk->length() == 0) {
+    return false;
+  }
+  assert(work_q->size() == 0, "Shouldn't steal");
+  MutexLockerEx ml(ovflw_stk->par_lock(),
+                   Mutex::_no_safepoint_check_flag);
+  // Grab up to 1/4 the size of the work queue
+  size_t num = MIN2((size_t)work_q->max_elems()/4,
+                    (size_t)ParGCDesiredObjsFromOverflowList);
+  num = MIN2(num, ovflw_stk->length());
+  for (int i = (int) num; i > 0; i--) {
+    oop cur = ovflw_stk->pop();
+    assert(cur != NULL, "Counted wrong?");
+    work_q->push(cur);
+  }
+  return num > 0;
+}
+
+// Scan-and-mark driver for worker "i" over space "sp": repeatedly claim
+// fixed-size chunks of the space, bump the shared global finger past
+// each claimed chunk, then mark from every object that starts in the
+// chunk via Par_MarkFromRootsClosure.
+void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
+  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
+  int n_tasks = pst->n_tasks();
+  // We allow that there may be no tasks to do here because
+  // we are restarting after a stack overflow.
+  assert(pst->valid() || n_tasks == 0, "Uninitializd use?");
+  int nth_task = 0;
+
+  HeapWord* start = sp->bottom();
+  size_t chunk_size = sp->marking_task_size();
+  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+    // Having claimed the nth task in this space,
+    // compute the chunk that it corresponds to:
+    MemRegion span = MemRegion(start + nth_task*chunk_size,
+                               start + (nth_task+1)*chunk_size);
+    // Try and bump the global finger via a CAS;
+    // note that we need to do the global finger bump
+    // _before_ taking the intersection below, because
+    // the task corresponding to that region will be
+    // deemed done even if the used_region() expands
+    // because of allocation -- as it almost certainly will
+    // during start-up while the threads yield in the
+    // closure below.
+    HeapWord* finger = span.end();
+    bump_global_finger(finger);   // atomically
+    // There are null tasks here corresponding to chunks
+    // beyond the "top" address of the space.
+    span = span.intersection(sp->used_region());
+    if (!span.is_empty()) {  // Non-null task
+      // We want to skip the first object because
+      // the protocol is to scan any object in its entirety
+      // that _starts_ in this span; a fortiori, any
+      // object starting in an earlier span is scanned
+      // as part of an earlier claimed task.
+      // Below we use the "careful" version of block_start
+      // so we do not try to navigate uninitialized objects.
+      HeapWord* prev_obj = sp->block_start_careful(span.start());
+      // Below we use a variant of block_size that uses the
+      // Printezis bits to avoid waiting for allocated
+      // objects to become initialized/parsable.
+      while (prev_obj < span.start()) {
+        size_t sz = sp->block_size_no_stall(prev_obj, _collector);
+        if (sz > 0) {
+          prev_obj += sz;
+        } else {
+          // In this case we may end up doing a bit of redundant
+          // scanning, but that appears unavoidable, short of
+          // locking the free list locks; see bug 6324141.
+          break;
+        }
+      }
+      if (prev_obj < span.end()) {
+        MemRegion my_span = MemRegion(prev_obj, span.end());
+        // Do the marking work within a non-empty span --
+        // the last argument to the constructor indicates whether the
+        // iteration should be incremental with periodic yields.
+        Par_MarkFromRootsClosure cl(this, _collector, my_span,
+                                    &_collector->_markBitMap,
+                                    work_queue(i),
+                                    &_collector->_markStack,
+                                    &_collector->_revisitStack,
+                                    _asynch);
+        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
+      } // else nothing to do for this task
+    }   // else nothing to do for this task
+  }
+  // We'd be tempted to assert here that since there are no
+  // more tasks left to claim in this space, the global_finger
+  // must exceed space->top() and a fortiori space->end(). However,
+  // that would not quite be correct because the bumping of
+  // global_finger occurs strictly after the claiming of a task,
+  // so by the time we reach here the global finger may not yet
+  // have been bumped up by the thread that claimed the last
+  // task.
+  pst->all_tasks_completed();
+}
+
+class Par_ConcMarkingClosure: public OopClosure {
+  CMSCollector* _collector;
+  MemRegion     _span;            // span of the generation being marked
+  CMSBitMap*    _bit_map;         // shared marking bit map
+  CMSMarkStack* _overflow_stack;  // shared spill stack for local-queue overflow
+  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
+  OopTaskQueue* _work_queue;      // this worker's local queue of grey objects
+
+ public:
+  // NOTE(review): _revisit_stack is never initialized by this
+  // constructor (see the trailing comment below) -- confirm before use.
+  Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
+                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
+    _collector(collector),
+    _span(_collector->_span),
+    _work_queue(work_queue),
+    _bit_map(bit_map),
+    _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
+
+  // Mark and push the object at *p if it lies in _span and is unmarked.
+  void do_oop(oop* p);
+  // Drain the local work queue down to at most "max" entries.
+  void trim_queue(size_t max);
+  // Record a restart address and recover from overflow; "lost" is an
+  // address whose marking work may have been discarded.
+  void handle_stack_overflow(HeapWord* lost);
+};
+
+// Grey object rescan during work stealing phase --
+// the salient assumption here is that stolen oops must
+// always be initialized, so we do not need to check for
+// uninitialized objects before scanning here.
+void Par_ConcMarkingClosure::do_oop(oop* p) {
+  oop    this_oop = *p;
+  assert(this_oop->is_oop_or_null(),
+         "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)this_oop;
+  // Check if oop points into the CMS generation
+  // and is not marked
+  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
+    // a white object ...
+    // If we manage to "claim" the object, by being the
+    // first thread to mark it, then we push it on our
+    // marking stack
+    if (_bit_map->par_mark(addr)) {     // ... now grey
+      // push on work queue (grey set)
+      bool simulate_overflow = false;
+      NOT_PRODUCT(
+        if (CMSMarkStackOverflowALot &&
+            _collector->simulate_overflow()) {
+          // simulate a stack overflow
+          simulate_overflow = true;
+        }
+      )
+      // Prefer the local work queue; spill to the shared overflow
+      // stack only when the local push fails.
+      if (simulate_overflow ||
+          !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+        // stack overflow
+        if (PrintCMSStatistics != 0) {
+          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
+                                 SIZE_FORMAT, _overflow_stack->capacity());
+        }
+        // We cannot assert that the overflow stack is full because
+        // it may have been emptied since.
+        assert(simulate_overflow ||
+               _work_queue->size() == _work_queue->max_elems(),
+              "Else push should have succeeded");
+        handle_stack_overflow(addr);
+      }
+    } // Else, some other thread got there first
+  }
+}
+
+// Drain our local work queue down to at most "max" entries, tracing
+// each popped (grey) object in turn via do_oop() above.
+void Par_ConcMarkingClosure::trim_queue(size_t max) {
+  oop obj;
+  while (_work_queue->size() > max) {
+    if (_work_queue->pop_local(obj)) {
+      assert(obj->is_oop(), "Should be an oop");
+      assert(_bit_map->isMarked((HeapWord*)obj), "Grey object");
+      assert(_span.contains((HeapWord*)obj), "Not in span");
+      assert(obj->is_parsable(), "Should be parsable");
+      obj->oop_iterate(this);  // traced via do_oop()
+    }
+  }
+}
+
+// Upon stack overflow, we discard (part of) the stack,
+// remembering the least address amongst those discarded
+// in CMSCollector's _restart_address.
+void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
+  // We need to do this under a mutex to prevent other
+  // workers from interfering with the expansion below.
+  MutexLockerEx ml(_overflow_stack->par_lock(),
+                   Mutex::_no_safepoint_check_flag);
+  // Remember the least grey address discarded so marking can later be
+  // restarted from there (via CMSCollector's restart address).
+  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
+  _collector->lower_restart_addr(ra);
+  _overflow_stack->reset();  // discard stack contents
+  _overflow_stack->expand(); // expand the stack if possible
+}
+
+
+// Work-stealing loop for worker "i": repeatedly drain the local queue,
+// then refill it from the global overflow stack, then try to steal from
+// other workers' queues; offer termination only when no work is found.
+void CMSConcMarkingTask::do_work_steal(int i) {
+  OopTaskQueue* work_q = work_queue(i);
+  oop obj_to_scan;
+  CMSBitMap* bm = &(_collector->_markBitMap);
+  CMSMarkStack* ovflw = &(_collector->_markStack);
+  int* seed = _collector->hash_seed(i);
+  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
+  while (true) {
+    cl.trim_queue(0);
+    assert(work_q->size() == 0, "Should have been emptied above");
+    if (get_work_from_overflow_stack(ovflw, work_q)) {
+      // Can't assert below because the work obtained from the
+      // overflow stack may already have been stolen from us.
+      // assert(work_q->size() > 0, "Work from overflow stack");
+      continue;
+    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+      assert(obj_to_scan->is_oop(), "Should be an oop");
+      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
+      obj_to_scan->oop_iterate(&cl);
+    } else if (terminator()->offer_termination()) {
+      assert(work_q->size() == 0, "Impossible!");
+      break;
+    }
+  }
+}
+
+// This is run by the CMS (coordinator) thread.
+// Release the bit map lock and the CMS token, acknowledge the yield
+// request, pause briefly to let the requesting thread run, then
+// re-synchronize and re-acquire the lock before resuming.
+void CMSConcMarkingTask::coordinator_yield() {
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+  
+  // First give up the locks, then yield, then re-lock
+  // We should probably use a constructor/destructor idiom to
+  // do this unlock/lock or modify the MutexUnlocker class to
+  // serve our purpose. XXX
+  assert_lock_strong(_bit_map_lock);
+  _bit_map_lock->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+  _collector->stopTimer();
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // It is possible for whichever thread initiated the yield request
+  // not to get a chance to wake up and take the bitmap lock between
+  // this thread releasing it and reacquiring it. So, while the
+  // should_yield() flag is on, let's sleep for a bit to give the
+  // other thread a chance to wake up. The limit imposed on the number
+  // of iterations is defensive, to avoid any unforeseen circumstances
+  // putting us into an infinite loop. Since it's always been this
+  // (coordinator_yield()) method that was observed to cause the
+  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
+  // which is by default non-zero. For the other seven methods that
+  // also perform the yield operation, they are using a different
+  // parameter (CMSYieldSleepCount) which is by default zero. This way we
+  // can enable the sleeping for those methods too, if necessary.
+  // See 6442774.
+  //
+  // We really need to reconsider the synchronization between the GC
+  // thread and the yield-requesting threads in the future and we
+  // should really use wait/notify, which is the recommended
+  // way of doing this type of interaction. Additionally, we should
+  // consolidate the eight methods that do the yield operation and they
+  // are almost identical into one for better maintainability and
+  // readability. See 6445193.
+  //
+  // Tony 2006.06.29
+  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  _bit_map_lock->lock_without_safepoint_check();
+  _collector->startTimer();
+}
+
+// Multi-threaded concurrent marking: set up a CMSConcMarkingTask over
+// the CMS and perm spaces, run it on the concurrent worker gang
+// (yielding on the gang's behalf as requested), and restart it from
+// _restart_addr after any marking-stack overflow.  Returns false only
+// when bailing out to an active foreground collection.
+bool CMSCollector::do_marking_mt(bool asynch) {
+  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
+  // In the future this would be determined ergonomically, based
+  // on #cpu's, # active mutator threads (and load), and mutation rate.
+  int num_workers = ParallelCMSThreads;
+
+  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
+  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
+  
+  CMSConcMarkingTask tsk(this, cms_space, perm_space,
+                         asynch, num_workers /* number requested XXX */,
+                         conc_workers(), task_queues());
+
+  // Since the actual number of workers we get may be different
+  // from the number we requested above, do we need to do anything different
+  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
+  // class?? XXX
+  cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
+  perm_space->initialize_sequential_subtasks_for_marking(num_workers);
+  
+  // Refs discovery is already non-atomic.
+  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
+  // Mutate the Refs discovery so it is MT during the
+  // multi-threaded marking phase.
+  ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
+  
+  conc_workers()->start_task(&tsk);
+  while (tsk.yielded()) {
+    tsk.coordinator_yield();
+    conc_workers()->continue_task(&tsk);
+  }
+  // If the task was aborted, _restart_addr will be non-NULL
+  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
+  while (_restart_addr != NULL) {
+    // XXX For now we do not make use of ABORTED state and have not
+    // yet implemented the right abort semantics (even in the original
+    // single-threaded CMS case. That needs some more investigation
+    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
+    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
+    // If _restart_addr is non-NULL, a marking stack overflow
+    // occurred; we need to do a fresh marking iteration from the
+    // indicated restart address.
+    if (_foregroundGCIsActive && asynch) {
+      // We may be running into repeated stack overflows, having
+      // reached the limit of the stack size, while making very
+      // slow forward progress. It may be best to bail out and
+      // let the foreground collector do its job.
+      // Clear _restart_addr, so that foreground GC
+      // works from scratch. This avoids the headache of
+      // a "rescan" which would otherwise be needed because
+      // of the dirty mod union table & card table.
+      _restart_addr = NULL;
+      return false;
+    }
+    // Adjust the task to restart from _restart_addr
+    tsk.reset(_restart_addr);
+    cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
+                  _restart_addr);
+    perm_space->initialize_sequential_subtasks_for_marking(num_workers,
+                  _restart_addr);
+    _restart_addr = NULL;
+    // Get the workers going again
+    conc_workers()->start_task(&tsk);
+    while (tsk.yielded()) {
+      tsk.coordinator_yield();
+      conc_workers()->continue_task(&tsk);
+    }
+  }
+  assert(tsk.completed(), "Inconsistency");
+  assert(tsk.result() == true, "Inconsistency");
+  return true;
+}
+
+// Single-threaded (ST) concurrent marking.
+// Iterates the mark bit map with a MarkFromRootsClosure, restarting from
+// _restart_addr whenever a marking stack overflow is signalled. Returns
+// false (clearing _restart_addr) if a foreground GC becomes active while
+// in asynchronous mode; returns true when marking completes.
+bool CMSCollector::do_marking_st(bool asynch) {
+  ResourceMark rm;
+  HandleMark   hm;
+
+  // Yielding is enabled only when both CMSYield is set and we are in
+  // asynchronous (concurrent) mode.
+  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
+    &_markStack, &_revisitStack, CMSYield && asynch);
+  // the last argument to iterate indicates whether the iteration
+  // should be incremental with periodic yields.
+  _markBitMap.iterate(&markFromRootsClosure);
+  // If _restart_addr is non-NULL, a marking stack overflow
+  // occurred; we need to do a fresh iteration from the
+  // indicated restart address.
+  while (_restart_addr != NULL) {
+    if (_foregroundGCIsActive && asynch) {
+      // We may be running into repeated stack overflows, having
+      // reached the limit of the stack size, while making very
+      // slow forward progress. It may be best to bail out and
+      // let the foreground collector do its job.
+      // Clear _restart_addr, so that foreground GC
+      // works from scratch. This avoids the headache of
+      // a "rescan" which would otherwise be needed because
+      // of the dirty mod union table & card table.
+      _restart_addr = NULL;
+      return false;  // indicating failure to complete marking
+    }
+    // Deal with stack overflow:
+    // we restart marking from _restart_addr
+    HeapWord* ra = _restart_addr;
+    markFromRootsClosure.reset(ra);
+    _restart_addr = NULL;
+    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
+  }
+  return true;
+}
+
+// Precleaning phase: if enabled, do one round of preclean work and then
+// transition the collector state -- under the CMS token -- to
+// AbortablePreclean (or straight to FinalMarking when precleaning is
+// disabled). Also decides whether Eden sampling should start, based on
+// current Eden occupancy relative to capacity.
+void CMSCollector::preclean() {
+  check_correct_thread_executing();
+  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
+  _abort_preclean = false;
+  if (CMSPrecleaningEnabled) {
+    _eden_chunk_index = 0;
+    size_t used = get_eden_used();
+    size_t capacity = get_eden_capacity();
+    // Don't start sampling unless we will get sufficiently
+    // many samples.
+    // NOTE(review): the threshold combines CMSScheduleRemarkSamplingRatio
+    // and CMSScheduleRemarkEdenPenetration; confirm the intended grouping
+    // of the expression against the flag definitions.
+    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
+                * CMSScheduleRemarkEdenPenetration)) {
+      _start_sampling = true;
+    } else {
+      _start_sampling = false;
+    }
+    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
+    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
+  }
+  CMSTokenSync x(true); // is cms thread
+  if (CMSPrecleaningEnabled) {
+    sample_eden();
+    _collectorState = AbortablePreclean;
+  } else {
+    _collectorState = FinalMarking;
+  }
+}
+
+// Try and schedule the remark such that young gen
+// occupancy is CMSScheduleRemarkEdenPenetration %.
+// Repeatedly performs preclean_work() until one of the abort conditions
+// holds: should_abort_preclean(), thread termination requested, loop
+// count exceeds CMSMaxAbortablePrecleanLoops, or wallclock time exceeds
+// CMSMaxAbortablePrecleanTime. Finally transitions to FinalMarking under
+// the CMS token (unless a foreground collection already finished the
+// cycle, leaving the state Idling).
+void CMSCollector::abortable_preclean() {
+  check_correct_thread_executing();
+  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
+  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
+
+  // If Eden's current occupancy is below this threshold,
+  // immediately schedule the remark; else preclean
+  // past the next scavenge in an effort to
+  // schedule the pause as described above. By choosing
+  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
+  // we will never do an actual abortable preclean cycle.
+  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
+    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
+    // We need more smarts in the abortable preclean
+    // loop below to deal with cases where allocation
+    // in young gen is very very slow, and our precleaning
+    // is running a losing race against a horde of
+    // mutators intent on flooding us with CMS updates
+    // (dirty cards).
+    // One, admittedly dumb, strategy is to give up
+    // after a certain number of abortable precleaning loops
+    // or after a certain maximum time. We want to make
+    // this smarter in the next iteration.
+    // XXX FIX ME!!! YSR
+    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
+    while (!(should_abort_preclean() ||
+             ConcurrentMarkSweepThread::should_terminate())) {
+      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); 
+      cumworkdone += workdone;
+      loops++;
+      // Voluntarily terminate abortable preclean phase if we have
+      // been at it for too long.
+      if ((CMSMaxAbortablePrecleanLoops != 0) &&
+          loops >= CMSMaxAbortablePrecleanLoops) {
+        if (PrintGCDetails) {
+          gclog_or_tty->print(" CMS: abort preclean due to loops ");
+        }
+        break;
+      }
+      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
+        if (PrintGCDetails) {
+          gclog_or_tty->print(" CMS: abort preclean due to time ");
+        }
+        break;
+      }
+      // If we are doing little work each iteration, we should
+      // take a short break.
+      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
+        // Sleep for some time, waiting for work to accumulate
+        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
+        waited++;
+      }
+    }
+    if (PrintCMSStatistics > 0) {
+      gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
+                          loops, waited, cumworkdone);
+    }
+  }
+  CMSTokenSync x(true); // is cms thread
+  if (_collectorState != Idling) {
+    assert(_collectorState == AbortablePreclean,
+           "Spontaneous state transition?");
+    _collectorState = FinalMarking;
+  } // Else, a foreground collection completed this CMS cycle.
+  return;
+}
+
+// Respond to an Eden sampling opportunity: record the current Eden top
+// pointer into _eden_chunk_array (committing the sample only if it is at
+// least CMSSamplingGrain past the previous one), and -- when in the
+// AbortablePreclean state -- set _abort_preclean once Eden occupancy
+// crosses CMSScheduleRemarkEdenPenetration percent of capacity.
+// Must be called by the CMS thread while holding the CMS token.
+void CMSCollector::sample_eden() {
+  // Make sure a young gc cannot sneak in between our
+  // reading and recording of a sample.
+  assert(Thread::current()->is_ConcurrentGC_thread(),
+         "Only the cms thread may collect Eden samples");
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "Should collect samples while holding CMS token");
+  if (!_start_sampling) {
+    return;
+  }
+  if (_eden_chunk_array) {
+    if (_eden_chunk_index < _eden_chunk_capacity) {
+      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
+      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
+             "Unexpected state of Eden");
+      // We'd like to check that what we just sampled is an oop-start address;
+      // however, we cannot do that here since the object may not yet have been
+      // initialized. So we'll instead do the check when we _use_ this sample
+      // later.
+      if (_eden_chunk_index == 0 ||
+          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
+                         _eden_chunk_array[_eden_chunk_index-1])
+           >= CMSSamplingGrain)) {
+        _eden_chunk_index++;  // commit sample
+      }
+    }
+  }
+  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
+    size_t used = get_eden_used();
+    size_t capacity = get_eden_capacity();
+    assert(used <= capacity, "Unexpected state of Eden");
+    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
+      _abort_preclean = true;
+    }
+  }
+}
+
+
+// One round of preclean work:
+//  1. optionally scrub the discovered reference lists (clean_refs),
+//  2. optionally preclean the active survivor spaces (clean_survivor),
+//  3. preclean dirty cards in the mod union table (repeatedly, until a
+//     convergence criterion is met or CMSPrecleanIter is reached) and
+//     then in the card table.
+// Returns the cumulative number of dirty cards precleaned, as a measure
+// of useful work done.
+size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
+  assert(_collectorState == Precleaning ||
+         _collectorState == AbortablePreclean, "incorrect state");
+  ResourceMark rm;
+  HandleMark   hm;
+  // Do one pass of scrubbing the discovered reference lists
+  // to remove any reference objects with strongly-reachable
+  // referents.
+  if (clean_refs) {
+    ReferenceProcessor* rp = ref_processor();
+    CMSPrecleanRefsYieldClosure yield_cl(this);
+    assert(rp->span().equals(_span), "Spans should be equal");
+    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
+                                   &_markStack);
+    CMSDrainMarkingStackClosure complete_trace(this,
+                                  _span, &_markBitMap, &_markStack,
+                                  &keep_alive);
+
+    // We don't want this step to interfere with a young
+    // collection because we don't want to take CPU
+    // or memory bandwidth away from the young GC threads
+    // (which may be as many as there are CPUs).
+    // Note that we don't need to protect ourselves from
+    // interference with mutators because they can't
+    // manipulate the discovered reference lists nor affect
+    // the computed reachability of the referents, the
+    // only properties manipulated by the precleaning
+    // of these reference lists.
+    CMSTokenSyncWithLocks x(true /* is cms thread */,
+                            bitMapLock());
+    sample_eden();
+    // The following will yield to allow foreground
+    // collection to proceed promptly. XXX YSR:
+    // The code in this method may need further
+    // tweaking for better performance and some restructuring
+    // for cleaner interfaces.
+    rp->preclean_discovered_references(
+          rp->is_alive_non_header(), &keep_alive, &complete_trace,
+          &yield_cl);
+  }
+
+  if (clean_survivor) {  // preclean the active survivor space(s)
+    assert(_young_gen->kind() == Generation::DefNew ||
+           _young_gen->kind() == Generation::ParNew ||
+           _young_gen->kind() == Generation::ASParNew,
+         "incorrect type for cast");
+    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
+    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
+                             &_markBitMap, &_modUnionTable,
+                             &_markStack, &_revisitStack,
+                             true /* precleaning phase */);
+    CMSTokenSyncWithLocks ts(true /* is cms thread */,
+                             bitMapLock());
+    // Record the collection count before iterating, so the closure can
+    // detect an intervening young collection.
+    unsigned int before_count =
+      GenCollectedHeap::heap()->total_collections();
+    SurvivorSpacePrecleanClosure
+      sss_cl(this, _span, &_markBitMap, &_markStack,
+             &pam_cl, before_count, CMSYield);
+    dng->from()->object_iterate_careful(&sss_cl);
+    dng->to()->object_iterate_careful(&sss_cl);
+  }
+  MarkRefsIntoAndScanClosure
+    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
+             &_markStack, &_revisitStack, this, CMSYield,
+             true /* precleaning phase */);
+  // CAUTION: The following closure has persistent state that may need to
+  // be reset upon a decrease in the sequence of addresses it
+  // processes.
+  ScanMarkedObjectsAgainCarefullyClosure
+    smoac_cl(this, _span,
+      &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
+
+  // Preclean dirty cards in ModUnionTable and CardTable using
+  // appropriate convergence criterion;
+  // repeat CMSPrecleanIter times unless we find that
+  // we are losing.
+  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
+  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
+         "Bad convergence multiplier");
+  assert(CMSPrecleanThreshold >= 100,
+         "Unreasonably low CMSPrecleanThreshold");
+
+  size_t numIter, cumNumCards, lastNumCards, curNumCards;
+  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
+       numIter < CMSPrecleanIter;
+       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
+    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
+    if (CMSPermGenPrecleaningEnabled) {
+      curNumCards  += preclean_mod_union_table(_permGen, &smoac_cl);
+    }
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
+    }
+    // Either there are very few dirty cards, so re-mark
+    // pause will be small anyway, or our pre-cleaning isn't
+    // that much faster than the rate at which cards are being
+    // dirtied, so we might as well stop and re-mark since
+    // precleaning won't improve our re-mark time by much.
+    if (curNumCards <= CMSPrecleanThreshold ||
+        (numIter > 0 &&
+         (curNumCards * CMSPrecleanDenominator >
+         lastNumCards * CMSPrecleanNumerator))) {
+      numIter++;
+      cumNumCards += curNumCards;
+      break;
+    }
+  }
+  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
+  if (CMSPermGenPrecleaningEnabled) {
+    curNumCards += preclean_card_table(_permGen, &smoac_cl);
+  }
+  cumNumCards += curNumCards;
+  if (PrintGCDetails && PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
+		  curNumCards, cumNumCards, numIter);
+  }
+  return cumNumCards;   // as a measure of useful work done
+}
+
+// PRECLEANING NOTES:
+// Precleaning involves:
+// . reading the bits of the modUnionTable and clearing the set bits.
+// . For the cards corresponding to the set bits, we scan the
+//   objects on those cards. This means we need the free_list_lock
+//   so that we can safely iterate over the CMS space when scanning
+//   for oops.
+// . When we scan the objects, we'll be both reading and setting
+//   marks in the marking bit map, so we'll need the marking bit map.
+// . For protecting _collector_state transitions, we take the CGC_lock.
+//   Note that any races in the reading of card table entries by the
+//   CMS thread on the one hand and the clearing of those entries by the
+//   VM thread or the setting of those entries by the mutator threads on the
+//   other are quite benign. However, for efficiency it makes sense to keep
+//   the VM thread from racing with the CMS thread while the latter is
+//   recording dirty card info to the modUnionTable. We therefore also use the
+//   CGC_lock to protect the reading of the card table and the mod union
+//   table by the CMS thread.
+// . We run concurrently with mutator updates, so scanning
+//   needs to be done carefully  -- we should not try to scan
+//   potentially uninitialized objects.
+//
+// Locking strategy: While holding the CGC_lock, we scan over and
+// reset a maximal dirty range of the mod union / card tables, then lock
+// the free_list_lock and bitmap lock to do a full marking, then
+// release these locks; and repeat the cycle. This allows for a
+// certain amount of fairness in the sharing of these locks between
+// the CMS collector on the one hand, and the VM thread and the
+// mutators on the other.
+
+// NOTE: preclean_mod_union_table() and preclean_card_table()
+// further below are largely identical; if you need to modify
+// one of these methods, please check the other method too.
+
+// Preclean the dirty cards recorded in the mod union table for the given
+// generation: repeatedly get-and-clear a maximal contiguous dirty range
+// (under the CMS token), then carefully scan the objects on those cards
+// (under the freelist and bit-map locks), re-dirtying and restarting at
+// the next block boundary if the careful iteration stops early. Returns
+// the cumulative number of dirty cards processed.
+size_t CMSCollector::preclean_mod_union_table(
+  ConcurrentMarkSweepGeneration* gen,
+  ScanMarkedObjectsAgainCarefullyClosure* cl) {
+  assert(_markStack.isEmpty(), "should be empty");
+  assert(overflow_list_is_empty(), "should be empty");
+  assert(no_preserved_marks(), "no preserved marks");
+  // strategy: starting with the first card, accumulate contiguous
+  // ranges of dirty cards; clear these cards, then scan the region
+  // covered by these cards.
+
+  // Since all of the MUT is committed ahead, we can just use
+  // that, in case the generations expand while we are precleaning.
+  // It might also be fine to just use the committed part of the
+  // generation, but we might potentially miss cards when the
+  // generation is rapidly expanding while we are in the midst
+  // of precleaning.
+  HeapWord* startAddr = gen->reserved().start();
+  HeapWord* endAddr   = gen->reserved().end();
+
+  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+
+  size_t numDirtyCards, cumNumDirtyCards;
+  HeapWord *nextAddr, *lastAddr;
+  for (cumNumDirtyCards = numDirtyCards = 0,
+       nextAddr = lastAddr = startAddr;
+       nextAddr < endAddr;
+       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
+
+    ResourceMark rm;
+    HandleMark   hm;
+
+    MemRegion dirtyRegion;
+    {
+      // Hold the CMS token while reading and clearing the mod union
+      // table; see the "PRECLEANING NOTES" locking discussion above.
+      CMSTokenSync ts(true);
+      sample_eden();
+
+      if (PrintGCDetails) {
+        startTimer();
+      }
+
+      // Get dirty region starting at nextOffset (inclusive),
+      // simultaneously clearing it.
+      dirtyRegion = 
+        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
+      assert(dirtyRegion.start() >= nextAddr,
+             "returned region inconsistent?");
+    }
+    // Remember where the next search should begin.
+    // The returned region (if non-empty) is a right open interval,
+    // so lastOffset is obtained from the right end of that
+    // interval.
+    lastAddr = dirtyRegion.end();
+    // Should do something more transparent and less hacky XXX
+    numDirtyCards =
+      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
+
+    // We'll scan the cards in the dirty region (with periodic
+    // yields for foreground GC as needed).
+    if (!dirtyRegion.is_empty()) {
+      if (PrintGCDetails) {
+        stopTimer();
+      }
+      assert(numDirtyCards > 0, "consistency check");
+      HeapWord* stop_point = NULL;
+      {
+        // Take the freelist lock and bit-map lock for the careful scan.
+        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+                                 bitMapLock());
+        assert(_markStack.isEmpty(), "should be empty");
+        assert(overflow_list_is_empty(), "should be empty");
+        assert(no_preserved_marks(), "no preserved marks");
+        sample_eden();
+        if (PrintGCDetails) {
+          startTimer();
+        }
+        stop_point =
+          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+      }
+      if (stop_point != NULL) {
+        // The careful iteration stopped early either because it found an
+        // uninitialized object, or because we were in the midst of an
+        // "abortable preclean", which should now be aborted. Redirty
+        // the bits corresponding to the partially-scanned or unscanned
+        // cards. We'll either restart at the next block boundary or
+        // abort the preclean.
+        assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
+               (_collectorState == AbortablePreclean && should_abort_preclean()),
+               "Unparsable objects should only be in perm gen.");
+
+        CMSTokenSyncWithLocks ts(true, bitMapLock());
+        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
+        if (should_abort_preclean()) {
+          break; // out of preclean loop
+        } else {
+          // Compute the next address at which preclean should pick up;
+          // might need bitMapLock in order to read P-bits.
+          lastAddr = next_card_start_after_block(stop_point);
+        }
+      }
+      if (PrintGCDetails) {
+        stopTimer();
+      }
+    } else {
+      // Empty dirty region: we have swept the whole range; done.
+      assert(lastAddr == endAddr, "consistency check");
+      assert(numDirtyCards == 0, "consistency check");
+      break;
+    }
+  }
+  if (PrintGCDetails) {
+    stopTimer();
+  }
+  assert(_markStack.isEmpty(), "should be empty");
+  assert(overflow_list_is_empty(), "should be empty");
+  assert(no_preserved_marks(), "no preserved marks");
+  return cumNumDirtyCards;
+}
+
+// NOTE: preclean_mod_union_table() above and preclean_card_table()
+// below are largely identical; if you need to modify
+// one of these methods, please check the other method too.
+
+// Preclean the dirty cards recorded in the card table for the given
+// generation. Analogous to preclean_mod_union_table() above, except that
+// dirty ranges come from the card table (marked "precleaned" rather than
+// cleared) and early-stopped scans re-dirty via ct_bs()->invalidate().
+// Returns the cumulative number of dirty cards processed.
+size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+  ScanMarkedObjectsAgainCarefullyClosure* cl) {
+  // strategy: it's similar to preclean_mod_union_table above, in that
+  // we accumulate contiguous ranges of dirty cards, mark these cards
+  // precleaned, then scan the region covered by these cards.
+  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
+  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
+
+  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+
+  size_t numDirtyCards, cumNumDirtyCards;
+  HeapWord *lastAddr, *nextAddr;
+
+  for (cumNumDirtyCards = numDirtyCards = 0,
+       nextAddr = lastAddr = startAddr;
+       nextAddr < endAddr;
+       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
+
+    ResourceMark rm;
+    HandleMark   hm;
+
+    MemRegion dirtyRegion;
+    {
+      // See comments in "Precleaning notes" above on why we
+      // do this locking. XXX Could the locking overheads be
+      // too high when dirty cards are sparse? [I don't think so.]
+      CMSTokenSync x(true); // is cms thread
+      sample_eden();
+
+      if (PrintGCDetails) {
+        startTimer();
+      }
+
+      // Get and clear dirty region from card table
+      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
+                                    MemRegion(nextAddr, endAddr));
+      assert(dirtyRegion.start() >= nextAddr,
+             "returned region inconsistent?");
+    }
+    lastAddr = dirtyRegion.end();
+    numDirtyCards =
+      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
+
+    if (!dirtyRegion.is_empty()) {
+      if (PrintGCDetails) {
+        stopTimer();
+      }
+      // Take the freelist and bit-map locks for the careful scan.
+      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
+      sample_eden();
+      assert(_markStack.isEmpty(), "should be empty");
+      assert(overflow_list_is_empty(), "should be empty");
+      assert(no_preserved_marks(), "no preserved marks");
+      if (PrintGCDetails) {
+        startTimer();
+      }
+      HeapWord* stop_point =
+        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+      if (stop_point != NULL) {
+        // The careful iteration stopped early because it found an
+        // uninitialized object.  Redirty the bits corresponding to the
+        // partially-scanned or unscanned cards, and start again at the
+        // next block boundary.
+        assert(CMSPermGenPrecleaningEnabled ||
+               (_collectorState == AbortablePreclean && should_abort_preclean()),
+               "Unparsable objects should only be in perm gen.");
+        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
+        if (should_abort_preclean()) {
+          break; // out of preclean loop
+        } else {
+          // Compute the next address at which preclean should pick up.
+          lastAddr = next_card_start_after_block(stop_point);
+        }
+      }
+      if (PrintGCDetails) {
+        stopTimer();
+      }
+    } else {
+      // No more dirty cards in [nextAddr, endAddr); done.
+      break;
+    }
+  }
+  if (PrintGCDetails) {
+    stopTimer();
+  }
+  assert(_markStack.isEmpty(), "should be empty");
+  assert(overflow_list_is_empty(), "should be empty");
+  assert(no_preserved_marks(), "no preserved marks");
+  return cumNumDirtyCards;
+}
+
+// The final (remark) checkpoint: performed at a safepoint with the world
+// stopped. In asynchronous mode, optionally does a scavenge first
+// (CMSScavengeBeforeRemark) and acquires the freelist and bit-map locks
+// before delegating to checkpointRootsFinalWork(); in synchronous mode
+// the locks are already held by the caller.
+void CMSCollector::checkpointRootsFinal(bool asynch,
+  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
+  assert(_collectorState == FinalMarking, "incorrect state transition?");
+  check_correct_thread_executing();
+  // world is stopped at this checkpoint
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "world should be stopped");
+
+  SpecializationStats::clear();
+  if (PrintGCDetails) {
+    gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
+                        _young_gen->used() / K,
+                        _young_gen->capacity() / K);
+  }
+  if (asynch) {
+    if (CMSScavengeBeforeRemark) {
+      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      // Temporarily set flag to false, GCH->do_collection will
+      // expect it to be false and set to true
+      FlagSetting fl(gch->_is_gc_active, false);
+      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", 
+	PrintGCDetails && Verbose, true, gclog_or_tty);)
+      // Collect only the generations below the CMS generation.
+      int level = _cmsGen->level() - 1;
+      if (level >= 0) {
+        gch->do_collection(true,        // full (i.e. force, see below)
+                           false,       // !clear_all_soft_refs
+                           0,           // size
+                           false,       // is_tlab
+                           level        // max_level
+                          );
+      }
+    }
+    FreelistLocker x(this);
+    MutexLockerEx y(bitMapLock(),
+                    Mutex::_no_safepoint_check_flag);
+    assert(!init_mark_was_synchronous, "but that's impossible!");
+    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
+  } else {
+    // already have all the locks
+    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
+                             init_mark_was_synchronous);
+  }
+  SpecializationStats::print();
+}
+
+// The body of the final remark pause. Requires the freelist locks and the
+// bit-map lock to be held. Re-scans roots and objects dirtied since the
+// initial mark (parallel or non-parallel rescan), processes references,
+// takes remedial action for any marking-stack / work-queue overflows
+// observed during the cycle, optionally verifies the heap, and finally
+// transitions the collector state to Sweeping.
+void CMSCollector::checkpointRootsFinalWork(bool asynch,
+  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
+
+  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
+
+  assert(haveFreelistLocks(), "must have free list locks");
+  assert_lock_strong(bitMapLock());
+
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->checkpoint_roots_final_begin();
+  }
+
+  ResourceMark rm;
+  HandleMark   hm;
+
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  if (cms_should_unload_classes()) {
+    CodeCache::gc_prologue();
+  }
+  assert(haveFreelistLocks(), "must have free list locks");
+  assert_lock_strong(bitMapLock());
+
+  if (!init_mark_was_synchronous) {
+    if (CMSScavengeBeforeRemark) {
+      // Heap already made parsable as a result of scavenge
+    } else {
+      gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+    }
+    // Update the saved marks which may affect the root scans.
+    gch->save_marks();
+  
+    {
+      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+  
+      // Note on the role of the mod union table:
+      // Since the marker in "markFromRoots" marks concurrently with
+      // mutators, it is possible for some reachable objects not to have been
+      // scanned. For instance, an only reference to an object A was
+      // placed in object B after the marker scanned B. Unless B is rescanned,
+      // A would be collected. Such updates to references in marked objects
+      // are detected via the mod union table which is the set of all cards
+      // dirtied since the first checkpoint in this GC cycle and prior to
+      // the most recent young generation GC, minus those cleaned up by the
+      // concurrent precleaning.
+      if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
+        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
+        do_remark_parallel();
+      } else {
+        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
+                    gclog_or_tty);
+        do_remark_non_parallel();
+      }
+    }
+  } else {
+    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
+    // The initial mark was stop-world, so there's no rescanning to
+    // do; go straight on to the next step below.
+    }
+  assert(_markStack.isEmpty(), "should be empty");
+  assert(overflow_list_is_empty(), "should be empty");
+  assert(no_preserved_marks(), "no preserved marks");
+
+  {
+    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
+    refProcessingWork(asynch, clear_all_soft_refs);
+  }
+  assert(_markStack.isEmpty(), "should be empty");
+  assert(overflow_list_is_empty(), "should be empty");
+  assert(no_preserved_marks(), "no preserved marks");
+
+  if (cms_should_unload_classes()) {
+    CodeCache::gc_epilogue();
+  }
+
+  // If we encountered any (marking stack / work queue) overflow
+  // events during the current CMS cycle, take appropriate
+  // remedial measures, where possible, so as to try and avoid
+  // recurrence of that condition.
+  assert(_markStack.isEmpty(), "No grey objects");
+  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
+                     _ser_kac_ovflw;
+  if (ser_ovflw > 0) {
+    if (PrintCMSStatistics != 0) {
+      gclog_or_tty->print_cr("Marking stack overflow (benign) "
+        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
+        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
+        _ser_kac_ovflw);
+    }
+    // Grow the marking stack and reset the per-cycle overflow counters.
+    _markStack.expand();
+    _ser_pmc_remark_ovflw = 0;
+    _ser_pmc_preclean_ovflw = 0;
+    _ser_kac_ovflw = 0;
+  }
+  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
+    if (PrintCMSStatistics != 0) {
+      gclog_or_tty->print_cr("Work queue overflow (benign) "
+        "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
+        _par_pmc_remark_ovflw, _par_kac_ovflw);
+    }
+    _par_pmc_remark_ovflw = 0;
+    _par_kac_ovflw = 0;
+  }
+  if (PrintCMSStatistics != 0) {
+     if (_markStack._hit_limit > 0) {
+       gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
+                              _markStack._hit_limit);
+     }
+     if (_markStack._failed_double > 0) {
+       gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
+                              " current capacity "SIZE_FORMAT,
+                              _markStack._failed_double,
+                              _markStack.capacity());
+     }
+  }
+  _markStack._hit_limit = 0;
+  _markStack._failed_double = 0;
+
+  if ((VerifyAfterGC || VerifyDuringGC) &&
+      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+    verify_after_remark();
+  }
+
+  // Change under the freelistLocks.
+  _collectorState = Sweeping;
+  // Call isAllClear() under bitMapLock
+  assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
+    " final marking");
+  if (UseAdaptiveSizePolicy) {
+    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
+  }
+}
+
+// Parallel remark task: one work() invocation per gang worker. Each
+// worker rescans young-gen roots and dirty old-space cards, and then
+// steals work from the other workers' task queues until all terminate.
+class CMSParRemarkTask: public AbstractGangTask {
+  CMSCollector* _collector;    // the collector on whose behalf we remark
+  WorkGang*     _workers;      // gang executing this task
+  int           _n_workers;    // number of workers requested
+  CompactibleFreeListSpace* _cms_space;   // old gen space to rescan
+  CompactibleFreeListSpace* _perm_space;  // perm gen space to rescan
+
+  // The per-thread work queues, available here for stealing.
+  OopTaskQueueSet*       _task_queues;
+  ParallelTaskTerminator _term;
+
+ public:
+  CMSParRemarkTask(CMSCollector* collector,
+                   CompactibleFreeListSpace* cms_space,
+                   CompactibleFreeListSpace* perm_space,
+                   int n_workers, WorkGang* workers,
+                   OopTaskQueueSet* task_queues):
+    AbstractGangTask("Rescan roots and grey objects in parallel"),
+    _collector(collector),
+    _cms_space(cms_space), _perm_space(perm_space),
+    _n_workers(n_workers),
+    _workers(workers),
+    _task_queues(task_queues),
+    // NOTE: the terminator is sized to the gang's total worker count,
+    // which may differ from the n_workers requested.
+    _term(workers->total_workers(), task_queues) { }
+
+  OopTaskQueueSet* task_queues() { return _task_queues; }
+
+  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
+
+  ParallelTaskTerminator* terminator() { return &_term; }
+
+  // Entry point executed by worker i of the gang.
+  void work(int i);
+
+ private:
+  // Work method in support of parallel rescan ... of young gen spaces
+  void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
+                             ContiguousSpace* space,
+                             HeapWord** chunk_array, size_t chunk_top);
+
+  // ... of  dirty cards in old space
+  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
+                                  Par_MarkRefsIntoAndScanClosure* cl);
+
+  // ... work stealing for the above
+  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
+};
+
+// Per-worker remark driver. The four sub-phases below are executed in
+// a deliberate order (young gen roots first, since they are coarsely
+// partitioned and likely on the critical path), each timed separately
+// when PrintCMSStatistics is enabled.
+void CMSParRemarkTask::work(int i) {
+  elapsedTimer _timer;
+  ResourceMark rm;
+  HandleMark   hm;
+
+  // ---------- rescan from roots --------------
+  _timer.start();
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
+    _collector->_span, _collector->ref_processor(),
+    &(_collector->_markBitMap),
+    work_queue(i), &(_collector->_revisitStack));
+
+  // Rescan young gen roots first since these are likely
+  // coarsely partitioned and may, on that account, constitute
+  // the critical path; thus, it's best to start off that
+  // work first.
+  // ---------- young gen roots --------------
+  {
+    DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
+    EdenSpace* eden_space = dng->eden();
+    ContiguousSpace* from_space = dng->from();
+    ContiguousSpace* to_space   = dng->to();
+
+    // Chunk arrays/indices sampled during the preclean phase; they
+    // partition eden and the from-survivor space into rescan tasks.
+    HeapWord** eca = _collector->_eden_chunk_array;
+    size_t     ect = _collector->_eden_chunk_index;
+    HeapWord** sca = _collector->_survivor_chunk_array;
+    size_t     sct = _collector->_survivor_chunk_index;
+
+    assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
+    assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
+
+    // to-space has no samples (NULL/0): it is scanned as a single task.
+    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
+    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
+    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
+
+    _timer.stop();
+    if (PrintCMSStatistics != 0) {
+      gclog_or_tty->print_cr(
+        "Finished young gen rescan work in %dth thread: %3.3f sec",
+        i, _timer.seconds());
+    }
+  }
+
+  // ---------- remaining roots --------------
+  _timer.reset();
+  _timer.start();
+  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
+				false,     // yg was scanned above
+				true,      // collecting perm gen
+                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+				NULL, &par_mrias_cl);
+  _timer.stop();
+  if (PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr(
+      "Finished remaining root rescan work in %dth thread: %3.3f sec",
+      i, _timer.seconds());
+  }
+
+  // ---------- rescan dirty cards ------------
+  _timer.reset();
+  _timer.start();
+
+  // Do the rescan tasks for each of the two spaces
+  // (cms_space and perm_space) in turn.
+  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
+  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
+  _timer.stop();
+  if (PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr(
+      "Finished dirty card rescan work in %dth thread: %3.3f sec",
+      i, _timer.seconds());
+  }
+
+  // ---------- steal work from other threads ...
+  // ---------- ... and drain overflow list.
+  _timer.reset();
+  _timer.start();
+  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
+  _timer.stop();
+  if (PrintCMSStatistics != 0) {
+    gclog_or_tty->print_cr(
+      "Finished work stealing in %dth thread: %3.3f sec",
+      i, _timer.seconds());
+  }
+}
+
+// Rescan one young gen space in parallel. chunk_array[0..chunk_top)
+// holds sampled addresses that partition the space into chunk_top+1
+// tasks; with no samples (chunk_top == 0) the whole space is one task.
+void
+CMSParRemarkTask::do_young_space_rescan(int i,
+  Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
+  HeapWord** chunk_array, size_t chunk_top) {
+  // Until all tasks completed:
+  // . claim an unclaimed task
+  // . compute region boundaries corresponding to task claimed
+  //   using chunk_array
+  // . par_oop_iterate(cl) over that region
+
+  ResourceMark rm;
+  HandleMark   hm;
+
+  SequentialSubTasksDone* pst = space->par_seq_tasks();
+  assert(pst->valid(), "Uninitialized use?");
+
+  int nth_task = 0;
+  int n_tasks  = pst->n_tasks();
+
+  HeapWord *start, *end;
+  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+    // We claimed task # nth_task; compute its boundaries.
+    if (chunk_top == 0) {  // no samples were taken
+      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+      start = space->bottom();
+      end   = space->top();
+    } else if (nth_task == 0) {
+      // First task: from the bottom of the space to the first sample.
+      start = space->bottom();
+      end   = chunk_array[nth_task];
+    } else if (nth_task < (jint)chunk_top) {
+      // Interior task: between two consecutive samples.
+      assert(nth_task >= 1, "Control point invariant");
+      start = chunk_array[nth_task - 1];
+      end   = chunk_array[nth_task];
+    } else {
+      // Last task: from the final sample to the top of the space.
+      assert(nth_task == (jint)chunk_top, "Control point invariant");
+      start = chunk_array[chunk_top - 1];
+      end   = space->top();
+    }
+    MemRegion mr(start, end);
+    // Verify that mr is in space
+    assert(mr.is_empty() || space->used_region().contains(mr),
+           "Should be in space");
+    // Verify that "start" is an object boundary
+    assert(mr.is_empty() || oop(mr.start())->is_oop(),
+           "Should be an oop");
+    space->par_oop_iterate(mr, cl);
+  }
+  pst->all_tasks_completed();
+}
+
+// Rescan the dirty cards of one old-gen space in parallel. The space
+// is carved into card/MUT-word-aligned chunks of rescan_task_size()
+// HeapWords; each claimed chunk has its dirty cards transferred from
+// the card table into the mod union table and then rescanned.
+void
+CMSParRemarkTask::do_dirty_card_rescan_tasks(
+  CompactibleFreeListSpace* sp, int i,
+  Par_MarkRefsIntoAndScanClosure* cl) {
+  // Until all tasks completed:
+  // . claim an unclaimed task
+  // . compute region boundaries corresponding to task claimed
+  // . transfer dirty bits ct->mut for that region
+  // . apply rescanclosure to dirty mut bits for that region
+
+  ResourceMark rm;
+  HandleMark   hm;
+
+  OopTaskQueue* work_q = work_queue(i);
+  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
+  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
+  // CAUTION: This closure has state that persists across calls to
+  // the work method dirty_range_iterate_clear() in that it has
+  // embedded in it a (subtype of) UpwardsObjectClosure. The
+  // use of that state in the embedded UpwardsObjectClosure instance
+  // assumes that the cards are always iterated (even if in parallel
+  // by several threads) in monotonically increasing order per each
+  // thread. This is true of the implementation below which picks
+  // card ranges (chunks) in monotonically increasing order globally
+  // and, a-fortiori, in monotonically increasing order per thread
+  // (the latter order being a subsequence of the former).
+  // If the work code below is ever reorganized into a more chaotic
+  // work-partitioning form than the current "sequential tasks"
+  // paradigm, the use of that persistent state will have to be
+  // revisited and modified appropriately. See also related
+  // bug 4756801 work on which should examine this code to make
+  // sure that the changes there do not run counter to the
+  // assumptions made here and necessary for correctness and
+  // efficiency. Note also that this code might yield inefficient
+  // behaviour in the case of very large objects that span one or
+  // more work chunks. Such objects would potentially be scanned
+  // several times redundantly. Work on 4756801 should try and
+  // address that performance anomaly if at all possible. XXX
+  MemRegion  full_span  = _collector->_span;
+  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
+  CMSMarkStack* rs = &(_collector->_revisitStack);   // shared
+  MarkFromDirtyCardsClosure
+    greyRescanClosure(_collector, full_span, // entire span of interest
+                      sp, bm, work_q, rs, cl);
+
+  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
+  assert(pst->valid(), "Uninitialized use?");
+  int nth_task = 0;
+  // Chunks are aligned so that one MUT word maps to whole cards only.
+  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
+  MemRegion span = sp->used_region();
+  HeapWord* start_addr = span.start();
+  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
+                                           alignment);
+  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
+  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
+         start_addr, "Check alignment");
+  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
+         chunk_size, "Check alignment");
+
+  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+    // Having claimed the nth_task, compute corresponding mem-region,
+    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
+    // The alignment restriction ensures that we do not need any
+    // synchronization with other gang-workers while setting or
+    // clearing bits in this chunk of the MUT.
+    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
+                                    start_addr + (nth_task+1)*chunk_size);
+    // The last chunk's end might be way beyond end of the
+    // used region. In that case pull back appropriately.
+    if (this_span.end() > end_addr) {
+      this_span.set_end(end_addr);
+      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
+    }
+    // Iterate over the dirty cards covering this chunk, marking them
+    // precleaned, and setting the corresponding bits in the mod union
+    // table. Since we have been careful to partition at Card and MUT-word
+    // boundaries no synchronization is needed between parallel threads.
+    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
+                                                 &modUnionClosure);
+
+    // Having transferred these marks into the modUnionTable,
+    // rescan the marked objects on the dirty cards in the modUnionTable.
+    // Even if this is at a synchronous collection, the initial marking
+    // may have been done during an asynchronous collection so there
+    // may be dirty bits in the mod-union table.
+    _collector->_modUnionTable.dirty_range_iterate_clear(
+                  this_span, &greyRescanClosure);
+    // Sanity: the rescan above should have cleared all MUT bits in
+    // this chunk.
+    _collector->_modUnionTable.verifyNoOneBitsInRange(
+                                 this_span.start(),
+                                 this_span.end());
+  }
+  pst->all_tasks_completed();  // declare that i am done
+}
+
+// . see if we can share work_queues with ParNew? XXX
+// Drain local work, refill from the global overflow list, and steal
+// from other workers' queues until the terminator reports that all
+// workers are done. seed is this worker's victim-selection seed.
+void
+CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
+                                int* seed) {
+  OopTaskQueue* work_q = work_queue(i);
+  NOT_PRODUCT(int num_steals = 0;)
+  oop obj_to_scan;
+  CMSBitMap* bm = &(_collector->_markBitMap);
+  // Cap overflow-list refills so one worker doesn't hog the list.
+  size_t num_from_overflow_list =
+           MIN2((size_t)work_q->max_elems()/4,
+                (size_t)ParGCDesiredObjsFromOverflowList);
+
+  while (true) {
+    // Completely finish any left over work from (an) earlier round(s)
+    cl->trim_queue(0);
+    // Now check if there's any work in the overflow list
+    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
+                                                work_q)) {
+      // found something in global overflow list;
+      // not yet ready to go stealing work from others.
+      // We'd like to assert(work_q->size() != 0, ...)
+      // because we just took work from the overflow list,
+      // but of course we can't since all of that could have
+      // been already stolen from us.
+      // "He giveth and He taketh away."
+      continue;
+    }
+    // Verify that we have no work before we resort to stealing
+    assert(work_q->size() == 0, "Have work, shouldn't steal");
+    // Try to steal from other queues that have work
+    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+      NOT_PRODUCT(num_steals++;)
+      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
+      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
+      // Do scanning work
+      obj_to_scan->oop_iterate(cl);
+      // Loop around, finish this work, and try to steal some more
+    } else if (terminator()->offer_termination()) {
+        break;  // nirvana from the infinite cycle
+    }
+  }
+  NOT_PRODUCT(
+    if (PrintCMSStatistics != 0) {
+      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
+    }
+  )
+  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
+         "Else our work is not yet done");
+}
+
+// Return a thread-local PLAB recording array, as appropriate.
+void* CMSCollector::get_data_recorder(int thr_num) {
+  // Recording requires the per-thread survivor PLAB arrays to exist.
+  if (_survivor_plab_array == NULL) {
+    return NULL;
+  }
+  // Record either unconditionally (CMSPLABRecordAlways) or only while
+  // the collector is strictly between Marking and FinalMarking.
+  if (!CMSPLABRecordAlways &&
+      !(_collectorState > Marking && _collectorState < FinalMarking)) {
+    return NULL;
+  }
+  assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
+  ChunkArray* ca = &_survivor_plab_array[thr_num];
+  ca->reset();   // clear it so that fresh data is recorded
+  return (void*) ca;
+}
+
+// Reset all the thread-local PLAB recording arrays
+void CMSCollector::reset_survivor_plab_arrays() {
+  // One recording array per parallel GC worker; clear each in turn.
+  for (uint tid = 0; tid < ParallelGCThreads; tid++) {
+    _survivor_plab_array[tid].reset();
+  }
+}
+
+// Merge the per-thread plab arrays into the global survivor chunk
+// array which will provide the partitioning of the survivor space
+// for CMS rescan.
+// This is an n-way merge of the (individually sorted) per-thread
+// sample arrays into a single sorted array, using one cursor per
+// thread; on exit _survivor_chunk_index is the merged count.
+void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
+  assert(_survivor_plab_array  != NULL, "Error");
+  assert(_survivor_chunk_array != NULL, "Error");
+  assert(_collectorState == FinalMarking, "Error");
+  for (uint j = 0; j < ParallelGCThreads; j++) {
+    _cursor[j] = 0;
+  }
+  HeapWord* top = surv->top();
+  size_t i;
+  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
+    HeapWord* min_val = top;          // Higher than any PLAB address
+    uint      min_tid = 0;            // position of min_val this round
+    // Find the smallest un-consumed sample across all threads.
+    for (uint j = 0; j < ParallelGCThreads; j++) {
+      ChunkArray* cur_sca = &_survivor_plab_array[j];
+      if (_cursor[j] == cur_sca->end()) {
+        continue;
+      }
+      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
+      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
+      if (cur_val < min_val) {
+        min_tid = j;
+        min_val = cur_val;
+      } else {
+        assert(cur_val < top, "All recorded addresses should be less");
+      }
+    }
+    // At this point min_val and min_tid are respectively
+    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
+    // and the thread (j) that witnesses that address.
+    // We record this address in the _survivor_chunk_array[i]
+    // and increment _cursor[min_tid] prior to the next round i.
+    if (min_val == top) {
+      // All per-thread arrays exhausted.
+      break;
+    }
+    _survivor_chunk_array[i] = min_val;
+    _cursor[min_tid]++;
+  }
+  // We are all done; record the size of the _survivor_chunk_array
+  _survivor_chunk_index = i; // exclusive: [0, i)
+  if (PrintCMSStatistics > 0) {
+    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
+  }
+  // Verify that we used up all the recorded entries
+  #ifdef ASSERT
+    size_t total = 0;
+    for (uint j = 0; j < ParallelGCThreads; j++) {
+      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
+      total += _cursor[j];
+    }
+    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
+    // Check that the merged array is in sorted order
+    if (total > 0) {
+      for (size_t i = 0; i < total - 1; i++) {
+        if (PrintCMSStatistics > 0) {
+          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
+                              i, _survivor_chunk_array[i]);
+        }
+        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
+               "Not sorted");
+      }
+    }
+  #endif // ASSERT
+}
+
+// Set up the space's par_seq_tasks structure for work claiming
+// for parallel rescan of young gen.
+// See ParRescanTask where this is currently used.
+// Eden and from-space task counts come from the chunk samples taken
+// during precleaning (index + 1 tasks); to-space is always one task.
+void
+CMSCollector::
+initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
+  assert(n_threads > 0, "Unexpected n_threads argument");
+  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
+
+  // Eden space
+  {
+    SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
+    assert(!pst->valid(), "Clobbering existing data?");
+    // Each valid entry in [0, _eden_chunk_index) represents a task.
+    size_t n_tasks = _eden_chunk_index + 1;
+    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
+    pst->set_par_threads(n_threads);
+    pst->set_n_tasks((int)n_tasks);
+  }
+
+  // Merge the survivor plab arrays into _survivor_chunk_array
+  // (must happen before from-space task setup below, which reads
+  // _survivor_chunk_index).
+  if (_survivor_plab_array != NULL) {
+    merge_survivor_plab_arrays(dng->from());
+  } else {
+    assert(_survivor_chunk_index == 0, "Error");
+  }
+
+  // To space
+  {
+    SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
+    assert(!pst->valid(), "Clobbering existing data?");
+    pst->set_par_threads(n_threads);
+    pst->set_n_tasks(1);
+    assert(pst->valid(), "Error");
+  }
+
+  // From space
+  {
+    SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
+    assert(!pst->valid(), "Clobbering existing data?");
+    size_t n_tasks = _survivor_chunk_index + 1;
+    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
+    pst->set_par_threads(n_threads);
+    pst->set_n_tasks((int)n_tasks);
+    assert(pst->valid(), "Error");
+  }
+}
+
+// Parallel version of remark
+// Sets up the per-space sequential subtask structures and runs a
+// CMSParRemarkTask on the heap's worker gang (or inline when only
+// one worker is configured).
+void CMSCollector::do_remark_parallel() {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  WorkGang* workers = gch->workers();
+  assert(workers != NULL, "Need parallel worker threads.");
+  int n_workers = workers->total_workers();
+  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
+  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
+
+  CMSParRemarkTask tsk(this,
+    cms_space, perm_space,
+    n_workers, workers, task_queues());
+
+  // Set up for parallel process_strong_roots work.
+  gch->set_par_threads(n_workers);
+  gch->change_strong_roots_parity();
+  // We won't be iterating over the cards in the card table updating
+  // the younger_gen cards, so we shouldn't call the following else
+  // the verification code as well as subsequent younger_refs_iterate
+  // code would get confused. XXX
+  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
+
+  // The young gen rescan work will not be done as part of
+  // process_strong_roots (which currently doesn't knw how to
+  // parallelize such a scan), but rather will be broken up into
+  // a set of parallel tasks (via the sampling that the [abortable]
+  // preclean phase did of EdenSpace, plus the [two] tasks of
+  // scanning the [two] survivor spaces. Further fine-grain
+  // parallelization of the scanning of the survivor spaces
+  // themselves, and of precleaning of the younger gen itself
+  // is deferred to the future.
+  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
+
+  // The dirty card rescan work is broken up into a "sequence"
+  // of parallel tasks (per constituent space) that are dynamically
+  // claimed by the parallel threads.
+  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
+  perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
+
+  // It turns out that even when we're using 1 thread, doing the work in a
+  // separate thread causes wide variance in run times.  We can't help this
+  // in the multi-threaded case, but we special-case n=1 here to get
+  // repeatable measurements of the 1-thread overhead of the parallel code.
+  if (n_workers > 1) {
+    // Make refs discovery MT-safe
+    ReferenceProcessorMTMutator mt(ref_processor(), true);
+    workers->run_task(&tsk);
+  } else {
+    tsk.work(0);
+  }
+  gch->set_par_threads(0);  // 0 ==> non-parallel.
+  // restore, single-threaded for now, any preserved marks
+  // as a result of work_q overflow
+  restore_preserved_marks_if_any();
+}
+
+// Non-parallel version of remark
+// Single-threaded remark: transfer dirty card marks to the mod union
+// table, rescan grey objects on those cards in the CMS and perm gens,
+// then rescan the strong roots (including younger gens).
+void CMSCollector::do_remark_non_parallel() {
+  ResourceMark rm;
+  HandleMark   hm;
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  MarkRefsIntoAndScanClosure
+    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
+             &_markStack, &_revisitStack, this,
+             false /* should_yield */, false /* not precleaning */);
+  MarkFromDirtyCardsClosure
+    markFromDirtyCardsClosure(this, _span,
+                              NULL,  // space is set further below
+                              &_markBitMap, &_markStack, &_revisitStack,
+                              &mrias_cl);
+  {
+    TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
+    // Iterate over the dirty cards, marking them precleaned, and
+    // setting the corresponding bits in the mod union table.
+    {
+      ModUnionClosure modUnionClosure(&_modUnionTable);
+      _ct->ct_bs()->dirty_card_iterate(
+                      _cmsGen->used_region(),
+                      &modUnionClosure);
+      _ct->ct_bs()->dirty_card_iterate(
+                      _permGen->used_region(),
+                      &modUnionClosure);
+    }
+    // Having transferred these marks into the modUnionTable, we just need
+    // to rescan the marked objects on the dirty cards in the modUnionTable.
+    // The initial marking may have been done during an asynchronous
+    // collection so there may be dirty bits in the mod-union table.
+    const int alignment =
+      CardTableModRefBS::card_size * BitsPerWord;
+    {
+      // ... First handle dirty cards in CMS gen
+      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
+      MemRegion ur = _cmsGen->used_region();
+      // Round the end up so whole MUT words are covered.
+      HeapWord* lb = ur.start();
+      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
+      MemRegion cms_span(lb, ub);
+      _modUnionTable.dirty_range_iterate_clear(cms_span,
+                                               &markFromDirtyCardsClosure);
+      assert(_markStack.isEmpty(), "mark stack should be empty");
+      assert(overflow_list_is_empty(), "overflow list should be empty");
+      if (PrintCMSStatistics != 0) {
+        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
+          markFromDirtyCardsClosure.num_dirty_cards());
+      }
+    }
+    {
+      // .. and then repeat for dirty cards in perm gen
+      markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
+      MemRegion ur = _permGen->used_region();
+      HeapWord* lb = ur.start();
+      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
+      MemRegion perm_span(lb, ub);
+      _modUnionTable.dirty_range_iterate_clear(perm_span,
+                                               &markFromDirtyCardsClosure);
+      assert(_markStack.isEmpty(), "mark stack should be empty");
+      assert(overflow_list_is_empty(), "overflow list should be empty");
+      if (PrintCMSStatistics != 0) {
+        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
+          markFromDirtyCardsClosure.num_dirty_cards());
+      }
+    }
+  }
+  if (VerifyDuringGC &&
+      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(true);
+  }
+  {
+    TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
+
+    assert(_markStack.isEmpty(), "should be empty");
+    assert(overflow_list_is_empty(), "overflow list should be empty");
+
+    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+    gch->gen_process_strong_roots(_cmsGen->level(),
+				  true,  // younger gens as roots
+				  true,  // collecting perm gen
+                                  SharedHeap::ScanningOption(roots_scanning_options()),
+				  NULL, &mrias_cl);
+  }
+  assert(_markStack.isEmpty(), "should be empty");
+  assert(overflow_list_is_empty(), "overflow list should be empty");
+  // Restore evacuated mark words, if any, used for overflow list links
+  if (!CMSOverflowEarlyRestoration) {
+    restore_preserved_marks_if_any();
+  }
+  assert(no_preserved_marks(), "no preserved marks");
+}
+
+////////////////////////////////////////////////////////
+// Parallel Reference Processing Task Proxy Class
+////////////////////////////////////////////////////////
+// Adapts a reference-processor ProcessTask to the AbstractGangTask
+// interface so it can be run by the heap's worker gang, adding
+// work-stealing support when the task marks oops alive.
+class CMSRefProcTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+  CMSCollector*          _collector;
+  CMSBitMap*             _mark_bit_map;
+  MemRegion              _span;
+  OopTaskQueueSet*       _task_queues;
+  ParallelTaskTerminator _term;
+  ProcessTask&           _task;
+
+public:
+  CMSRefProcTaskProxy(ProcessTask&     task,
+                      CMSCollector*    collector,
+                      const MemRegion& span,
+                      CMSBitMap*       mark_bit_map,
+                      int              total_workers,
+                      OopTaskQueueSet* task_queues):
+    AbstractGangTask("Process referents by policy in parallel"),
+    _task(task),
+    _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
+    _task_queues(task_queues),
+    _term(total_workers, task_queues)
+    { }
+
+  OopTaskQueueSet* task_queues() { return _task_queues; }
+
+  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
+
+  ParallelTaskTerminator* terminator() { return &_term; }
+
+  // Steal marking work from other workers' queues until termination.
+  void do_work_steal(int i,
+                     CMSParDrainMarkingStackClosure* drain,
+                     CMSParKeepAliveClosure* keep_alive,
+                     int* seed);
+
+  virtual void work(int i);
+};
+
+// Worker i runs the wrapped reference-processing task with per-worker
+// keep-alive and drain closures, then (if the task marks oops alive)
+// joins the work-stealing/termination protocol.
+void CMSRefProcTaskProxy::work(int i) {
+  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
+                                        _mark_bit_map, work_queue(i));
+  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
+                                                 _mark_bit_map, work_queue(i));
+  CMSIsAliveClosure is_alive_closure(_mark_bit_map);
+  _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
+  if (_task.marks_oops_alive()) {
+    do_work_steal(i, &par_drain_stack, &par_keep_alive,
+                  _collector->hash_seed(i));
+  }
+}
+
+// Adapts a reference-processor EnqueueTask to the AbstractGangTask
+// interface; each worker simply forwards to the wrapped task.
+class CMSRefEnqueueTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+  EnqueueTask& _task;
+
+public:
+  CMSRefEnqueueTaskProxy(EnqueueTask& task)
+    : AbstractGangTask("Enqueue reference objects in parallel"),
+      _task(task)
+  { }
+
+  // Delegate worker i's share of the work to the wrapped task.
+  virtual void work(int i) { _task.work(i); }
+};
+
+// Construct a per-worker keep-alive closure; _low_water_mark bounds
+// how far the work queue is drained before more work is admitted.
+CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
+  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
+   _collector(collector),
+   _span(span),
+   _bit_map(bit_map),
+   _work_queue(work_queue),
+   _mark_and_push(collector, span, bit_map, work_queue),
+   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
+                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
+{ }
+
+// . see if we can share work_queues with ParNew? XXX
+// Drain local marking work, then steal from other workers' queues
+// until the terminator reports global completion. Unlike the remark
+// variant above, there is no global overflow list to refill from.
+void CMSRefProcTaskProxy::do_work_steal(int i,
+  CMSParDrainMarkingStackClosure* drain,
+  CMSParKeepAliveClosure* keep_alive,
+  int* seed) {
+  OopTaskQueue* work_q = work_queue(i);
+  NOT_PRODUCT(int num_steals = 0;)
+  oop obj_to_scan;
+
+  while (true) {
+    // Completely finish any left over work from (an) earlier round(s)
+    drain->trim_queue(0);
+    // Verify that we have no work before we resort to stealing
+    assert(work_q->size() == 0, "Have work, shouldn't steal");
+    // Try to steal from other queues that have work
+    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+      NOT_PRODUCT(num_steals++;)
+      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
+      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
+      // Do scanning work
+      obj_to_scan->oop_iterate(keep_alive);
+      // Loop around, finish this work, and try to steal some more
+    } else if (terminator()->offer_termination()) {
+      break;  // nirvana from the infinite cycle
+    }
+  }
+  NOT_PRODUCT(
+    if (PrintCMSStatistics != 0) {
+      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
+    }
+  )
+}
+
+// Run a reference-processing ProcessTask across the heap's worker
+// gang, wrapped in a CMSRefProcTaskProxy for queue/termination setup.
+void CMSRefProcTaskExecutor::execute(ProcessTask& task)
+{
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  WorkGang* workers = gch->workers();
+  assert(workers != NULL, "Need parallel worker threads.");
+  int n_workers = workers->total_workers();
+  CMSRefProcTaskProxy rp_task(task, &_collector,
+                              _collector.ref_processor()->span(),
+                              _collector.markBitMap(),
+                              n_workers, _collector.task_queues());
+  workers->run_task(&rp_task);
+}
+
+// Run a reference-enqueueing EnqueueTask across the heap's worker
+// gang via a CMSRefEnqueueTaskProxy.
+void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
+{
+  WorkGang* workers = GenCollectedHeap::heap()->workers();
+  assert(workers != NULL, "Need parallel worker threads.");
+  CMSRefEnqueueTaskProxy enq_task(task);
+  workers->run_task(&enq_task);
+}
+
+void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
+
+  ResourceMark rm;
+  HandleMark   hm;
+  ReferencePolicy* soft_ref_policy;
+
+  assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
+  // Process weak references.
+  if (clear_all_soft_refs) {
+    soft_ref_policy = new AlwaysClearPolicy();
+  } else {
+#ifdef COMPILER2
+    soft_ref_policy = new LRUMaxHeapPolicy();
+#else
+    soft_ref_policy = new LRUCurrentHeapPolicy();
+#endif // COMPILER2
+  }
+  assert(_markStack.isEmpty(), "mark stack should be empty");
+  assert(overflow_list_is_empty(), "overflow list should be empty");
+
+  ReferenceProcessor* rp = ref_processor();
+  assert(rp->span().equals(_span), "Spans should be equal");
+  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
+                                          &_markStack);
+  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
+                                _span, &_markBitMap, &_markStack,
+                                &cmsKeepAliveClosure);
+  {
+    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
+    if (rp->processing_is_mt()) {
+      CMSRefProcTaskExecutor task_executor(*this);
+      rp->process_discovered_references(soft_ref_policy, 
+                                        &_is_alive_closure,
+                                        &cmsKeepAliveClosure,
+                                        &cmsDrainMarkingStackClosure,
+                                        &task_executor);
+    } else {
+      rp->process_discovered_references(soft_ref_policy,
+                                        &_is_alive_closure,
+                                        &cmsKeepAliveClosure,
+                                        &cmsDrainMarkingStackClosure,
+                                        NULL);
+    }
+    assert(_markStack.isEmpty(), "mark stack should be empty");
+    assert(overflow_list_is_empty(), "overflow list should be empty");
+  }
+
+  if (cms_should_unload_classes()) {
+    {
+      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
+
+      // Follow SystemDictionary roots and unload classes
+      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
+
+      // Follow CodeCache roots and unload any methods marked for unloading
+      CodeCache::do_unloading(&_is_alive_closure,
+                              &cmsKeepAliveClosure,
+                              purged_class);
+
+      cmsDrainMarkingStackClosure.do_void();
+      assert(_markStack.isEmpty(), "just drained");
+      assert(overflow_list_is_empty(), "just drained");
+
+      // Update subklass/sibling/implementor links in KlassKlass descendants
+      assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
+      oop k;
+      while ((k = _revisitStack.pop()) != NULL) {
+        ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
+                       &_is_alive_closure,
+                       &cmsKeepAliveClosure);
+      }
+      assert(!ClassUnloading ||
+             (_markStack.isEmpty() && overflow_list_is_empty()),
+             "Should not have found new reachable objects");
+      assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
+      cmsDrainMarkingStackClosure.do_void();
+      assert(_markStack.isEmpty(), "just drained");
+      assert(overflow_list_is_empty(), "just drained");
+  
+    }
+     
+    {
+      TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
+      // Now clean up stale oops in SymbolTable and StringTable
+      SymbolTable::unlink(&_is_alive_closure);
+      StringTable::unlink(&_is_alive_closure);
+    }
+  }
+
+  assert(_markStack.isEmpty(), "tautology");
+  assert(overflow_list_is_empty(), "tautology");
+  // Restore any preserved marks as a result of mark stack or
+  // work queue overflow
+  restore_preserved_marks_if_any();  // done single-threaded for now
+
+  rp->set_enqueuing_is_done(true);
+  if (rp->processing_is_mt()) {
+    CMSRefProcTaskExecutor task_executor(*this);
+    rp->enqueue_discovered_references(&task_executor);
+  } else {
+    rp->enqueue_discovered_references(NULL);
+  }
+  rp->verify_no_references_recorded();
+  assert(!rp->discovery_enabled(), "should have been disabled");
+
+  // JVMTI object tagging is based on JNI weak refs. If any of these
+  // refs were cleared then JVMTI needs to update its maps and
+  // maybe post ObjectFrees to agents.
+  JvmtiExport::cms_ref_processing_epilogue();
+}
+
+#ifndef PRODUCT
+// Debug-only sanity check: a collector phase may be executed only by the
+// VM thread or by the CMS (concurrent GC) thread, and the identity of the
+// executing thread must be consistent with the collector's
+// synchronization state (_foregroundGCShouldWait, _collectorState).
+void CMSCollector::check_correct_thread_executing() {
+  Thread* t = Thread::current();
+  // Only the VM thread or the CMS thread should be here.
+  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
+         "Unexpected thread type");
+  // If this is the vm thread, the foreground process 
+  // should not be waiting.  Note that _foregroundGCIsActive is 
+  // true while the foreground collector is waiting.
+  if (_foregroundGCShouldWait) {
+    // We cannot be the VM thread
+    assert(t->is_ConcurrentGC_thread(),
+           "Should be CMS thread");
+  } else {
+    // We can be the CMS thread only if we are in a stop-world
+    // phase of CMS collection.
+    if (t->is_ConcurrentGC_thread()) {
+      assert(_collectorState == InitialMarking ||
+             _collectorState == FinalMarking, 
+             "Should be a stop-world phase");
+      // The CMS thread should be holding the CMS_token.
+      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+             "Potential interference with concurrently "
+             "executing VM thread");
+    }
+  }
+}
+#endif
+
+// Sweeping phase entry point: sweep the CMS old generation (and, when
+// classes are being unloaded, the perm generation), update heap-occupancy
+// statistics, and make the Sweeping -> Resizing state transition under the
+// freelist locks. 'asynch' selects the concurrent (background) case, where
+// the CMS token and the needed locks are acquired here; in the synchronous
+// (foreground) case the caller already holds them.
+void CMSCollector::sweep(bool asynch) {
+  assert(_collectorState == Sweeping, "just checking");
+  check_correct_thread_executing();
+  incrementSweepCount();
+  // Stop the inter-sweep timer and feed its duration to the estimator.
+  _sweep_timer.stop();
+  _sweep_estimate.sample(_sweep_timer.seconds());
+  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
+
+  // PermGen verification support: If perm gen sweeping is disabled in
+  // this cycle, we preserve the perm gen object "deadness" information
+  // in the perm_gen_verify_bit_map. In order to do that we traverse
+  // all blocks in perm gen and mark all dead objects.
+  if (verifying() && !cms_should_unload_classes()) {
+    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
+                             bitMapLock());
+    assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
+           "Should have already been allocated");
+    MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
+                               markBitMap(), perm_gen_verify_bit_map());
+    _permGen->cmsSpace()->blk_iterate(&mdo);
+  }
+
+  if (asynch) {
+    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
+    // First sweep the old gen then the perm gen
+    {
+      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
+                               bitMapLock());
+      sweepWork(_cmsGen, asynch);
+    }
+
+    // Now repeat for perm gen
+    if (cms_should_unload_classes()) {
+      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
+                             bitMapLock());
+      sweepWork(_permGen, asynch);
+    }
+
+    // Update Universe::_heap_*_at_gc figures.
+    // We need all the free list locks to make the abstract state
+    // transition from Sweeping to Resetting. See detailed note
+    // further below.
+    {
+      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
+                               _permGen->freelistLock());
+      // Update heap occupancy information which is used as
+      // input to soft ref clearing policy at the next gc.
+      Universe::update_heap_info_at_gc();
+      _collectorState = Resizing;
+    }
+  } else {
+    // already have needed locks
+    sweepWork(_cmsGen,  asynch);
+
+    if (cms_should_unload_classes()) {
+      sweepWork(_permGen, asynch);
+    }
+    // Update heap occupancy information which is used as
+    // input to soft ref clearing policy at the next gc.
+    Universe::update_heap_info_at_gc();
+    _collectorState = Resizing;
+  }
+  // Restart the timer that measures the interval until the next sweep.
+  _sweep_timer.reset();
+  _sweep_timer.start();
+
+  update_time_of_last_gc(os::javaTimeMillis());
+
+  // NOTE on abstract state transitions:
+  // Mutators allocate-live and/or mark the mod-union table dirty
+  // based on the state of the collection.  The former is done in
+  // the interval [Marking, Sweeping] and the latter in the interval
+  // [Marking, Sweeping).  Thus the transitions into the Marking state
+  // and out of the Sweeping state must be synchronously visible 
+  // globally to the mutators.
+  // The transition into the Marking state happens with the world
+  // stopped so the mutators will globally see it.  Sweeping is
+  // done asynchronously by the background collector so the transition
+  // from the Sweeping state to the Resizing state must be done
+  // under the freelistLock (as is the check for whether to 
+  // allocate-live and whether to dirty the mod-union table).
+  assert(_collectorState == Resizing, "Change of collector state to"
+    " Resizing must be done under the freelistLocks (plural)");
+
+  // Now that sweeping has been completed, if the GCH's
+  // incremental_collection_will_fail flag is set, clear it,
+  // thus inviting a younger gen collection to promote into
+  // this generation. If such a promotion may still fail,
+  // the flag will be set again when a young collection is
+  // attempted.
+  // I think the incremental_collection_will_fail flag's use
+  // is specific to a 2 generation collection policy, so i'll
+  // assert that that's the configuration we are operating within.
+  // The use of the flag can and should be generalized appropriately
+  // in the future to deal with a general n-generation system.
+
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->collector_policy()->is_two_generation_policy(),
+         "Resetting of incremental_collection_will_fail flag"
+         " may be incorrect otherwise");
+  gch->clear_incremental_collection_will_fail();
+  gch->update_full_collections_completed(_collection_count_start);
+}
+
+// FIX ME!!! Looks like this belongs in CFLSpace, with
+// CMSGen merely delegating to it.
+// Record in the CMS space an address 99.9% of the way from the bottom of
+// the space to the largest free chunk (or to the end of the space if the
+// dictionary is empty); consulted via isNearLargestChunk() below.
+void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
+  double nearLargestPercent = 0.999;
+  HeapWord*  minAddr        = _cmsSpace->bottom();
+  HeapWord*  largestAddr    = 
+    (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
+  if (largestAddr == 0) {
+    // The dictionary appears to be empty.  In this case 
+    // try to coalesce at the end of the heap.
+    largestAddr = _cmsSpace->end();
+  }
+  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
+  // NOTE(review): if largestOffset * nearLargestPercent < MinChunkSize the
+  // subtraction below underflows size_t -- presumably prevented by minimum
+  // heap sizing; confirm.
+  size_t nearLargestOffset =
+    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
+  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
+}
+
+// Returns true iff addr lies at or beyond the cached near-largest-chunk
+// boundary recorded by setNearLargestChunk().
+bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
+  HeapWord* const boundary = _cmsSpace->nearLargestChunk();
+  return !(addr < boundary);
+}
+
+// Thin delegation: the underlying free-list space locates its own
+// terminal chunk.
+FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
+  FreeChunk* const terminal_chunk = _cmsSpace->find_chunk_at_end();
+  return terminal_chunk;
+}
+
+// Called after the next lower (younger) level has been collected; record
+// end-of-young-collection statistics unless this was a full collection.
+void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
+						    bool full) {
+  if (full) {
+    return;
+  }
+  // Only react when it is our immediate junior generation that was
+  // collected.
+  if (current_level + 1 == level()) {
+    collector()->stats().record_gc0_end(used());
+  }
+}
+
+// Fetch the CMS adaptive size policy hanging off the GenCollectedHeap's
+// generation policy, asserting (in debug builds) that both the heap kind
+// and the policy kind are what CMS expects before the downcast.
+CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
+    "Wrong type of heap");
+  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
+    gch->gen_policy()->size_policy();
+  assert(sp->is_gc_cms_adaptive_size_policy(),
+    "Wrong type of size policy");
+  return sp; 
+}
+
+// Debug aid: cycle the collection type through the enum, wrapping around
+// before the Unknown_collection_type sentinel.
+void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
+  if (PrintGCDetails && Verbose) {
+    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
+  }
+  // Advance and wrap in a single step.
+  int next_type = (_debug_collection_type + 1) % Unknown_collection_type;
+  _debug_collection_type = (CollectionTypes) next_type;
+  if (PrintGCDetails && Verbose) {
+    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
+  }
+}
+
+// Sweep a single generation: walk its space block-by-block, reclaiming and
+// coalescing unmarked (dead/free) blocks, then close out the free-list
+// census. The caller must hold the CMS token, the generation's freelist
+// lock, and the bit-map lock (asserted below).
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
+  bool asynch) {
+  // We iterate over the space(s) underlying this generation,
+  // checking the mark bit map to see if the bits corresponding
+  // to specific blocks are marked or not. Blocks that are
+  // marked are live and are not swept up. All remaining blocks
+  // are swept up, with coalescing on-the-fly as we sweep up
+  // contiguous free and/or garbage blocks:
+  // We need to ensure that the sweeper synchronizes with allocators
+  // and stop-the-world collectors. In particular, the following
+  // locks are used:
+  // . CMS token: if this is held, a stop the world collection cannot occur
+  // . freelistLock: if this is held no allocation can occur from this
+  //                 generation by another thread
+  // . bitMapLock: if this is held, no other thread can access or update
+  //                 the marking bit map
+    
+  // Note that we need to hold the freelistLock if we use
+  // block iterate below; else the iterator might go awry if
+  // a mutator (or promotion) causes block contents to change
+  // (for instance if the allocator divvies up a block).
+  // If we hold the free list lock, for all practical purposes
+  // young generation GC's can't occur (they'll usually need to
+  // promote), so we might as well prevent all young generation
+  // GC's while we do a sweeping step. For the same reason, we might
+  // as well take the bit map lock for the entire duration
+  
+  // check that we hold the requisite locks
+  assert(have_cms_token(), "Should hold cms token");
+  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
+         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
+        "Should possess CMS token to sweep");
+  assert_lock_strong(gen->freelistLock());
+  assert_lock_strong(bitMapLock());
+  
+  assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
+  // Open the free-list census for this sweep, seeded with the measured
+  // inter-sweep interval and its padded average.
+  gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
+                                      _sweep_estimate.padded_average());
+  gen->setNearLargestChunk();
+
+  {
+    SweepClosure sweepClosure(this, gen, &_markBitMap,
+                            CMSYield && asynch);
+    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
+    // We need to free-up/coalesce garbage/blocks from a
+    // co-terminal free run. This is done in the SweepClosure
+    // destructor; so, do not remove this scope, else the
+    // end-of-sweep-census below will be off by a little bit.
+  }
+  gen->cmsSpace()->sweep_completed();
+  gen->cmsSpace()->endSweepFLCensus(sweepCount());
+}
+
+// Reset CMS data structures (for now just the marking bit map)
+// preparatory for the next cycle.
+// In the asynchronous case the bit map is cleared in CMSBitMapYieldQuantum
+// sized chunks, yielding the CMS token between chunks so that foreground
+// activity is not locked out for the whole clear.
+void CMSCollector::reset(bool asynch) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSAdaptiveSizePolicy* sp = size_policy();
+  AdaptiveSizePolicyOutput(sp, gch->total_collections());
+  if (asynch) {
+    CMSTokenSyncWithLocks ts(true, bitMapLock());
+
+    // If the state is not "Resetting", the foreground  thread
+    // has done a collection and the resetting.
+    if (_collectorState != Resetting) {
+      assert(_collectorState == Idling, "The state should only change"
+	" because the foreground collector has finished the collection");
+      return;
+    }
+
+    // Clear the mark bitmap (no grey objects to start with)
+    // for the next cycle.
+    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
+
+    HeapWord* curAddr = _markBitMap.startWord();
+    while (curAddr < _markBitMap.endWord()) {
+      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr); 
+      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+      _markBitMap.clear_large_range(chunk);
+      // Honor a pending yield request between chunks: drop the bit-map
+      // lock and the CMS token, wait a little, then re-acquire both.
+      if (ConcurrentMarkSweepThread::should_yield() &&
+          !foregroundGCIsActive() &&
+          CMSYield) {
+        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+               "CMS thread should hold CMS token");
+        assert_lock_strong(bitMapLock());
+        bitMapLock()->unlock();
+        ConcurrentMarkSweepThread::desynchronize(true);
+        ConcurrentMarkSweepThread::acknowledge_yield_request();
+        stopTimer();
+        if (PrintCMSStatistics != 0) {
+          incrementYields();
+        }
+        icms_wait();
+
+	// See the comment in coordinator_yield()
+	for (unsigned i = 0; i < CMSYieldSleepCount &&
+	                ConcurrentMarkSweepThread::should_yield() &&
+	                !CMSCollector::foregroundGCIsActive(); ++i) {
+	  os::sleep(Thread::current(), 1, false);    
+	  ConcurrentMarkSweepThread::acknowledge_yield_request();
+	}
+
+        ConcurrentMarkSweepThread::synchronize(true);
+        bitMapLock()->lock_without_safepoint_check();
+        startTimer();
+      }
+      curAddr = chunk.end();
+    }
+    _collectorState = Idling;
+  } else {
+    // already have the lock
+    assert(_collectorState == Resetting, "just checking");
+    assert_lock_strong(bitMapLock());
+    _markBitMap.clear_all();
+    _collectorState = Idling;
+  }
+
+  // Stop incremental mode after a cycle completes, so that any future cycles
+  // are triggered by allocation.
+  stop_icms();
+
+  NOT_PRODUCT(
+    if (RotateCMSCollectionTypes) {
+      _cmsGen->rotate_debug_collection_type();
+    }
+  )
+}
+
+// Execute a stop-the-world CMS operation (initial mark or final remark)
+// with CPU-time, GC-trace and collector-counter accounting; any other
+// op value is a fatal error.
+void CMSCollector::do_CMS_operation(CMS_op_type op) {
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
+  TraceCollectorStats tcs(counters());
+
+  switch (op) {
+    case CMS_op_checkpointRootsInitial: {
+      checkpointRootsInitial(true);       // asynch
+      if (PrintGC) {
+        _cmsGen->printOccupancy("initial-mark");
+      }
+      break;
+    }
+    case CMS_op_checkpointRootsFinal: {
+      checkpointRootsFinal(true,    // asynch
+                           false,   // !clear_all_soft_refs
+                           false);  // !init_mark_was_synchronous
+      if (PrintGC) {
+        _cmsGen->printOccupancy("remark");
+      }
+      break;
+    }
+    default:
+      fatal("No such CMS_op");
+  }
+}
+
+#ifndef PRODUCT
+// Debug-only: words at the front of a block occupied by the FreeChunk
+// header. NOTE(review): presumably FreeChunk::header_size() is already
+// expressed in HeapWords, matching this function's name -- confirm.
+size_t const CMSCollector::skip_header_HeapWords() {
+  return FreeChunk::header_size();
+}
+
+// Try and collect here conditions that should hold when
+// CMS thread is exiting. The idea is that the foreground GC
+// thread should not be blocked if it wants to terminate
+// the CMS thread and yet continue to run the VM for a while
+// after that.
+void CMSCollector::verify_ok_to_terminate() const {
+  assert(Thread::current()->is_ConcurrentGC_thread(), 
+         "should be called by CMS thread");
+  assert(!_foregroundGCShouldWait, "should be false");
+  // We could check here that all the various low-level locks
+  // are not held by the CMS thread, but that is overkill; see
+  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
+  // is checked.
+}
+#endif
+
+// Compute the size of a block whose extent is recorded via "Printezis
+// marks": bits set at addr and addr+1 denote such a block, and the next
+// set bit at or after addr+2 marks the block's last word, so the size is
+// (that address + 1) - addr.
+size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
+  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
+         "missing Printezis mark?");
+  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
+  size_t size = pointer_delta(nextOneAddr + 1, addr);
+  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
+         "alignment problem");
+  assert(size >= 3, "Necessary for Printezis marks to work");
+  return size;
+}
+
+// A variant of the above (block_size_using_printezis_bits()) except
+// that we return 0 if the P-bits are not yet set.
+size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
+  if (_markBitMap.isMarked(addr)) {
+    // Both bits of a Printezis mark must be present together.
+    assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
+    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
+    size_t size = pointer_delta(nextOneAddr + 1, addr);
+    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
+           "alignment problem");
+    assert(size >= 3, "Necessary for Printezis marks to work");
+    return size;
+  } else {
+    assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
+    return 0;
+  }
+}
+
+// Return the start of the first card boundary at or beyond the end of the
+// block starting at addr. The block size comes from the object's own
+// size() when its header is parsable, otherwise from the Printezis bits.
+HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
+  size_t sz = 0;
+  oop p = (oop)addr;
+  if (p->klass() != NULL && p->is_parsable()) {
+    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
+  } else {
+    sz = block_size_using_printezis_bits(addr);
+  }
+  assert(sz > 0, "size must be nonzero");
+  HeapWord* next_block = addr + sz;
+  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
+                                             CardTableModRefBS::card_size);
+  // The result must lie on a later card than addr's own.
+  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
+         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
+         "must be different cards");
+  return next_card;
+}
+
+
+// CMS Bit Map Wrapper /////////////////////////////////////////
+
+// Construct a CMS bit map infrastructure, but don't create the 
+// bit vector itself. That is done by a separate call CMSBitMap::allocate()
+// further below.
+// Each bit covers 2^shifter HeapWords (see allocate()/covers()); a
+// negative mutex_rank means the map is used without its own lock.
+CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
+  _bm(NULL,0),
+  _shifter(shifter),
+  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
+{
+  _bmStartWord = 0;
+  _bmWordSize  = 0;
+}
+
+// Reserve and commit backing store for the bit map covering mr (one bit
+// per 2^_shifter HeapWords); returns false, with a warning, if either the
+// reservation or the commit fails.
+bool CMSBitMap::allocate(MemRegion mr) {
+  _bmStartWord = mr.start();
+  _bmWordSize  = mr.word_size();
+  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
+                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
+  if (!brs.is_reserved()) {
+    warning("CMS bit map allocation failure");
+    return false;
+  }
+  // For now we'll just commit all of the bit map up front.
+  // Later on we'll try to be more parsimonious with swap.
+  if (!_virtual_space.initialize(brs, brs.size())) {
+    warning("CMS bit map backing store failure");
+    return false;
+  }
+  assert(_virtual_space.committed_size() == brs.size(),
+         "didn't reserve backing store for all of CMS bit map?");
+  _bm.set_map((uintptr_t*)_virtual_space.low());
+  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
+         _bmWordSize, "inconsistency in bit map sizing");
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  // bm.clear(); // can we rely on getting zero'd memory? verify below
+  assert(isAllClear(),
+         "Expected zero'd memory from ReservedSpace constructor");
+  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
+         "consistency check");
+  return true;
+}
+
+// Apply cl to each maximal marked ("dirty") sub-region of mr, clearing
+// the corresponding bits as they are read; returns early once the
+// remainder of mr contains no marked bits.
+void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
+  HeapWord *next_addr, *end_addr, *last_addr;
+  assert_locked();
+  assert(covers(mr), "out-of-range error");
+  // XXX assert that start and end are appropriately aligned
+  for (next_addr = mr.start(), end_addr = mr.end();
+       next_addr < end_addr; next_addr = last_addr) {
+    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
+    last_addr = dirty_region.end();
+    if (!dirty_region.is_empty()) {
+      cl->do_MemRegion(dirty_region);
+    } else {
+      // An empty dirty region means no further marked bits before end.
+      assert(last_addr == end_addr, "program logic");
+      return;
+    }
+  }
+}
+
+#ifndef PRODUCT
+// Debug-only: check that the bit map's lock is held, via CMSLockVerifier.
+void CMSBitMap::assert_locked() const {
+  CMSLockVerifier::assert_locked(lock());
+}
+
+// Debug-only: does this bit map cover the given region?
+bool CMSBitMap::covers(MemRegion mr) const {
+  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
+  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
+         "size inconsistency");
+  return (mr.start() >= _bmStartWord) &&
+         (mr.end()   <= endWord());
+}
+
+// Debug-only: does this bit map cover [start, start + size)?
+bool CMSBitMap::covers(HeapWord* start, size_t size) const {
+    return (start >= _bmStartWord && (start + size) <= endWord());
+}
+
+void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
+  // verify that there are no 1 bits in the interval [left, right)
+  FalseBitMapClosure falseBitMapClosure;
+  iterate(&falseBitMapClosure, left, right);
+}
+
+// Debug-only: sanity-check that mr is non-empty, covered by this map,
+// and aligned so that it maps onto a non-empty range of bits.
+void CMSBitMap::region_invariant(MemRegion mr)
+{
+  assert_locked();
+  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
+  assert(!mr.is_empty(), "unexpected empty region");
+  assert(covers(mr), "mr should be covered by bit map");
+  // convert address range into offset range
+  size_t start_ofs = heapWordToOffset(mr.start());
+  // Make sure that end() is appropriately aligned
+  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
+                        (1 << (_shifter+LogHeapWordSize))),
+         "Misaligned mr.end()");
+  size_t end_ofs   = heapWordToOffset(mr.end());
+  assert(end_ofs > start_ofs, "Should mark at least one bit");
+}
+
+#endif
+
+// Reserve and commit backing store for a mark stack holding 'size' oops;
+// returns false, with a warning, on reservation or commit failure.
+bool CMSMarkStack::allocate(size_t size) {
+  // allocate a stack of the requisite depth
+  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
+                   size * sizeof(oop)));
+  if (!rs.is_reserved()) {
+    warning("CMSMarkStack allocation failure");
+    return false;
+  }
+  if (!_virtual_space.initialize(rs, rs.size())) {
+    warning("CMSMarkStack backing store failure");
+    return false;
+  }
+  assert(_virtual_space.committed_size() == rs.size(),
+         "didn't reserve backing store for all of CMS stack?");
+  _base = (oop*)(_virtual_space.low());
+  _index = 0;
+  _capacity = size;
+  // Track high-water mark in debug builds only.
+  NOT_PRODUCT(_max_depth = 0);
+  return true;
+}
+
+// XXX FIX ME !!! In the MT case we come in here holding a
+// leaf lock. For printing we need to take a further lock
+// which has lower rank. We need to recalibrate the two
+// lock-ranks involved in order to be able to print the
+// messages below. (Or defer the printing to the caller.
+// For now we take the expedient path of just disabling the
+// messages for the problematic case.)
+// Attempt to double the stack's capacity, capped at CMSMarkStackSizeMax.
+// The old backing store is released only after the doubled reservation
+// succeeds; on failure the stack is left unchanged. Note that _index is
+// reset, so any contents are discarded -- NOTE(review): callers
+// presumably expand only when the stack is logically empty; confirm.
+void CMSMarkStack::expand() {
+  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
+  if (_capacity == CMSMarkStackSizeMax) {
+    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
+      // We print a warning message only once per CMS cycle.
+      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
+    }
+    return;
+  }
+  // Double capacity if possible
+  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
+  // Do not give up existing stack until we have managed to
+  // get the double capacity that we desired.
+  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
+                   new_capacity * sizeof(oop)));
+  if (rs.is_reserved()) {
+    // Release the backing store associated with old stack
+    _virtual_space.release();
+    // Reinitialize virtual space for new stack
+    if (!_virtual_space.initialize(rs, rs.size())) {
+      fatal("Not enough swap for expanded marking stack");
+    }
+    _base = (oop*)(_virtual_space.low());
+    _index = 0;
+    _capacity = new_capacity;
+  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
+    // Failed to double capacity, continue;
+    // we print a detail message only once per CMS cycle.
+    gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
+            SIZE_FORMAT"K",
+            _capacity / K, new_capacity / K);
+  }
+}
+
+
+// Closures
+// XXX: there seems to be a lot of code duplication here;
+// should refactor and consolidate common code.
+
+// This closure is used to mark refs into the CMS generation in
+// the CMS bit map. Called at the first checkpoint. This closure
+// assumes that we do not need to re-mark dirty cards; if the CMS
+// generation on which this is used is not an oldest (modulo perm gen)
+// generation then this will lose younger_gen cards!
+
+MarkRefsIntoClosure::MarkRefsIntoClosure(
+  MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
+    _span(span),
+    _bitMap(bitMap),
+    _should_do_nmethods(should_do_nmethods)
+{
+    // Simple first-checkpoint marking needs no reference processor;
+    // the inherited field must stay NULL.
+    assert(_ref_processor == NULL, "deliberately left NULL");
+    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
+}
+
+// Mark, in the CMS bit map, the bit corresponding to the referent of p,
+// provided that referent is a non-NULL oop lying within _span.
+void MarkRefsIntoClosure::do_oop(oop* p) {
+  oop obj = *p;
+  if (obj == NULL) {
+    return;
+  }
+  assert(obj->is_oop(), "expected an oop");
+  HeapWord* addr = (HeapWord*)obj;
+  if (_span.contains(addr)) {
+    // this should be made more efficient
+    _bitMap->mark(addr);
+  }
+}
+
+// A variant of the above, used for CMS marking verification.
+// Marks into verification_bm and cross-checks each mark against cms_bm
+// in do_oop() below.
+MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
+  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
+  bool should_do_nmethods):
+    _span(span),
+    _verification_bm(verification_bm),
+    _cms_bm(cms_bm),
+    _should_do_nmethods(should_do_nmethods) {
+    assert(_ref_processor == NULL, "deliberately left NULL");
+    assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
+}
+
+// Mark the referent's bit in the verification bit map; if the CMS bit
+// map is missing the corresponding mark, the two marking passes disagree
+// and we abort after dumping the offending object.
+void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
+  // if p points into _span, then mark corresponding bit in _markBitMap
+  oop this_oop = *p;
+  if (this_oop != NULL) {
+    assert(this_oop->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)this_oop;
+    if (_span.contains(addr)) {
+      _verification_bm->mark(addr);
+      if (!_cms_bm->isMarked(addr)) {
+        oop(addr)->print();
+        gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+        fatal("... aborting");
+      }
+    }
+  }
+}
+
+//////////////////////////////////////////////////
+// MarkRefsIntoAndScanClosure
+//////////////////////////////////////////////////
+
+// Closure used at the second (final) checkpoint and during concurrent
+// precleaning to mark refs into the CMS generation and transitively scan
+// newly-grey objects; see the comment preceding do_oop() below.
+MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
+                                                       ReferenceProcessor* rp,
+                                                       CMSBitMap* bit_map,
+                                                       CMSBitMap* mod_union_table,
+                                                       CMSMarkStack*  mark_stack,
+                                                       CMSMarkStack*  revisit_stack,
+                                                       CMSCollector* collector,
+                                                       bool should_yield,
+                                                       bool concurrent_precleaning):
+  _collector(collector),
+  _span(span),
+  _bit_map(bit_map),
+  _mark_stack(mark_stack),
+  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
+                      mark_stack, revisit_stack, concurrent_precleaning),
+  _yield(should_yield),
+  _concurrent_precleaning(concurrent_precleaning),
+  _freelistLock(NULL)
+{
+  // Unlike the simple marking closures, this one discovers references.
+  _ref_processor = rp;
+  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+}
+
+// This closure is used to mark refs into the CMS generation at the
+// second (final) checkpoint, and to scan and transitively follow
+// the unmarked oops. It is also used during the concurrent precleaning
+// phase while scanning objects on dirty cards in the CMS generation.
+// The marks are made in the marking bit map and the marking stack is
+// used for keeping the (newly) grey objects during the scan.
+// The parallel version (Par_...) appears further below.
+void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
+  oop this_oop = *p;
+  if (this_oop != NULL) {
+    assert(this_oop->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)this_oop;
+    // Eager drainage: the stack and overflow list are fully emptied on
+    // every call, so both must be empty on entry.
+    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+    assert(_collector->overflow_list_is_empty(),
+           "overflow list should be empty");
+    if (_span.contains(addr) &&
+        !_bit_map->isMarked(addr)) {
+      // mark bit map (object is now grey)
+      _bit_map->mark(addr);
+      // push on marking stack (stack should be empty), and drain the
+      // stack by applying this closure to the oops in the oops popped
+      // from the stack (i.e. blacken the grey objects)
+      bool res = _mark_stack->push(this_oop);
+      assert(res, "Should have space to push on empty stack");
+      do {
+        oop new_oop = _mark_stack->pop();
+        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
+        assert(new_oop->is_parsable(), "Found unparsable oop");
+        assert(_bit_map->isMarked((HeapWord*)new_oop),
+               "only grey objects on this stack");
+        // iterate over the oops in this oop, marking and pushing
+        // the ones in CMS heap (i.e. in _span).
+        new_oop->oop_iterate(&_pushAndMarkClosure);
+        // check if it's time to yield
+        do_yield_check();
+      } while (!_mark_stack->isEmpty() ||
+               (!_concurrent_precleaning && take_from_overflow_list()));
+        // if marking stack is empty, and we are not doing this
+        // during precleaning, then check the overflow list
+    }
+    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
+    assert(_collector->overflow_list_is_empty(),
+           "overflow list was drained above");
+    // We could restore evacuated mark words, if any, used for
+    // overflow list links here because the overflow list is
+    // provably empty here. That would reduce the maximum
+    // size requirements for preserved_{oop,mark}_stack.
+    // But we'll just postpone it until we are all done
+    // so we can just stream through.
+    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
+      _collector->restore_preserved_marks_if_any();
+      assert(_collector->no_preserved_marks(), "No preserved marks");
+    }
+    assert(_concurrent_precleaning || _collector->no_preserved_marks(), "no preserved marks");
+    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
+           "no preserved marks");
+  }
+}
+
+// Yield protocol: release the bit-map lock, the free-list lock and the
+// CMS token, sleep briefly while the yield request persists, then
+// re-acquire everything (free-list lock first) and restart the timer.
+void MarkRefsIntoAndScanClosure::do_yield_work() {
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+  assert_lock_strong(_freelistLock);
+  assert_lock_strong(_bit_map->lock());
+  // relinquish the free_list_lock and bitMaplock()
+  _bit_map->lock()->unlock();
+  _freelistLock->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+  _collector->stopTimer();
+  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // See the comment in coordinator_yield()
+  for (unsigned i = 0; i < CMSYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);    
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  _freelistLock->lock_without_safepoint_check();
+  _bit_map->lock()->lock_without_safepoint_check();
+  _collector->startTimer();
+}
+
+///////////////////////////////////////////////////////////
+// Par_MarkRefsIntoAndScanClosure: a parallel version of
+//                                 MarkRefsIntoAndScanClosure
+///////////////////////////////////////////////////////////
+Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
+  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
+  CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack*  revisit_stack):
+  _span(span),
+  _bit_map(bit_map),
+  _work_queue(work_queue),
+  // Threshold used by do_oop() when trimming the work queue: a quarter
+  // of the queue's capacity, capped at CMSWorkQueueDrainThreshold
+  // entries per parallel GC thread.
+  _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
+                       (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
+  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
+                          revisit_stack)
+{
+  // A non-NULL reference processor is required for reference discovery.
+  _ref_processor = rp;
+  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+}
+
+// This closure is used to mark refs into the CMS generation at the
+// second (final) checkpoint, and to scan and transitively follow
+// the unmarked oops. The marks are made in the marking bit map and
+// the work_queue is used for keeping the (newly) grey objects during
+// the scan phase whence they are also available for stealing by parallel
+// threads. Since the marking bit map is shared, updates are
+// synchronized (via CAS).
+//
+// p is the address of the oop slot being examined; a NULL slot is a no-op.
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
+  oop this_oop = *p;
+  if (this_oop != NULL) {
+    assert(this_oop->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)this_oop;
+    // Only objects inside _span that are not yet marked are of interest.
+    if (_span.contains(addr) &&
+        !_bit_map->isMarked(addr)) {
+      // mark bit map (object will become grey):
+      // It is possible for several threads to be
+      // trying to "claim" this object concurrently;
+      // the unique thread that succeeds in marking the
+      // object first will do the subsequent push on
+      // to the work queue (or overflow list).
+      if (_bit_map->par_mark(addr)) {
+        // push on work_queue (which may not be empty), and trim the
+        // queue to an appropriate length by applying this closure to
+        // the oops in the oops popped from the stack (i.e. blacken the
+        // grey objects)
+        bool res = _work_queue->push(this_oop);
+        assert(res, "Low water mark should be less than capacity?");
+        // Blacken popped entries until the queue is back under
+        // _low_water_mark entries.
+        trim_queue(_low_water_mark);
+      } // Else, another thread claimed the object
+    }
+  }
+}
+
+// This closure is used to rescan the marked objects on the dirty cards
+// in the mod union table and the card table proper.
+//
+// Returns the (adjusted) size of the block starting at p, or 0 to signal
+// that the caller cannot proceed past p yet: either we yielded and were
+// asked to abort the preclean cycle, or p is an uninitialized object
+// whose size cannot be determined (the card will be revisited later).
+size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
+  oop p, MemRegion mr) {
+
+  size_t size = 0;
+  HeapWord* addr = (HeapWord*)p;
+  assert(_markStack->isEmpty(), "pre-condition (eager drainage)");
+  assert(_span.contains(addr), "we are scanning the CMS generation");
+  // check if it's time to yield
+  if (do_yield_check()) {
+    // We yielded for some foreground stop-world work,
+    // and we have been asked to abort this ongoing preclean cycle.
+    return 0;
+  }
+  if (_bitMap->isMarked(addr)) {
+    // it's marked; is it potentially uninitialized?
+    if (p->klass() != NULL) {
+      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
+        // Signal precleaning to redirty the card since
+        // the klass pointer is already installed.
+        assert(size == 0, "Initial value");
+      } else {
+        assert(p->is_parsable(), "must be parsable.");
+        // an initialized object; ignore mark word in verification below
+        // since we are running concurrent with mutators
+        assert(p->is_oop(true), "should be an oop");
+        if (p->is_objArray()) {
+          // objArrays are precisely marked; restrict scanning
+          // to dirty cards only.
+          size = p->oop_iterate(_scanningClosure, mr);
+          assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
+                 "adjustObjectSize should be the identity for array sizes, "
+                 "which are necessarily larger than minimum object size of "
+                 "two heap words");
+        } else {
+          // A non-array may have been imprecisely marked; we need
+          // to scan object in its entirety.
+          size = CompactibleFreeListSpace::adjustObjectSize(
+                   p->oop_iterate(_scanningClosure));
+        }
+        #ifdef DEBUG
+          // Cross-check the iterated size against the object's own size,
+          // and verify the Printezis-mark invariants in the bit map.
+          size_t direct_size =
+            CompactibleFreeListSpace::adjustObjectSize(p->size());
+          assert(size == direct_size, "Inconsistency in size");
+          assert(size >= 3, "Necessary for Printezis marks to work");
+          if (!_bitMap->isMarked(addr+1)) {
+            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
+          } else {
+            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
+            assert(_bitMap->isMarked(addr+size-1),
+                   "inconsistent Printezis mark");
+          }
+        #endif // DEBUG
+      }
+    } else {
+      // an uninitialized object: its size is recovered from the
+      // Printezis marks (the bit pair at addr+1 and at the block's end).
+      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
+      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
+      size = pointer_delta(nextOneAddr + 1, addr);
+      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
+             "alignment problem");
+      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
+      // will dirty the card when the klass pointer is installed in the
+      // object (signalling the completion of initialization).
+    }
+  } else {
+    // Either a not yet marked object or an uninitialized object
+    if (p->klass() == NULL || !p->is_parsable()) {
+      // An uninitialized object, skip to the next card, since
+      // we may not be able to read its P-bits yet.
+      assert(size == 0, "Initial value");
+    } else {
+      // An object not (yet) reached by marking: we merely need to
+      // compute its size so as to go look at the next block.
+      assert(p->is_oop(true), "should be an oop"); 
+      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
+    }
+  }
+  assert(_markStack->isEmpty(), "post-condition (eager drainage)");
+  return size;
+}
+
+// Yield protocol for the careful-rescan closure: release the free-list
+// and bit-map locks, pause while a yield is still requested, then
+// re-acquire both locks (free-list first) before resuming.
+void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+  assert_lock_strong(_freelistLock);
+  assert_lock_strong(_bitMap->lock());
+  // relinquish the free_list_lock and bitMaplock()
+  _bitMap->lock()->unlock();
+  _freelistLock->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+  // Account the yielded interval to the size policy's pause timer.
+  _collector->stopTimer();
+  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // See the comment in coordinator_yield()
+  for (unsigned i = 0; i < CMSYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);    
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  _freelistLock->lock_without_safepoint_check();
+  _bitMap->lock()->lock_without_safepoint_check();
+  _collector->startTimer();
+}
+
+
+//////////////////////////////////////////////////////////////////
+// SurvivorSpacePrecleanClosure
+//////////////////////////////////////////////////////////////////
+// This (single-threaded) closure is used to preclean the oops in
+// the survivor spaces.
+//
+// p is an initialized, parsable object in a survivor space (outside
+// _span). Its interior oops are scanned with _scanning_closure, and any
+// grey objects thereby pushed on the mark stack are drained before
+// returning. Returns the iterated size of p, or 0 to signal that the
+// preclean phase should be aborted (a GC happened underneath us, or the
+// collector asked us to abort).
+size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
+
+  HeapWord* addr = (HeapWord*)p;
+  assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+  assert(!_span.contains(addr), "we are scanning the survivor spaces");
+  assert(p->klass() != NULL, "object should be initialized");
+  assert(p->is_parsable(), "must be parsable.");
+  // an initialized object; ignore mark word in verification below
+  // since we are running concurrent with mutators
+  assert(p->is_oop(true), "should be an oop");
+  // Note that we do not yield while we iterate over
+  // the interior oops of p, pushing the relevant ones
+  // on our marking stack.
+  size_t size = p->oop_iterate(_scanning_closure);
+  do_yield_check();
+  // Observe that below, we do not abandon the preclean
+  // phase as soon as we should; rather we empty the
+  // marking stack before returning. This is to satisfy
+  // some existing assertions. In general, it may be a
+  // good idea to abort immediately and complete the marking
+  // from the grey objects at a later time.
+  while (!_mark_stack->isEmpty()) {
+    oop new_oop = _mark_stack->pop();
+    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
+    assert(new_oop->is_parsable(), "Found unparsable oop");
+    assert(_bit_map->isMarked((HeapWord*)new_oop),
+           "only grey objects on this stack");
+    // iterate over the oops in this oop, marking and pushing
+    // the ones in CMS heap (i.e. in _span).
+    new_oop->oop_iterate(_scanning_closure);
+    // check if it's time to yield
+    do_yield_check();
+  }
+  // If a collection completed while we were working (the total
+  // collection count changed), or an abort was requested, signal the
+  // caller to abandon precleaning by returning 0.
+  unsigned int after_count =
+    GenCollectedHeap::heap()->total_collections();
+  bool abort = (_before_count != after_count) ||
+               _collector->should_abort_preclean();
+  return abort ? 0 : size;
+}
+
+// Yield protocol for survivor-space precleaning: only the bit-map lock
+// is held here, so only it is released, and re-acquired after the pause.
+void SurvivorSpacePrecleanClosure::do_yield_work() {
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+  assert_lock_strong(_bit_map->lock());
+  // Relinquish the bit map lock
+  _bit_map->lock()->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+  // Account the yielded interval to the size policy's pause timer.
+  _collector->stopTimer();
+  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // See the comment in coordinator_yield()
+  for (unsigned i = 0; i < CMSYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);    
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  _bit_map->lock()->lock_without_safepoint_check();
+  _collector->startTimer();
+}
+
+// This closure is used to rescan the marked objects on the dirty cards
+// in the mod union table and the card table proper. In the parallel
+// case, although the bitMap is shared, we do a single read so the
+// isMarked() query is "safe".
+//
+// Returns true iff p is an objArray (callers can use this to know the
+// object was scanned precisely, i.e. restricted to the dirty region mr).
+bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
+  // Ignore mark word because we are running concurrent with mutators
+  assert(p->is_oop_or_null(true), "expected an oop or null");
+  HeapWord* addr = (HeapWord*)p;
+  assert(_span.contains(addr), "we are scanning the CMS generation");
+  bool is_obj_array = false;
+  // In the single-threaded case we drain eagerly, so the mark stack and
+  // overflow list must be empty both before and after the scan.
+  #ifdef DEBUG
+    if (!_parallel) {
+      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+      assert(_collector->overflow_list_is_empty(),
+             "overflow list should be empty");
+
+    }
+  #endif // DEBUG
+  if (_bit_map->isMarked(addr)) {
+    // Obj arrays are precisely marked, non-arrays are not;
+    // so we scan objArrays precisely and non-arrays in their
+    // entirety.
+    if (p->is_objArray()) {
+      is_obj_array = true;
+      if (_parallel) {
+        p->oop_iterate(_par_scan_closure, mr);
+      } else {
+        p->oop_iterate(_scan_closure, mr);
+      }
+    } else {
+      if (_parallel) {
+        p->oop_iterate(_par_scan_closure);
+      } else {
+        p->oop_iterate(_scan_closure);
+      }
+    }
+  }
+  #ifdef DEBUG
+    if (!_parallel) {
+      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
+      assert(_collector->overflow_list_is_empty(),
+             "overflow list should be empty");
+
+    }
+  #endif // DEBUG
+  return is_obj_array;
+}
+
+// Single-threaded closure that walks the marking bit map from its start,
+// tracing from each marked (grey) object. The finger starts at the bit
+// map's start word; the card-cleaning threshold starts at the finger.
+MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
+                        MemRegion span,
+                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
+                        CMSMarkStack*  revisitStack,
+                        bool should_yield, bool verifying):
+  _collector(collector),
+  _span(span),
+  _bitMap(bitMap),
+  _mut(&collector->_modUnionTable),
+  _markStack(markStack),
+  _revisitStack(revisitStack),
+  _yield(should_yield),
+  _skipBits(0)
+{
+  assert(_markStack->isEmpty(), "stack should be empty");
+  _finger = _bitMap->startWord();
+  _threshold = _finger;
+  assert(_collector->_restart_addr == NULL, "Sanity check");
+  assert(_span.contains(_finger), "Out of bounds _finger?");
+  // _verifying exists only in debug builds; it gates verification-time
+  // behavior in do_bit()/scanOopsInOop().
+  DEBUG_ONLY(_verifying = verifying;)
+}
+
+// Reposition the closure so marking resumes at addr. The mark stack must
+// be empty (or restarting would push duplicates). The card-cleaning
+// threshold is re-aligned to the card boundary at or above the new finger.
+void MarkFromRootsClosure::reset(HeapWord* addr) {
+  assert(_markStack->isEmpty(), "would cause duplicates on stack");
+  assert(_span.contains(addr), "Out of bounds _finger?");
+  _finger = addr;
+  _threshold = (HeapWord*)round_to((intptr_t)addr,
+                                   CardTableModRefBS::card_size);
+}
+
+// Should revisit to see if this should be restructured for
+// greater efficiency.
+//
+// Called for each set bit in the marking bit map; offset is the bit's
+// offset from the bit map's start word. A second mark at addr+1 is a
+// "Printezis mark" denoting an allocated-but-uninitialized block, in
+// which case we skip the block instead of scanning it.
+void MarkFromRootsClosure::do_bit(size_t offset) {
+  if (_skipBits > 0) {
+    _skipBits--;
+    return;
+  }
+  // convert offset into a HeapWord*
+  HeapWord* addr = _bitMap->startWord() + offset;
+  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
+         "address out of range");
+  assert(_bitMap->isMarked(addr), "tautology");
+  if (_bitMap->isMarked(addr+1)) {
+    // this is an allocated but not yet initialized object
+    assert(_skipBits == 0, "tautology");
+    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
+    oop p = oop(addr);
+    if (p->klass() == NULL || !p->is_parsable()) {
+      // Not yet scannable; arrange to come back to it later.
+      DEBUG_ONLY(if (!_verifying) {)
+        // We re-dirty the cards on which this object lies and increase
+        // the _threshold so that we'll come back to scan this object
+        // during the preclean or remark phase. (CMSCleanOnEnter)
+        if (CMSCleanOnEnter) {
+          size_t sz = _collector->block_size_using_printezis_bits(addr);
+          HeapWord* start_card_addr = (HeapWord*)round_down(
+                                         (intptr_t)addr, CardTableModRefBS::card_size);
+          HeapWord* end_card_addr   = (HeapWord*)round_to(
+                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
+          MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
+          assert(!redirty_range.is_empty(), "Arithmetical tautology");
+          // Bump _threshold to end_card_addr; note that
+          // _threshold cannot possibly exceed end_card_addr, anyhow.
+          // This prevents future clearing of the card as the scan proceeds
+          // to the right.
+          assert(_threshold <= end_card_addr,
+                 "Because we are just scanning into this object");
+          if (_threshold < end_card_addr) {
+            _threshold = end_card_addr;
+          }
+          if (p->klass() != NULL) {
+            // Redirty the range of cards...
+            _mut->mark_range(redirty_range);
+          } // ...else the setting of klass will dirty the card anyway.
+        }
+      DEBUG_ONLY(})
+      return;
+    }
+  }
+  // A normal initialized object: trace through its interior oops.
+  scanOopsInOop(addr);
+}
+
+// We take a break if we've been at this for a while,
+// so as to avoid monopolizing the locks involved.
+void MarkFromRootsClosure::do_yield_work() {
+  // First give up the locks, then yield, then re-lock
+  // We should probably use a constructor/destructor idiom to
+  // do this unlock/lock or modify the MutexUnlocker class to
+  // serve our purpose. XXX
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+  assert_lock_strong(_bitMap->lock());
+  // Only the bit-map lock is held during mark-from-roots.
+  _bitMap->lock()->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+  // Account the yielded interval to the size policy's pause timer.
+  _collector->stopTimer();
+  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // See the comment in coordinator_yield()
+  for (unsigned i = 0; i < CMSYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);    
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  _bitMap->lock()->lock_without_safepoint_check();
+  _collector->startTimer();
+}
+
+// Trace through the interior oops of the marked object at ptr: advance
+// the finger past the object, optionally clear the mod-union-table cards
+// the finger has crossed (CMSCleanOnEnter), then push the object and
+// transitively blacken grey objects via an explicit mark stack.
+void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
+  assert(_bitMap->isMarked(ptr), "expected bit to be set");
+  assert(_markStack->isEmpty(),
+         "should drain stack to limit stack usage");
+  // convert ptr to an oop preparatory to scanning
+  oop this_oop = oop(ptr);
+  // Ignore mark word in verification below, since we
+  // may be running concurrent with mutators.
+  assert(this_oop->is_oop(true), "should be an oop");
+  assert(_finger <= ptr, "_finger runneth ahead");
+  // advance the finger to right end of this object
+  _finger = ptr + this_oop->size();
+  assert(_finger > ptr, "we just incremented it above");
+  // On large heaps, it may take us some time to get through
+  // the marking phase (especially if running iCMS). During
+  // this time it's possible that a lot of mutations have
+  // accumulated in the card table and the mod union table --
+  // these mutation records are redundant until we have
+  // actually traced into the corresponding card.
+  // Here, we check whether advancing the finger would make
+  // us cross into a new card, and if so clear corresponding
+  // cards in the MUT (preclean them in the card-table in the
+  // future).
+
+  // Skipped entirely when verifying (debug builds set _verifying).
+  DEBUG_ONLY(if (!_verifying) {)
+    // The clean-on-enter optimization is disabled by default,
+    // until we fix 6178663.
+    if (CMSCleanOnEnter && (_finger > _threshold)) {
+      // [_threshold, _finger) represents the interval
+      // of cards to be cleared  in MUT (or precleaned in card table).
+      // The set of cards to be cleared is all those that overlap
+      // with the interval [_threshold, _finger); note that
+      // _threshold is always kept card-aligned but _finger isn't
+      // always card-aligned.
+      HeapWord* old_threshold = _threshold;
+      assert(old_threshold == (HeapWord*)round_to(
+              (intptr_t)old_threshold, CardTableModRefBS::card_size),
+             "_threshold should always be card-aligned");
+      _threshold = (HeapWord*)round_to(
+                     (intptr_t)_finger, CardTableModRefBS::card_size);
+      MemRegion mr(old_threshold, _threshold);
+      assert(!mr.is_empty(), "Control point invariant");
+      assert(_span.contains(mr), "Should clear within span");
+      // XXX When _finger crosses from old gen into perm gen
+      // we may be doing unnecessary cleaning; do better in the
+      // future by detecting that condition and clearing fewer
+      // MUT/CT entries.
+      _mut->clear_range(mr);
+    }
+  DEBUG_ONLY(})
+
+  // Note: the finger doesn't advance while we drain
+  // the stack below.
+  PushOrMarkClosure pushOrMarkClosure(_collector,
+                                      _span, _bitMap, _markStack,
+                                      _revisitStack,
+                                      _finger, this);
+  bool res = _markStack->push(this_oop);
+  assert(res, "Empty non-zero size stack should have space for single push");
+  // Drain eagerly: blacken each popped grey object, checking for yield
+  // requests between objects.
+  while (!_markStack->isEmpty()) {
+    oop new_oop = _markStack->pop();
+    // Skip verifying header mark word below because we are
+    // running concurrent with mutators.
+    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
+    // now scan this oop's oops
+    new_oop->oop_iterate(&pushOrMarkClosure);
+    do_yield_check();
+  }
+  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
+}
+
+// Parallel analogue of MarkFromRootsClosure: each worker walks its own
+// span with a local finger and work queue, spilling to a shared overflow
+// stack; yields are delegated to the owning CMSConcMarkingTask.
+Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
+                       CMSCollector* collector, MemRegion span,
+                       CMSBitMap* bit_map,
+                       OopTaskQueue* work_queue,
+                       CMSMarkStack*  overflow_stack,
+                       CMSMarkStack*  revisit_stack,
+                       bool should_yield):
+  _collector(collector),
+  _whole_span(collector->_span),
+  _span(span),
+  _bit_map(bit_map),
+  _mut(&collector->_modUnionTable),
+  _work_queue(work_queue),
+  _overflow_stack(overflow_stack),
+  _revisit_stack(revisit_stack),
+  _yield(should_yield),
+  _skip_bits(0),
+  _task(task)
+{
+  assert(_work_queue->size() == 0, "work_queue should be empty");
+  // Start the local finger at the beginning of this worker's span.
+  _finger = span.start();
+  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
+  assert(_span.contains(_finger), "Out of bounds _finger?");
+}
+
+// Should revisit to see if this should be restructured for
+// greater efficiency.
+//
+// Parallel analogue of MarkFromRootsClosure::do_bit: a second mark at
+// addr+1 ("Printezis mark") denotes a possibly-uninitialized block,
+// which is skipped if its klass is not yet installed or it is not
+// parsable; otherwise the object is traced.
+void Par_MarkFromRootsClosure::do_bit(size_t offset) {
+  if (_skip_bits > 0) {
+    _skip_bits--;
+    return;
+  }
+  // convert offset into a HeapWord*
+  HeapWord* addr = _bit_map->startWord() + offset;
+  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
+         "address out of range");
+  assert(_bit_map->isMarked(addr), "tautology");
+  if (_bit_map->isMarked(addr+1)) {
+    // this is an allocated object that might not yet be initialized
+    assert(_skip_bits == 0, "tautology");
+    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
+    oop p = oop(addr);
+    if (p->klass() == NULL || !p->is_parsable()) {
+      // in the case of Clean-on-Enter optimization, redirty card
+      // and avoid clearing card by increasing  the threshold.
+      return;
+    }
+  }
+  scan_oops_in_oop(addr);
+}
+
+// Parallel analogue of MarkFromRootsClosure::scanOopsInOop: advance the
+// local finger past the object at ptr, optionally clear crossed MUT
+// cards, then trace transitively, draining the local work queue and
+// pulling spilled work back from the shared overflow stack.
+void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
+  assert(_bit_map->isMarked(ptr), "expected bit to be set");
+  // Should we assert that our work queue is empty or
+  // below some drain limit?
+  assert(_work_queue->size() == 0,
+         "should drain stack to limit stack usage");
+  // convert ptr to an oop preparatory to scanning
+  oop this_oop = oop(ptr);
+  // Ignore mark word in verification below, since we
+  // may be running concurrent with mutators.
+  assert(this_oop->is_oop(true), "should be an oop");
+  assert(_finger <= ptr, "_finger runneth ahead");
+  // advance the finger to right end of this object
+  _finger = ptr + this_oop->size();
+  assert(_finger > ptr, "we just incremented it above");
+  // On large heaps, it may take us some time to get through
+  // the marking phase (especially if running iCMS). During
+  // this time it's possible that a lot of mutations have
+  // accumulated in the card table and the mod union table --
+  // these mutation records are redundant until we have
+  // actually traced into the corresponding card.
+  // Here, we check whether advancing the finger would make
+  // us cross into a new card, and if so clear corresponding
+  // cards in the MUT (preclean them in the card-table in the
+  // future).
+
+  // The clean-on-enter optimization is disabled by default,
+  // until we fix 6178663.
+  if (CMSCleanOnEnter && (_finger > _threshold)) {
+    // [_threshold, _finger) represents the interval
+    // of cards to be cleared  in MUT (or precleaned in card table).
+    // The set of cards to be cleared is all those that overlap
+    // with the interval [_threshold, _finger); note that
+    // _threshold is always kept card-aligned but _finger isn't
+    // always card-aligned.
+    HeapWord* old_threshold = _threshold;
+    assert(old_threshold == (HeapWord*)round_to(
+            (intptr_t)old_threshold, CardTableModRefBS::card_size),
+           "_threshold should always be card-aligned");
+    _threshold = (HeapWord*)round_to(
+                   (intptr_t)_finger, CardTableModRefBS::card_size);
+    MemRegion mr(old_threshold, _threshold);
+    assert(!mr.is_empty(), "Control point invariant");
+    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
+    // XXX When _finger crosses from old gen into perm gen
+    // we may be doing unnecessary cleaning; do better in the
+    // future by detecting that condition and clearing fewer
+    // MUT/CT entries.
+    _mut->clear_range(mr);
+  }
+
+  // Note: the local finger doesn't advance while we drain
+  // the stack below, but the global finger sure can and will.
+  HeapWord** gfa = _task->global_finger_addr();
+  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
+                                      _span, _bit_map,
+                                      _work_queue,
+                                      _overflow_stack,
+                                      _revisit_stack,
+                                      _finger,
+                                      gfa, this);
+  bool res = _work_queue->push(this_oop);   // overflow could occur here
+  assert(res, "Will hold once we use workqueues");
+  while (true) {
+    oop new_oop;
+    if (!_work_queue->pop_local(new_oop)) {
+      // We emptied our work_queue; check if there's stuff that can
+      // be gotten from the overflow stack.
+      if (CMSConcMarkingTask::get_work_from_overflow_stack(
+            _overflow_stack, _work_queue)) {
+        do_yield_check();
+        continue;
+      } else {  // done
+        break;
+      }
+    }
+    // Skip verifying header mark word below because we are
+    // running concurrent with mutators.
+    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
+    // now scan this oop's oops
+    new_oop->oop_iterate(&pushOrMarkClosure);
+    do_yield_check();
+  }
+  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
+}
+
+// Yield in response to a request from VM Thread or
+// from mutators. Unlike the single-threaded closures, the parallel
+// version delegates the whole yield protocol to the owning task.
+void Par_MarkFromRootsClosure::do_yield_work() {
+  assert(_task != NULL, "sanity");
+  _task->yield();
+}
+
+// A variant of the above used for verifying CMS marking work: it marks
+// into a separate verification bit map and cross-checks each mark
+// against the CMS bit map (cms_bm).
+MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
+                        MemRegion span,
+                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
+                        CMSMarkStack*  mark_stack):
+  _collector(collector),
+  _span(span),
+  _verification_bm(verification_bm),
+  _cms_bm(cms_bm),
+  _mark_stack(mark_stack),
+  _pam_verify_closure(collector, span, verification_bm, cms_bm,
+                      mark_stack)
+{
+  assert(_mark_stack->isEmpty(), "stack should be empty");
+  // Start the finger at the beginning of the verification bit map.
+  _finger = _verification_bm->startWord();
+  assert(_collector->_restart_addr == NULL, "Sanity check");
+  assert(_span.contains(_finger), "Out of bounds _finger?");
+}
+
+// Reposition the closure so verification marking resumes at addr; the
+// mark stack must be empty or restarting would push duplicates.
+void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
+  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
+  assert(_span.contains(addr), "Out of bounds _finger?");
+  _finger = addr;
+}
+
+// Should revisit to see if this should be restructured for
+// greater efficiency.
+//
+// Called for each set bit in the verification bit map; the bit must
+// also be set in the CMS bit map. Traces transitively from the object,
+// draining the mark stack eagerly (no yielding here: this runs at a
+// stop-world verification point, so asserts need not ignore mark words).
+void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
+  // convert offset into a HeapWord*
+  HeapWord* addr = _verification_bm->startWord() + offset;
+  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
+         "address out of range");
+  assert(_verification_bm->isMarked(addr), "tautology");
+  assert(_cms_bm->isMarked(addr), "tautology");
+
+  assert(_mark_stack->isEmpty(),
+         "should drain stack to limit stack usage");
+  // convert addr to an oop preparatory to scanning
+  oop this_oop = oop(addr);
+  assert(this_oop->is_oop(), "should be an oop");
+  assert(_finger <= addr, "_finger runneth ahead");
+  // advance the finger to right end of this object
+  _finger = addr + this_oop->size();
+  assert(_finger > addr, "we just incremented it above");
+  // Note: the finger doesn't advance while we drain
+  // the stack below.
+  bool res = _mark_stack->push(this_oop);
+  assert(res, "Empty non-zero size stack should have space for single push");
+  while (!_mark_stack->isEmpty()) {
+    oop new_oop = _mark_stack->pop();
+    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
+    // now scan this oop's oops
+    new_oop->oop_iterate(&_pam_verify_closure);
+  }
+  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
+}
+
+// Oop closure used by MarkFromRootsVerifyClosure above: marks interior
+// oops into the verification bit map, checking each against the CMS bit
+// map, and pushes newly-grey objects on the shared mark stack.
+PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
+  CMSCollector* collector, MemRegion span,
+  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
+  CMSMarkStack*  mark_stack):
+  OopClosure(collector->ref_processor()),
+  _collector(collector),
+  _span(span),
+  _verification_bm(verification_bm),
+  _cms_bm(cms_bm),
+  _mark_stack(mark_stack)
+{ }
+
+
+// Upon stack overflow, we discard (part of) the stack,
+// remembering the least address amongst those discarded
+// in CMSCollector's _restart_address.
+void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
+  // Remember the least grey address discarded.
+  // Note: least_value must be read before reset() discards the contents.
+  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
+  _collector->lower_restart_addr(ra);
+  _mark_stack->reset();  // discard stack contents
+  _mark_stack->expand(); // expand the stack if possible
+}
+
+// Examine the oop slot p: if the referent lies in _span and is not yet
+// marked in the verification bit map, mark it grey and push it for
+// later scanning. Any object grey here but unmarked in the CMS bit map
+// is a verification failure and aborts the VM.
+void PushAndMarkVerifyClosure::do_oop(oop* p) {
+  oop    this_oop = *p;
+  assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)this_oop;
+  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
+    // Oop lies in _span and isn't yet grey or black
+    _verification_bm->mark(addr);            // now grey
+    if (!_cms_bm->isMarked(addr)) {
+      // CMS marking missed this object: report and abort.
+      oop(addr)->print();
+      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+      fatal("... aborting");
+    }
+
+    if (!_mark_stack->push(this_oop)) { // stack overflow
+      if (PrintCMSStatistics != 0) {
+        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
+                               SIZE_FORMAT, _mark_stack->capacity());
+      }
+      assert(_mark_stack->isFull(), "Else push should have succeeded");
+      handle_stack_overflow(addr);
+    }
+    // anything including and to the right of _finger
+    // will be scanned as we iterate over the remainder of the
+    // bit map
+  }
+}
+
+// Oop closure used by MarkFromRootsClosure::scanOopsInOop during
+// concurrent marking; finger is the current scan finger and parent is
+// the owning MarkFromRootsClosure.
+PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
+                     MemRegion span,
+                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
+                     CMSMarkStack*  revisitStack,
+                     HeapWord* finger, MarkFromRootsClosure* parent) :
+  OopClosure(collector->ref_processor()),
+  _collector(collector),
+  _span(span),
+  _bitMap(bitMap),
+  _markStack(markStack),
+  _revisitStack(revisitStack),
+  _finger(finger),
+  _parent(parent),
+  // Klasses are remembered only when class unloading is enabled.
+  _should_remember_klasses(collector->cms_should_unload_classes())
+{ }
+
+// Parallel analogue of PushOrMarkClosure, used by
+// Par_MarkFromRootsClosure::scan_oops_in_oop: in addition to the local
+// finger, it carries the address of the task's global finger.
+Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
+                     MemRegion span,
+                     CMSBitMap* bit_map,
+                     OopTaskQueue* work_queue,
+                     CMSMarkStack*  overflow_stack,
+                     CMSMarkStack*  revisit_stack,
+                     HeapWord* finger,
+                     HeapWord** global_finger_addr,
+                     Par_MarkFromRootsClosure* parent) :
+  OopClosure(collector->ref_processor()),
+  _collector(collector),
+  _whole_span(collector->_span),
+  _span(span),
+  _bit_map(bit_map),
+  _work_queue(work_queue),
+  _overflow_stack(overflow_stack),
+  _revisit_stack(revisit_stack),
+  _finger(finger),
+  _global_finger_addr(global_finger_addr),
+  _parent(parent),
+  // Klasses are remembered only when class unloading is enabled.
+  _should_remember_klasses(collector->cms_should_unload_classes())
+{ }
+
+
+// Record the lowest address from which marking must restart after a mark
+// stack overflow; keeps the minimum of all values reported so far.
+void CMSCollector::lower_restart_addr(HeapWord* low) {
+  assert(_span.contains(low), "Out of bounds addr");
+  _restart_addr = (_restart_addr == NULL) ? low : MIN2(_restart_addr, low);
+}
+
+// Upon stack overflow, we discard (part of) the stack,
+// remembering the least address amongst those discarded
+// in CMSCollector's _restart_address.
+void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
+  // Record the least grey address being discarded, then drop the stack
+  // contents and attempt to grow the stack for subsequent pushes.
+  _collector->lower_restart_addr((HeapWord*)_markStack->least_value(lost));
+  _markStack->reset();  // discard stack contents
+  _markStack->expand(); // expand the stack if possible
+}
+
+// Upon stack overflow, we discard (part of) the stack,
+// remembering the least address amongst those discarded
+// in CMSCollector's _restart_address.
+// Parallel version: the shared overflow stack may be manipulated by
+// several workers, so the reset/expand must be done under its lock.
+void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
+  // We need to do this under a mutex to prevent other
+  // workers from interfering with the expansion below.
+  MutexLockerEx ml(_overflow_stack->par_lock(),
+                   Mutex::_no_safepoint_check_flag);
+  // Remember the least grey address discarded
+  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
+  _collector->lower_restart_addr(ra);
+  _overflow_stack->reset();  // discard stack contents
+  _overflow_stack->expand(); // expand the stack if possible
+}
+
+
+// Serial concurrent-marking step for one oop: grey it in the bit map and,
+// if the bit map iteration has already passed it (addr < _finger), push it
+// on the mark stack so its referents still get scanned.
+void PushOrMarkClosure::do_oop(oop* p) {
+  oop    thisOop = *p;
+  // Ignore mark word because we are running concurrent with mutators.
+  assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)thisOop;
+  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
+    // Oop lies in _span and isn't yet grey or black
+    _bitMap->mark(addr);            // now grey
+    if (addr < _finger) {
+      // the bit map iteration has already either passed, or
+      // sampled, this bit in the bit map; we'll need to
+      // use the marking stack to scan this oop's oops.
+      bool simulate_overflow = false;
+      NOT_PRODUCT(
+        if (CMSMarkStackOverflowALot &&
+            _collector->simulate_overflow()) {
+          // simulate a stack overflow
+          simulate_overflow = true;
+        }
+      )
+      if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
+        if (PrintCMSStatistics != 0) {
+          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
+                                 SIZE_FORMAT, _markStack->capacity());
+        }
+        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
+        handle_stack_overflow(addr);
+      }
+    }
+    // anything including and to the right of _finger
+    // will be scanned as we iterate over the remainder of the
+    // bit map
+    do_yield_check();
+  }
+}
+
+// Parallel concurrent-marking step for one oop.  Uses an atomic par_mark
+// to "claim" the object, then decides (using the global and local fingers)
+// whether this worker must push it for scanning, or whether a later
+// bit-map iteration task will pick it up anyway.
+void Par_PushOrMarkClosure::do_oop(oop* p) {
+  oop    this_oop = *p;
+  // Ignore mark word because we are running concurrent with mutators.
+  assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)this_oop;
+  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
+    // Oop lies in _span and isn't yet grey or black
+    // We read the global_finger (volatile read) strictly after marking oop
+    bool res = _bit_map->par_mark(addr);    // now grey
+    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
+    // Should we push this marked oop on our stack?
+    // -- if someone else marked it, nothing to do
+    // -- if target oop is above global finger nothing to do
+    // -- if target oop is in chunk and above local finger
+    //      then nothing to do
+    // -- else push on work queue
+    if (   !res       // someone else marked it, they will deal with it
+        || (addr >= *gfa)  // will be scanned in a later task
+        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
+      return;
+    }
+    // the bit map iteration has already either passed, or
+    // sampled, this bit in the bit map; we'll need to
+    // use the marking stack to scan this oop's oops.
+    bool simulate_overflow = false;
+    NOT_PRODUCT(
+      if (CMSMarkStackOverflowALot &&
+          _collector->simulate_overflow()) {
+        // simulate a stack overflow
+        simulate_overflow = true;
+      }
+    )
+    // Try the local work queue first, then the shared overflow stack;
+    // only if both fail do we treat this as a (benign) overflow.
+    if (simulate_overflow ||
+        !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+      // stack overflow
+      if (PrintCMSStatistics != 0) {
+        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
+                               SIZE_FORMAT, _overflow_stack->capacity());
+      }
+      // We cannot assert that the overflow stack is full because
+      // it may have been emptied since.
+      assert(simulate_overflow ||
+             _work_queue->size() == _work_queue->max_elems(),
+            "Else push should have succeeded");
+      handle_stack_overflow(addr);
+    }
+    do_yield_check();
+  }
+}
+
+
+// Constructor: serial grey-object rescan closure used during precleaning
+// and the second (remark) checkpoint.  The mod union table is used during
+// precleaning to record overflow by dirtying cards instead of pushing.
+PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
+                                       MemRegion span,
+                                       ReferenceProcessor* rp,
+                                       CMSBitMap* bit_map,
+                                       CMSBitMap* mod_union_table,
+                                       CMSMarkStack*  mark_stack,
+                                       CMSMarkStack*  revisit_stack,
+                                       bool           concurrent_precleaning):
+  OopClosure(rp),
+  _collector(collector),
+  _span(span),
+  _bit_map(bit_map),
+  _mod_union_table(mod_union_table),
+  _mark_stack(mark_stack),
+  _revisit_stack(revisit_stack),
+  _concurrent_precleaning(concurrent_precleaning),
+  _should_remember_klasses(collector->cms_should_unload_classes())
+{
+  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+}
+
+// Grey object rescan during pre-cleaning and second checkpoint phases --
+// the non-parallel version (the parallel version appears further below.)
+// Marks the referent grey and pushes it on the mark stack; on overflow,
+// precleaning falls back to dirtying the mod union table, remark falls
+// back to the collector's overflow list.
+void PushAndMarkClosure::do_oop(oop* p) {
+  oop    this_oop = *p;
+  // If _concurrent_precleaning, ignore mark word verification
+  assert(this_oop->is_oop_or_null(_concurrent_precleaning),
+         "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)this_oop;
+  // Check if oop points into the CMS generation
+  // and is not marked
+  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
+    // a white object ...
+    _bit_map->mark(addr);         // ... now grey
+    // push on the marking stack (grey set)
+    bool simulate_overflow = false;
+    NOT_PRODUCT(
+      if (CMSMarkStackOverflowALot &&
+          _collector->simulate_overflow()) {
+        // simulate a stack overflow
+        simulate_overflow = true;
+      }
+    )
+    if (simulate_overflow || !_mark_stack->push(this_oop)) {
+      if (_concurrent_precleaning) {
+         // During precleaning we can just dirty the appropriate card
+         // in the mod union table, thus ensuring that the object remains
+         // in the grey set and continue. Note that no one can be interfering
+         // with us in this action of dirtying the mod union table, so
+         // no locking is required.
+         _mod_union_table->mark(addr);
+         _collector->_ser_pmc_preclean_ovflw++;
+      } else {
+         // During the remark phase, we need to remember this oop
+         // in the overflow list.
+         _collector->push_on_overflow_list(this_oop);
+         _collector->_ser_pmc_remark_ovflw++;
+      }
+    }
+  }
+}
+
+// Constructor: parallel grey-object rescan closure used during the second
+// (remark) checkpoint.  Each worker has its own work queue; no mod union
+// table is needed because the parallel form is not used for precleaning.
+Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
+                                               MemRegion span,
+                                               ReferenceProcessor* rp,
+                                               CMSBitMap* bit_map,
+                                               OopTaskQueue* work_queue,
+                                               CMSMarkStack* revisit_stack):
+  OopClosure(rp),
+  _collector(collector),
+  _span(span),
+  _bit_map(bit_map),
+  _work_queue(work_queue),
+  _revisit_stack(revisit_stack),
+  _should_remember_klasses(collector->cms_should_unload_classes())
+{
+  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+}
+
+// Grey object rescan during second checkpoint phase --
+// the parallel version.
+// Atomically claims the object via par_mark; the claiming thread pushes
+// it on its work queue, falling back to the shared overflow list.
+void Par_PushAndMarkClosure::do_oop(oop* p) {
+  oop    this_oop = *p;
+  assert(this_oop->is_oop_or_null(),
+         "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)this_oop;
+  // Check if oop points into the CMS generation 
+  // and is not marked
+  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
+    // a white object ...
+    // If we manage to "claim" the object, by being the
+    // first thread to mark it, then we push it on our
+    // marking stack
+    if (_bit_map->par_mark(addr)) {     // ... now grey
+      // push on work queue (grey set)
+      bool simulate_overflow = false;
+      NOT_PRODUCT(
+        if (CMSMarkStackOverflowALot &&
+            _collector->par_simulate_overflow()) {
+          // simulate a stack overflow
+          simulate_overflow = true;
+        }
+      )
+      if (simulate_overflow || !_work_queue->push(this_oop)) {
+        _collector->par_push_on_overflow_list(this_oop);
+        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
+      }
+    } // Else, some other thread got there first
+  }
+}
+
+// Record klass k on the revisit stack for later rescanning.  The revisit
+// stack cannot recover from overflow here, so a failed push is fatal.
+void PushAndMarkClosure::remember_klass(Klass* k) {
+  bool pushed = _revisit_stack->push(oop(k));
+  if (!pushed) {
+    fatal("Revisit stack overflowed in PushAndMarkClosure");
+  }
+}
+
+// Parallel version of remember_klass(): record klass k on the shared
+// revisit stack using a synchronized (par_push) push.  A failed push is
+// fatal, as in the serial version.
+void Par_PushAndMarkClosure::remember_klass(Klass* k) {
+  if (!_revisit_stack->par_push(oop(k))) {
+    // Fixed misspelling in the error message ("Revist" -> "Revisit").
+    fatal("Revisit stack overflowed in Par_PushAndMarkClosure");
+  }
+}
+
+// Yield protocol for the reference-precleaning phase: release the bit map
+// lock and the CMS token, acknowledge the yield request, pause while a
+// yield is still requested (bounded by CMSYieldSleepCount), then reacquire
+// in the opposite order.  The exact unlock/relock order is significant.
+void CMSPrecleanRefsYieldClosure::do_yield_work() {
+  Mutex* bml = _collector->bitMapLock();
+  assert_lock_strong(bml);
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+
+  bml->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+
+  _collector->stopTimer();
+  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // See the comment in coordinator_yield()
+  for (unsigned i = 0; i < CMSYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);    
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  bml->lock();
+
+  _collector->startTimer();
+}
+
+// Yield if requested, then report whether the caller should abandon
+// precleaning because a foreground (stop-the-world) GC has become active.
+bool CMSPrecleanRefsYieldClosure::should_return() {
+  if (ConcurrentMarkSweepThread::should_yield()) {
+    do_yield_work();
+  }
+  return _collector->foregroundGCIsActive();
+}
+
+// Scan the objects in a dirty-card region mr with the wrapped scanning
+// closure, optionally accounting the number of dirty cards for statistics.
+void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
+  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
+         "mr should be aligned to start at a card boundary");
+  // We'd like to assert:
+  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
+  //        "mr should be a range of cards");
+  // However, that would be too strong in one case -- the last
+  // partition ends at _unallocated_block which, in general, can be
+  // an arbitrary boundary, not necessarily card aligned.
+  if (PrintCMSStatistics != 0) {
+    _num_dirty_cards +=
+         mr.word_size()/CardTableModRefBS::card_size_in_words;
+  }
+  _space->object_iterate_mem(mr, &_scan_cl);
+}
+
+// Constructor: set up the sweep over generation g's space, capturing the
+// sweep limit and free-list lock, and (in debug builds) zeroing the
+// per-sweep statistics counters.
+SweepClosure::SweepClosure(CMSCollector* collector,
+                           ConcurrentMarkSweepGeneration* g,
+                           CMSBitMap* bitMap, bool should_yield) :
+  _collector(collector),
+  _g(g),
+  _sp(g->cmsSpace()),
+  _limit(_sp->sweep_limit()),
+  _freelistLock(_sp->freelistLock()),
+  _bitMap(bitMap),
+  _yield(should_yield),
+  _inFreeRange(false),           // No free range at beginning of sweep
+  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
+  _lastFreeRangeCoalesced(false),
+  _freeFinger(g->used_region().start())
+{
+  NOT_PRODUCT(
+    _numObjectsFreed = 0;
+    _numWordsFreed   = 0;
+    _numObjectsLive = 0;
+    _numWordsLive = 0;
+    _numObjectsAlreadyFree = 0;
+    _numWordsAlreadyFree = 0;
+    _last_fc = NULL;
+
+    _sp->initializeIndexedFreeListArrayReturnedBytes();
+    _sp->dictionary()->initializeDictReturnedBytes();
+  )
+  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
+         "sweep _limit out of bounds");
+  if (CMSTraceSweeper) {
+    gclog_or_tty->print("\n====================\nStarting new sweep\n");
+  }
+}
+
+// We need this destructor to reclaim any space at the end
+// of the space, which do_blk below may not have added back to
+// the free lists. [basically dealing with the "fringe effect"]
+// Also prints the per-sweep statistics in debug builds and, in debug
+// mode, clears the space's sweep limit.
+SweepClosure::~SweepClosure() {
+  assert_lock_strong(_freelistLock);
+  // this should be treated as the end of a free run if any
+  // The current free range should be returned to the free lists
+  // as one coalesced chunk.
+  if (inFreeRange()) {
+    flushCurFreeChunk(freeFinger(), 
+      pointer_delta(_limit, freeFinger()));
+    assert(freeFinger() < _limit, "the finger pointeth off base");
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print("destructor:");
+      gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
+                 "[coalesced:"SIZE_FORMAT"]\n",
+                 freeFinger(), pointer_delta(_limit, freeFinger()),
+                 lastFreeRangeCoalesced());
+    }
+  }
+  NOT_PRODUCT(
+    if (Verbose && PrintGC) {
+      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
+                          SIZE_FORMAT " bytes",
+                 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
+                             SIZE_FORMAT" bytes  "
+	"Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+	_numObjectsLive, _numWordsLive*sizeof(HeapWord), 
+	_numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
+	sizeof(HeapWord);
+      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+
+      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+        gclog_or_tty->print("	Indexed List Returned "SIZE_FORMAT" bytes", 
+  	  indexListReturnedBytes);
+        gclog_or_tty->print_cr("	Dictionary Returned "SIZE_FORMAT" bytes",
+  	  dictReturnedBytes);
+      }
+    }
+  )
+  // Now, in debug mode, just null out the sweep_limit
+  NOT_PRODUCT(_sp->clear_sweep_limit();)
+  if (CMSTraceSweeper) {
+    gclog_or_tty->print("end of sweep\n================\n");
+  }
+}
+
+// Begin tracking a new free range starting at freeFinger.
+// freeRangeInFreeLists indicates whether the chunk at freeFinger is
+// currently linked on the free lists (and so must be removed before the
+// coalesced range is eventually returned as a whole).
+void SweepClosure::initialize_free_range(HeapWord* freeFinger, 
+    bool freeRangeInFreeLists) {
+  if (CMSTraceSweeper) {
+    gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
+               freeFinger, _sp->block_size(freeFinger),
+	       freeRangeInFreeLists);
+  }
+  assert(!inFreeRange(), "Trampling existing free range");
+  set_inFreeRange(true);
+  set_lastFreeRangeCoalesced(false);
+
+  set_freeFinger(freeFinger);
+  set_freeRangeInFreeLists(freeRangeInFreeLists);
+  if (CMSTestInFreeList) {
+    if (freeRangeInFreeLists) { 
+      FreeChunk* fc = (FreeChunk*) freeFinger;
+      assert(fc->isFree(), "A chunk on the free list should be free.");
+      assert(fc->size() > 0, "Free range should have a size");
+      assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
+    }
+  }
+}
+
+// Note that the sweeper runs concurrently with mutators. Thus,
+// it is possible for direct allocation in this generation to happen
+// in the middle of the sweep. Note that the sweeper also coalesces
+// contiguous free blocks. Thus, unless the sweeper and the allocator
+// synchronize appropriately freshly allocated blocks may get swept up.
+// This is accomplished by the sweeper locking the free lists while
+// it is sweeping. Thus blocks that are determined to be free are
+// indeed free. There is however one additional complication:
+// blocks that have been allocated since the final checkpoint and
+// mark, will not have been marked and so would be treated as
+// unreachable and swept up. To prevent this, the allocator marks
+// the bit map when allocating during the sweep phase. This leads,
+// however, to a further complication -- objects may have been allocated
+// but not yet initialized -- in the sense that the header isn't yet
+// installed. The sweeper cannot then determine the size of the block
+// in order to skip over it. To deal with this case, we use a technique
+// (due to Printezis) to encode such uninitialized block sizes in the
+// bit map. Since the bit map uses a bit per every HeapWord, but the
+// CMS generation has a minimum object size of 3 HeapWords, it follows
+// that "normal marks" won't be adjacent in the bit map (there will
+// always be at least two 0 bits between successive 1 bits). We make use
+// of these "unused" bits to represent uninitialized blocks -- the bit
+// corresponding to the start of the uninitialized object and the next
+// bit are both set. Finally, a 1 bit marks the end of the object that
+// started with the two consecutive 1 bits to indicate its potentially
+// uninitialized state.
+
+// Sweep one block starting at addr and return its size in words.
+// Dispatches on the block's state: already free, fresh garbage (unmarked),
+// or live (marked); returns the remaining distance to _sp->end() once the
+// sweep limit has been reached.
+size_t SweepClosure::do_blk_careful(HeapWord* addr) {
+  FreeChunk* fc = (FreeChunk*)addr;
+  size_t res;
+
+  // check if we are done sweeping
+  if (addr == _limit) { // we have swept up to the limit, do nothing more
+    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
+           "sweep _limit out of bounds");
+    // help the closure application finish
+    return pointer_delta(_sp->end(), _limit);
+  }
+  assert(addr <= _limit, "sweep invariant");
+
+  // check if we should yield
+  do_yield_check(addr);
+  if (fc->isFree()) {
+    // Chunk that is already free
+    res = fc->size();
+    doAlreadyFreeChunk(fc);
+    debug_only(_sp->verifyFreeLists());
+    assert(res == fc->size(), "Don't expect the size to change");
+    NOT_PRODUCT(
+      _numObjectsAlreadyFree++;
+      _numWordsAlreadyFree += res;
+    )
+    NOT_PRODUCT(_last_fc = fc;)
+  } else if (!_bitMap->isMarked(addr)) {
+    // Chunk is fresh garbage
+    res = doGarbageChunk(fc);
+    debug_only(_sp->verifyFreeLists());
+    NOT_PRODUCT(
+      _numObjectsFreed++;
+      _numWordsFreed += res;
+    )
+  } else {
+    // Chunk that is alive.
+    res = doLiveChunk(fc);
+    debug_only(_sp->verifyFreeLists());
+    NOT_PRODUCT(
+	_numObjectsLive++;
+	_numWordsLive += res;
+    )
+  }
+  return res;
+}
+
+// For the smart allocation, record following
+//  split deaths - a free chunk is removed from its free list because
+//	it is being split into two or more chunks.
+//  split birth - a free chunk is being added to its free list because
+//	a larger free chunk has been split and resulted in this free chunk.
+//  coal death - a free chunk is being removed from its free list because
+//	it is being coalesced into a large free chunk.
+//  coal birth - a free chunk is being added to its free list because
+//	it was created when two or more free chunks where coalesced into
+//	this free chunk.
+//
+// These statistics are used to determine the desired number of free
+// chunks of a given size.  The desired number is chosen to be relative
+// to the end of a CMS sweep.  The desired number at the end of a sweep
+// is the 
+// 	count-at-end-of-previous-sweep (an amount that was enough)
+//		- count-at-beginning-of-current-sweep  (the excess)
+//		+ split-births  (gains in this size during interval)
+//		- split-deaths  (demands on this size during interval)
+// where the interval is from the end of one sweep to the end of the
+// next.
+//
+// When sweeping the sweeper maintains an accumulated chunk which is
+// the chunk that is made up of chunks that have been coalesced.  That
+// will be termed the left-hand chunk.  A new chunk of garbage that
+// is being considered for coalescing will be referred to as the
+// right-hand chunk.
+//
+// When making a decision on whether to coalesce a right-hand chunk with
+// the current left-hand chunk, the current count vs. the desired count
+// of the left-hand chunk is considered.  Also if the right-hand chunk
+// is near the large chunk at the end of the heap (see 
+// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the 
+// left-hand chunk is coalesced.
+//
+// When making a decision about whether to split a chunk, the desired count
+// vs. the current count of the candidate to be split is also considered.
+// If the candidate is underpopulated (currently fewer chunks than desired)
+// a chunk of an overpopulated (currently more chunks than desired) size may 
+// be chosen.  The "hint" associated with a free list, if non-null, points
+// to a free list which may be overpopulated.  
+//
+
+// Process a chunk that is already free: decide whether it can be
+// coalesced into the current free range (adaptive free lists delegate the
+// decision to doPostIsFreeOrGarbageChunk), or whether it terminates the
+// current range.
+void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
+  size_t size = fc->size();
+  // Chunks that cannot be coalesced are not in the
+  // free lists.
+  if (CMSTestInFreeList && !fc->cantCoalesce()) {
+    assert(_sp->verifyChunkInFreeLists(fc), 
+      "free chunk should be in free lists");
+  }
+  // a chunk that is already free, should not have been
+  // marked in the bit map
+  HeapWord* addr = (HeapWord*) fc;
+  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
+  // Verify that the bit map has no bits marked between
+  // addr and purported end of this block.
+  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
+
+  // Some chunks cannot be coalesced in under any circumstances.  
+  // See the definition of cantCoalesce().
+  if (!fc->cantCoalesce()) {
+    // This chunk can potentially be coalesced.
+    if (_sp->adaptive_freelists()) {
+      // All the work is done in 
+      doPostIsFreeOrGarbageChunk(fc, size);
+    } else {  // Not adaptive free lists
+      // this is a free chunk that can potentially be coalesced by the sweeper;
+      if (!inFreeRange()) {
+        // if the next chunk is a free block that can't be coalesced
+        // it doesn't make sense to remove this chunk from the free lists
+        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
+        assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
+        if ((HeapWord*)nextChunk < _limit  &&    // there's a next chunk...
+            nextChunk->isFree()    &&            // which is free...
+            nextChunk->cantCoalesce()) {         // ... but cant be coalesced
+          // nothing to do
+        } else {
+          // Potentially the start of a new free range:
+	  // Don't eagerly remove it from the free lists.  
+	  // No need to remove it if it will just be put
+	  // back again.  (Also from a pragmatic point of view
+	  // if it is a free block in a region that is beyond
+	  // any allocated blocks, an assertion will fail)
+          // Remember the start of a free run.
+          initialize_free_range(addr, true);
+          // end - can coalesce with next chunk
+        }
+      } else {
+        // the midst of a free range, we are coalescing
+        debug_only(record_free_block_coalesced(fc);)
+        if (CMSTraceSweeper) { 
+          gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
+        }
+        // remove it from the free lists
+        _sp->removeFreeChunkFromFreeLists(fc);
+        set_lastFreeRangeCoalesced(true);
+        // If the chunk is being coalesced and the current free range is
+        // in the free lists, remove the current free range so that it
+        // will be returned to the free lists in its entirety - all
+        // the coalesced pieces included.
+        if (freeRangeInFreeLists()) {
+	  FreeChunk* ffc = (FreeChunk*) freeFinger();
+	  assert(ffc->size() == pointer_delta(addr, freeFinger()),
+	    "Size of free range is inconsistent with chunk size.");
+	  if (CMSTestInFreeList) {
+            assert(_sp->verifyChunkInFreeLists(ffc),
+	      "free range is not in free lists");
+	  }
+          _sp->removeFreeChunkFromFreeLists(ffc);
+	  set_freeRangeInFreeLists(false);
+        }
+      }
+    }
+  } else {
+    // Code path common to both original and adaptive free lists.
+
+    // cant coalesce with previous block; this should be treated
+    // as the end of a free run if any
+    if (inFreeRange()) {
+      // we kicked some butt; time to pick up the garbage
+      assert(freeFinger() < addr, "the finger pointeth off base");
+      flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
+    }
+    // else, nothing to do, just continue
+  }
+}
+
+// Process a chunk of fresh garbage (dead object, not on any free list):
+// either hand it to the adaptive coalescing logic, start a new free range
+// with it, or coalesce it into the current free range.  Returns the
+// chunk's adjusted size in words.
+size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
+  // This is a chunk of garbage.  It is not in any free list.
+  // Add it to a free list or let it possibly be coalesced into
+  // a larger chunk.
+  HeapWord* addr = (HeapWord*) fc;
+  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+
+  if (_sp->adaptive_freelists()) {
+    // Verify that the bit map has no bits marked between
+    // addr and purported end of just dead object.
+    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
+
+    doPostIsFreeOrGarbageChunk(fc, size);
+  } else {
+    if (!inFreeRange()) {
+      // start of a new free range
+      assert(size > 0, "A free range should have a size");
+      initialize_free_range(addr, false);
+
+    } else {
+      // this will be swept up when we hit the end of the
+      // free range
+      if (CMSTraceSweeper) {
+        gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
+      }
+      // If the chunk is being coalesced and the current free range is
+      // in the free lists, remove the current free range so that it
+      // will be returned to the free lists in its entirety - all
+      // the coalesced pieces included.
+      if (freeRangeInFreeLists()) {
+	FreeChunk* ffc = (FreeChunk*)freeFinger();
+	assert(ffc->size() == pointer_delta(addr, freeFinger()),
+	  "Size of free range is inconsistent with chunk size.");
+	if (CMSTestInFreeList) {
+          assert(_sp->verifyChunkInFreeLists(ffc),
+	    "free range is not in free lists");
+	}
+        _sp->removeFreeChunkFromFreeLists(ffc);
+	set_freeRangeInFreeLists(false);
+      }
+      set_lastFreeRangeCoalesced(true);
+    }
+    // this will be swept up when we hit the end of the free range
+
+    // Verify that the bit map has no bits marked between
+    // addr and purported end of just dead object.
+    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
+  }
+  return size;
+}
+
+// Process a live object encountered during the sweep: flush any pending
+// free range back to the free lists, then compute and return the object's
+// size -- from the bit map's Printezis marks if the object's header may
+// not yet be installed (addr and addr+1 both marked), otherwise from the
+// object header itself.
+size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
+  HeapWord* addr = (HeapWord*) fc;
+  // The sweeper has just found a live object. Return any accumulated
+  // left hand chunk to the free lists.
+  if (inFreeRange()) {
+    if (_sp->adaptive_freelists()) {
+      flushCurFreeChunk(freeFinger(),
+                        pointer_delta(addr, freeFinger()));
+    } else { // not adaptive freelists
+      set_inFreeRange(false);
+      // Add the free range back to the free list if it is not already
+      // there.
+      if (!freeRangeInFreeLists()) {
+        assert(freeFinger() < addr, "the finger pointeth off base");
+        if (CMSTraceSweeper) {
+          gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
+            "[coalesced:%d]\n",
+            freeFinger(), pointer_delta(addr, freeFinger()),
+            lastFreeRangeCoalesced());
+        }
+        _sp->addChunkAndRepairOffsetTable(freeFinger(),
+          pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
+      }
+    }
+  }
+
+  // Common code path for original and adaptive free lists.
+
+  // this object is live: we'd normally expect this to be
+  // an oop, and like to assert the following:
+  // assert(oop(addr)->is_oop(), "live block should be an oop");
+  // However, as we commented above, this may be an object whose
+  // header hasn't yet been initialized.
+  size_t size;
+  assert(_bitMap->isMarked(addr), "Tautology for this control point");
+  if (_bitMap->isMarked(addr + 1)) {
+    // Determine the size from the bit map, rather than trying to
+    // compute it from the object header.
+    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
+    size = pointer_delta(nextOneAddr + 1, addr);
+    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
+           "alignment problem");
+
+    #ifdef DEBUG
+      if (oop(addr)->klass() != NULL &&
+          (   !_collector->cms_should_unload_classes()
+           || oop(addr)->is_parsable())) {
+        // Ignore mark word because we are running concurrent with mutators
+        assert(oop(addr)->is_oop(true), "live block should be an oop");
+        assert(size ==
+               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
+               "P-mark and computed size do not agree");
+      }
+    #endif
+
+  } else {
+    // This should be an initialized object that's alive.
+    assert(oop(addr)->klass() != NULL &&
+           (!_collector->cms_should_unload_classes()
+            || oop(addr)->is_parsable()),
+           "Should be an initialized object");
+    // Ignore mark word because we are running concurrent with mutators
+    assert(oop(addr)->is_oop(true), "live block should be an oop");
+    // Verify that the bit map has no bits marked between
+    // addr and purported end of this block.
+    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+    assert(size >= 3, "Necessary for Printezis marks to work");
+    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
+    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
+  }
+  return size;
+}
+
+void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc, 
+					    size_t chunkSize) { 
+  // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
+  // scheme.
+  bool fcInFreeLists = fc->isFree();
+  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
+  assert((HeapWord*)fc <= _limit, "sweep invariant");
+  if (CMSTestInFreeList && fcInFreeLists) {
+    assert(_sp->verifyChunkInFreeLists(fc), 
+      "free chunk is not in free lists");
+  }
+  
+ 
+  if (CMSTraceSweeper) {
+    gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
+  }
+
+  HeapWord* addr = (HeapWord*) fc;
+
+  bool coalesce;
+  size_t left  = pointer_delta(addr, freeFinger());
+  size_t right = chunkSize;
+  switch (FLSCoalescePolicy) {
+    // numeric value forms a coalition aggressiveness metric
+    case 0:  { // never coalesce
+      coalesce = false;
+      break;
+    } 
+    case 1: { // coalesce if left & right chunks on overpopulated lists
+      coalesce = _sp->coalOverPopulated(left) &&
+                 _sp->coalOverPopulated(right);
+      break;
+    }
+    case 2: { // coalesce if left chunk on overpopulated list (default)
+      coalesce = _sp->coalOverPopulated(left);
+      break;
+    }
+    case 3: { // coalesce if left OR right chunk on overpopulated list
+      coalesce = _sp->coalOverPopulated(left) || 
+                 _sp->coalOverPopulated(right);
+      break;
+    }
+    case 4: { // always coalesce
+      coalesce = true;
+      break;
+    }
+    default:
+     ShouldNotReachHere();
+  }
+
+  // Should the current free range be coalesced?
+  // If the chunk is in a free range and either we decided to coalesce above
+  // or the chunk is near the large block at the end of the heap
+  // (isNearLargestChunk() returns true), then coalesce this chunk.
+  bool doCoalesce = inFreeRange() &&
+    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  if (doCoalesce) {
+    // Coalesce the current free range on the left with the new
+    // chunk on the right.  If either is on a free list,
+    // it must be removed from the list and stashed in the closure.
+    if (freeRangeInFreeLists()) {
+      FreeChunk* ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+        "Size of free range is inconsistent with chunk size.");
+      if (CMSTestInFreeList) {
+        assert(_sp->verifyChunkInFreeLists(ffc),
+	  "Chunk is not in free lists");
+      }
+      _sp->coalDeath(ffc->size());
+      _sp->removeFreeChunkFromFreeLists(ffc);
+      set_freeRangeInFreeLists(false);
+    }
+    if (fcInFreeLists) {
+      _sp->coalDeath(chunkSize);
+      assert(fc->size() == chunkSize, 
+	"The chunk has the wrong size or is not in the free lists");
+      _sp->removeFreeChunkFromFreeLists(fc);
+    }
+    set_lastFreeRangeCoalesced(true);
+  } else {  // not in a free range and/or should not coalesce
+    // Return the current free range and start a new one.
+    if (inFreeRange()) {
+      // In a free range but cannot coalesce with the right hand chunk.
+      // Put the current free range into the free lists.
+      flushCurFreeChunk(freeFinger(), 
+	pointer_delta(addr, freeFinger()));
+    }
+    // Set up for new free range.  Pass along whether the right hand
+    // chunk is in the free lists.
+    initialize_free_range((HeapWord*)fc, fcInFreeLists);
+  }
+}
+void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
+  assert(inFreeRange(), "Should only be called if currently in a free range.");
+  assert(size > 0, 
+    "A zero sized chunk cannot be added to the free lists.");
+  if (!freeRangeInFreeLists()) {
+    if(CMSTestInFreeList) {
+      FreeChunk* fc = (FreeChunk*) chunk;
+      fc->setSize(size);
+      assert(!_sp->verifyChunkInFreeLists(fc),
+	"chunk should not be in free lists yet");
+    }
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
+                    chunk, size);
+    }
+    // A new free range is going to be starting.  The current
+    // free range has not been added to the free lists yet or
+    // was removed so add it back.
+    // If the current free range was coalesced, then the death
+    // of the free range was recorded.  Record a birth now.
+    if (lastFreeRangeCoalesced()) {
+      _sp->coalBirth(size);
+    }
+    _sp->addChunkAndRepairOffsetTable(chunk, size,
+	    lastFreeRangeCoalesced());
+  }
+  set_inFreeRange(false);
+  set_freeRangeInFreeLists(false);
+}
+
+// We take a break if we've been at this for a while,
+// so as to avoid monopolizing the locks involved.
+void SweepClosure::do_yield_work(HeapWord* addr) {
+  // Return current free chunk being used for coalescing (if any)
+  // to the appropriate freelist.  After yielding, the next
+  // free block encountered will start a coalescing range of
+  // free blocks.  If the next free block is adjacent to the
+  // chunk just flushed, they will need to wait for the next
+  // sweep to be coalesced.
+  if (inFreeRange()) {
+    flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
+  }
+
+  // First give up the locks, then yield, then re-lock.
+  // We should probably use a constructor/destructor idiom to
+  // do this unlock/lock or modify the MutexUnlocker class to
+  // serve our purpose. XXX
+  assert_lock_strong(_bitMap->lock());
+  assert_lock_strong(_freelistLock);
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+         "CMS thread should hold CMS token");
+  _bitMap->lock()->unlock();
+  _freelistLock->unlock();
+  ConcurrentMarkSweepThread::desynchronize(true);
+  ConcurrentMarkSweepThread::acknowledge_yield_request();
+  _collector->stopTimer();
+  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
+  if (PrintCMSStatistics != 0) {
+    _collector->incrementYields();
+  }
+  _collector->icms_wait();
+
+  // See the comment in coordinator_yield()
+  for (unsigned i = 0; i < CMSYieldSleepCount &&
+	               ConcurrentMarkSweepThread::should_yield() &&
+	               !CMSCollector::foregroundGCIsActive(); ++i) {
+    os::sleep(Thread::current(), 1, false);    
+    ConcurrentMarkSweepThread::acknowledge_yield_request();
+  }
+
+  ConcurrentMarkSweepThread::synchronize(true);
+  _freelistLock->lock();
+  _bitMap->lock()->lock_without_safepoint_check();
+  _collector->startTimer();
+}
+
+#ifndef PRODUCT
+// This is actually very useful in a product build if it can
+// be called from the debugger.  Compile it into the product
+// as needed.
+bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
+  return debug_cms_space->verifyChunkInFreeLists(fc);
+}
+
+void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
+  if (CMSTraceSweeper) {
+    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
+  }
+}
+#endif
+
+// CMSIsAliveClosure
+bool CMSIsAliveClosure::do_object_b(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
+  return addr != NULL &&
+         (!_span.contains(addr) || _bit_map->isMarked(addr));
+} 
+
+// CMSKeepAliveClosure: the serial version
+void CMSKeepAliveClosure::do_oop(oop* p) {
+  oop this_oop = *p;
+  HeapWord* addr = (HeapWord*)this_oop;
+  if (_span.contains(addr) &&
+      !_bit_map->isMarked(addr)) {
+    _bit_map->mark(addr);
+    bool simulate_overflow = false;
+    NOT_PRODUCT(
+      if (CMSMarkStackOverflowALot &&
+          _collector->simulate_overflow()) {
+        // simulate a stack overflow
+        simulate_overflow = true;
+      }
+    )
+    if (simulate_overflow || !_mark_stack->push(this_oop)) {
+      _collector->push_on_overflow_list(this_oop);
+      _collector->_ser_kac_ovflw++;
+    }
+  }
+}
+
+// CMSParKeepAliveClosure: a parallel version of the above.
+// The work queues are private to each closure (thread),
+// but (may be) available for stealing by other threads.
+void CMSParKeepAliveClosure::do_oop(oop* p) {
+  oop this_oop = *p;
+  HeapWord* addr = (HeapWord*)this_oop;
+  if (_span.contains(addr) &&
+      !_bit_map->isMarked(addr)) {
+    // In general, during recursive tracing, several threads
+    // may be concurrently getting here; the first one to
+    // "tag" it, claims it.
+    if (_bit_map->par_mark(addr)) { 
+      bool res = _work_queue->push(this_oop);
+      assert(res, "Low water mark should be much less than capacity");
+      // Do a recursive trim in the hope that this will keep
+      // stack usage lower, but leave some oops for potential stealers
+      trim_queue(_low_water_mark);
+    } // Else, another thread got there first
+  }
+}
+
+void CMSParKeepAliveClosure::trim_queue(uint max) {
+  while (_work_queue->size() > max) {
+    oop new_oop;
+    if (_work_queue->pop_local(new_oop)) {
+      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
+      assert(_bit_map->isMarked((HeapWord*)new_oop),
+             "no white objects on this stack!");
+      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
+      // iterate over the oops in this oop, marking and pushing
+      // the ones in CMS heap (i.e. in _span).
+      new_oop->oop_iterate(&_mark_and_push);
+    }
+  }
+}
+
+void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
+  oop this_oop = *p;
+  HeapWord* addr = (HeapWord*)this_oop;
+  if (_span.contains(addr) &&
+      !_bit_map->isMarked(addr)) {
+    if (_bit_map->par_mark(addr)) {
+      bool simulate_overflow = false;
+      NOT_PRODUCT(
+        if (CMSMarkStackOverflowALot &&
+            _collector->par_simulate_overflow()) {
+          // simulate a stack overflow
+          simulate_overflow = true;
+        }
+      )
+      if (simulate_overflow || !_work_queue->push(this_oop)) {
+        _collector->par_push_on_overflow_list(this_oop);
+        _collector->_par_kac_ovflw++;
+      }
+    } // Else another thread got there already
+  }
+}
+
+//////////////////////////////////////////////////////////////////
+//  CMSExpansionCause		     /////////////////////////////
+//////////////////////////////////////////////////////////////////
+const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
+  switch (cause) {
+    case _no_expansion:
+      return "No expansion";
+    case _satisfy_free_ratio:
+      return "Free ratio";
+    case _satisfy_promotion:
+      return "Satisfy promotion";
+    case _satisfy_allocation:
+      return "allocation";
+    case _allocate_par_lab:
+      return "Par LAB";
+    case _allocate_par_spooling_space:
+      return "Par Spooling Space";
+    case _adaptive_size_policy:
+      return "Ergonomics";
+    default:
+      return "unknown";
+  }
+}
+
+void CMSDrainMarkingStackClosure::do_void() {
+  // the max number to take from overflow list at a time
+  const size_t num = _mark_stack->capacity()/4;
+  while (!_mark_stack->isEmpty() ||
+         // if stack is empty, check the overflow list
+         _collector->take_from_overflow_list(num, _mark_stack)) {
+    oop this_oop = _mark_stack->pop();
+    HeapWord* addr = (HeapWord*)this_oop;
+    assert(_span.contains(addr), "Should be within span");
+    assert(_bit_map->isMarked(addr), "Should be marked");
+    assert(this_oop->is_oop(), "Should be an oop");
+    this_oop->oop_iterate(_keep_alive);
+  }
+}
+
+void CMSParDrainMarkingStackClosure::do_void() {
+  // drain queue
+  trim_queue(0);
+}
+
+// Trim our work_queue so its length is below max at return
+void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
+  while (_work_queue->size() > max) {
+    oop new_oop;
+    if (_work_queue->pop_local(new_oop)) {
+      assert(new_oop->is_oop(), "Expected an oop");
+      assert(_bit_map->isMarked((HeapWord*)new_oop),
+             "no white objects on this stack!");
+      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
+      // iterate over the oops in this oop, marking and pushing
+      // the ones in CMS heap (i.e. in _span).
+      new_oop->oop_iterate(&_mark_and_push);
+    }
+  }
+}
+
+////////////////////////////////////////////////////////////////////
+// Support for Marking Stack Overflow list handling and related code
+////////////////////////////////////////////////////////////////////
+// Much of the following code is similar in shape and spirit to the
+// code used in ParNewGC. We should try and share that code
+// as much as possible in the future.
+
+#ifndef PRODUCT
+// Debugging support for CMSStackOverflowALot
+
+// It's OK to call this multi-threaded;  the worst thing
+// that can happen is that we'll get a bunch of closely
+// spaced simulated overflows, but that's OK, in fact
+// probably good as it would exercise the overflow code
+// under contention.
+bool CMSCollector::simulate_overflow() {
+  if (_overflow_counter-- <= 0) { // just being defensive
+    _overflow_counter = CMSMarkStackOverflowInterval;
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool CMSCollector::par_simulate_overflow() {
+  return simulate_overflow();
+}
+#endif
+
+// Single-threaded
+bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
+  assert(stack->isEmpty(), "Expected precondition");
+  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
+  size_t i = num;
+  oop  cur = _overflow_list;
+  const markOop proto = markOopDesc::prototype();
+  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
+    next = oop(cur->mark());
+    cur->set_mark(proto);   // until proven otherwise
+    bool res = stack->push(cur);
+    assert(res, "Bit off more than can chew?");
+  }
+  _overflow_list = cur;
+  return !stack->isEmpty();
+}
+
+// Multi-threaded; use CAS to break off a prefix
+bool CMSCollector::par_take_from_overflow_list(size_t num,
+                                               OopTaskQueue* work_q) {
+  assert(work_q->size() == 0, "That's the current policy");
+  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
+  if (_overflow_list == NULL) {
+    return false;
+  }
+  // Grab the entire list; we'll put back a suffix
+  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
+  if (prefix == NULL) {  // someone grabbed it before we did ...
+    // ... we could spin for a short while, but for now we don't
+    return false;
+  }
+  size_t i = num;
+  oop cur = prefix;
+  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
+  if (cur->mark() != NULL) {
+    oop suffix_head = cur->mark(); // suffix will be put back on global list
+    cur->set_mark(NULL);           // break off suffix
+    // Find tail of suffix so we can prepend suffix to global list
+    for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
+    oop suffix_tail = cur;
+    assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
+           "Tautology");
+    oop observed_overflow_list = _overflow_list;
+    do {
+      cur = observed_overflow_list;
+      suffix_tail->set_mark(markOop(cur));
+      observed_overflow_list =
+        (oop) Atomic::cmpxchg_ptr(suffix_tail, &_overflow_list, cur);
+    } while (cur != observed_overflow_list);
+  }
+
+  // Push the prefix elements on work_q
+  assert(prefix != NULL, "control point invariant");
+  const markOop proto = markOopDesc::prototype();
+  oop next;
+  for (cur = prefix; cur != NULL; cur = next) {
+    next = oop(cur->mark());
+    cur->set_mark(proto);   // until proven otherwise
+    bool res = work_q->push(cur);
+    assert(res, "Bit off more than we can chew?");
+  }
+  return true;
+}
+
+// Single-threaded
+void CMSCollector::push_on_overflow_list(oop p) {
+  preserve_mark_if_necessary(p);
+  p->set_mark((markOop)_overflow_list);
+  _overflow_list = p;
+}
+
+// Multi-threaded; use CAS to prepend to overflow list
+void CMSCollector::par_push_on_overflow_list(oop p) {
+  par_preserve_mark_if_necessary(p);
+  oop observed_overflow_list = _overflow_list;
+  oop cur_overflow_list;
+  do {
+    cur_overflow_list = observed_overflow_list;
+    p->set_mark(markOop(cur_overflow_list));
+    observed_overflow_list =
+      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
+  } while (cur_overflow_list != observed_overflow_list);
+}
+
+// Single threaded
+// General Note on GrowableArray: pushes may silently fail
+// because we are (temporarily) out of C-heap for expanding
+// the stack. The problem is quite ubiquitous and affects
+// a lot of code in the JVM. The prudent thing for GrowableArray
+// to do (for now) is to exit with an error. However, that may
+// be too draconian in some cases because the caller may be
+// able to recover without much harm. For such cases, we
+// should probably introduce a "soft_push" method which returns
+// an indication of success or failure with the assumption that
+// the caller may be able to recover from a failure; code in
+// the VM can then be changed, incrementally, to deal with such
+// failures where possible, thus, incrementally hardening the VM
+// in such low resource situations.
+void CMSCollector::preserve_mark_work(oop p, markOop m) {
+  int PreserveMarkStackSize = 128;
+
+  if (_preserved_oop_stack == NULL) {
+    assert(_preserved_mark_stack == NULL,
+           "bijection with preserved_oop_stack");
+    // Allocate the stacks
+    _preserved_oop_stack  = new (ResourceObj::C_HEAP) 
+      GrowableArray<oop>(PreserveMarkStackSize, true);
+    _preserved_mark_stack = new (ResourceObj::C_HEAP) 
+      GrowableArray<markOop>(PreserveMarkStackSize, true);
+    if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
+      vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
+                            "Preserved Mark/Oop Stack for CMS (C-heap)");
+    }
+  }
+  _preserved_oop_stack->push(p);
+  _preserved_mark_stack->push(m);
+  assert(m == p->mark(), "Mark word changed");
+  assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
+         "bijection");
+}
+
+// Single threaded
+void CMSCollector::preserve_mark_if_necessary(oop p) {
+  markOop m = p->mark();
+  if (m->must_be_preserved(p)) {
+    preserve_mark_work(p, m);
+  }
+}
+
+void CMSCollector::par_preserve_mark_if_necessary(oop p) {
+  markOop m = p->mark();
+  if (m->must_be_preserved(p)) {
+    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+    // Even though we read the mark word without holding
+    // the lock, we are assured that it will not change
+    // because we "own" this oop, so no other thread can
+    // be trying to push it on the overflow list; see
+    // the assertion in preserve_mark_work() that checks
+    // that m == p->mark().
+    preserve_mark_work(p, m);
+  }
+}
+
+// We should be able to do this multi-threaded,
+// a chunk of stack being a task (this is
+// correct because each oop only ever appears
+// once in the overflow list. However, it's
+// not very easy to completely overlap this with
+// other operations, so will generally not be done
+// until all work's been completed. Because we
+// expect the preserved oop stack (set) to be small,
+// it's probably fine to do this single-threaded.
+// We can explore cleverer concurrent/overlapped/parallel
+// processing of preserved marks if we feel the
+// need for this in the future. Stack overflow should
+// be so rare in practice and, when it happens, its
+// effect on performance so great that this will
+// likely just be in the noise anyway.
+void CMSCollector::restore_preserved_marks_if_any() {
+  if (_preserved_oop_stack == NULL) {
+    assert(_preserved_mark_stack == NULL,
+           "bijection with preserved_oop_stack");
+    return;
+  }
+
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "world should be stopped");
+  assert(Thread::current()->is_ConcurrentGC_thread() ||
+         Thread::current()->is_VM_thread(),
+         "should be single-threaded");
+
+  int length = _preserved_oop_stack->length();
+  assert(_preserved_mark_stack->length() == length, "bijection");
+  for (int i = 0; i < length; i++) {
+    oop p = _preserved_oop_stack->at(i);
+    assert(p->is_oop(), "Should be an oop");
+    assert(_span.contains(p), "oop should be in _span");
+    assert(p->mark() == markOopDesc::prototype(),
+           "Set when taken from overflow list");
+    markOop m = _preserved_mark_stack->at(i);
+    p->set_mark(m);
+  }
+  _preserved_mark_stack->clear();
+  _preserved_oop_stack->clear();
+  assert(_preserved_mark_stack->is_empty() &&
+         _preserved_oop_stack->is_empty(),
+         "stacks were cleared above");
+}
+
+#ifndef PRODUCT
+bool CMSCollector::no_preserved_marks() {
+  return (   (   _preserved_mark_stack == NULL
+              && _preserved_oop_stack == NULL)
+          || (   _preserved_mark_stack->is_empty()
+              && _preserved_oop_stack->is_empty()));
+}
+#endif
+
+CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
+{
+  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
+  CMSAdaptiveSizePolicy* size_policy =
+    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
+  assert(size_policy->is_gc_cms_adaptive_size_policy(),
+    "Wrong type for size policy");
+  return size_policy;
+}
+
+void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
+                                           size_t desired_promo_size) {
+  if (cur_promo_size < desired_promo_size) {
+    size_t expand_bytes = desired_promo_size - cur_promo_size;
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
+	"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
+	expand_bytes);
+    }
+    expand(expand_bytes,
+           MinHeapDeltaBytes,
+           CMSExpansionCause::_adaptive_size_policy);
+  } else if (desired_promo_size < cur_promo_size) {
+    size_t shrink_bytes = cur_promo_size - desired_promo_size;
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
+	"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
+	shrink_bytes);
+    }
+    shrink(shrink_bytes);
+  }
+}
+
+CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSGCAdaptivePolicyCounters* counters =
+    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
+  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
+    "Wrong kind of counters");
+  return counters;
+}
+
+
+void ASConcurrentMarkSweepGeneration::update_counters() {
+  if (UsePerfData) {
+    _space_counters->update_all();
+    _gen_counters->update_all();
+    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
+    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
+      "Wrong gc statistics type");
+    counters->update_counters(gc_stats_l);
+  }
+}
+
+void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
+  if (UsePerfData) {
+    _space_counters->update_used(used);
+    _space_counters->update_capacity();
+    _gen_counters->update_all();
+
+    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
+    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
+      "Wrong gc statistics type");
+    counters->update_counters(gc_stats_l);
+  }
+}
+
+// The desired expansion delta is computed so that:
+// . desired free percentage or greater is used
+void ASConcurrentMarkSweepGeneration::compute_new_size() {
+  assert_locked_or_safepoint(Heap_lock);
+
+  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
+
+  // If incremental collection failed, we just want to expand
+  // to the limit.
+  if (incremental_collection_failed()) {
+    clear_incremental_collection_failed();
+    grow_to_reserved();
+    return;
+  }
+
+  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
+
+  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
+    "Wrong type of heap");
+  int prev_level = level() - 1;
+  assert(prev_level >= 0, "The cms generation is the lowest generation");
+  Generation* prev_gen = gch->get_gen(prev_level);
+  assert(prev_gen->kind() == Generation::ASParNew,
+    "Wrong type of young generation");
+  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
+  size_t cur_eden = younger_gen->eden()->capacity();
+  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
+  size_t cur_promo = free();
+  size_policy->compute_tenured_generation_free_space(cur_promo, 
+						       max_available(),
+						       cur_eden);
+  resize(cur_promo, size_policy->promo_size());
+
+  // Record the new size of the space in the cms generation
+  // that is available for promotions.  This is temporary.
+  // It should be the desired promo size.
+  size_policy->avg_cms_promo()->sample(free());
+  size_policy->avg_old_live()->sample(used());
+
+  if (UsePerfData) {
+    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
+    counters->update_cms_capacity_counter(capacity());
+  }
+}
+
+void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
+  assert_locked_or_safepoint(Heap_lock);
+  assert_lock_strong(freelistLock());
+  HeapWord* old_end = _cmsSpace->end();
+  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
+  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
+  FreeChunk* chunk_at_end = find_chunk_at_end();
+  if (chunk_at_end == NULL) {
+    // No room to shrink
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("No room to shrink: old_end  "
+	PTR_FORMAT "  unallocated_start  " PTR_FORMAT 
+	" chunk_at_end  " PTR_FORMAT,
+        old_end, unallocated_start, chunk_at_end);
+    }
+    return;
+  } else {
+
+    // Find the chunk at the end of the space and determine
+    // how much it can be shrunk.
+    size_t shrinkable_size_in_bytes = chunk_at_end->size();
+    size_t aligned_shrinkable_size_in_bytes = 
+      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
+    assert(unallocated_start <= chunk_at_end->end(),
+      "Inconsistent chunk at end of space");
+    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
+    size_t word_size_before = heap_word_size(_virtual_space.committed_size());
+  
+    // Shrink the underlying space
+    _virtual_space.shrink_by(bytes);
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
+        " desired_bytes " SIZE_FORMAT 
+        " shrinkable_size_in_bytes " SIZE_FORMAT
+        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT 
+        "  bytes  " SIZE_FORMAT, 
+        desired_bytes, shrinkable_size_in_bytes, 
+        aligned_shrinkable_size_in_bytes, bytes);
+      gclog_or_tty->print_cr("		old_end  " SIZE_FORMAT 
+        "  unallocated_start  " SIZE_FORMAT, 
+        old_end, unallocated_start);
+    }
+  
+    // If the space did shrink (shrinking is not guaranteed),
+    // shrink the chunk at the end by the appropriate amount.
+    if (((HeapWord*)_virtual_space.high()) < old_end) {
+      size_t new_word_size = 
+        heap_word_size(_virtual_space.committed_size());
+  
+      // Have to remove the chunk from the dictionary because it is changing
+      // size and might be someplace elsewhere in the dictionary.
+
+      // Get the chunk at end, shrink it, and put it
+      // back.
+      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
+      size_t word_size_change = word_size_before - new_word_size;
+      size_t chunk_at_end_old_size = chunk_at_end->size();
+      assert(chunk_at_end_old_size >= word_size_change,
+        "Shrink is too large");
+      chunk_at_end->setSize(chunk_at_end_old_size - 
+  			  word_size_change);
+      _cmsSpace->freed((HeapWord*) chunk_at_end->end(), 
+        word_size_change);
+      
+      _cmsSpace->returnChunkToDictionary(chunk_at_end);
+  
+      MemRegion mr(_cmsSpace->bottom(), new_word_size);
+      _bts->resize(new_word_size);  // resize the block offset shared array
+      Universe::heap()->barrier_set()->resize_covered_region(mr);
+      _cmsSpace->assert_locked();
+      _cmsSpace->set_end((HeapWord*)_virtual_space.high());
+  
+      NOT_PRODUCT(_cmsSpace->dictionary()->verify());
+  
+      // update the space and generation capacity counters
+      if (UsePerfData) {
+        _space_counters->update_capacity();
+        _gen_counters->update_all();
+      }
+  
+      if (Verbose && PrintGCDetails) {
+        size_t new_mem_size = _virtual_space.committed_size();
+        size_t old_mem_size = new_mem_size + bytes;
+        gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
+                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
+      }
+    }
+  
+    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(), 
+      "Inconsistency at end of space");
+    assert(chunk_at_end->end() == _cmsSpace->end(), 
+      "Shrinking is inconsistent");
+    return;
+  }
+}
+
+// Transfer some number of overflown objects to usual marking
+// stack. Return true if some objects were transferred.
+bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
+  size_t num = MIN2((size_t)_mark_stack->capacity()/4,
+                    (size_t)ParGCDesiredObjsFromOverflowList);
+  
+  bool res = _collector->take_from_overflow_list(num, _mark_stack);
+  assert(_collector->overflow_list_is_empty() || res,
+         "If list is not empty, we should have taken something");
+  assert(!res || _mark_stack->isEmpty(),
+         "If we took something, it should now be on our stack");
+  return res;
+}
+
+size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
+  size_t res = _sp->block_size_no_stall(addr, _collector);
+  assert(res != 0, "Should always be able to compute a size");
+  if (_sp->block_is_obj(addr)) {
+    if (_live_bit_map->isMarked(addr)) {
+      // It can't have been dead in a previous cycle
+      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
+    } else {
+      _dead_bit_map->mark(addr);      // mark the dead object
+    }
+  }
+  return res;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,1823 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)concurrentMarkSweepGeneration.hpp	1.159 07/05/17 15:52:09 JVM"
+#endif
+/*
+ * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// ConcurrentMarkSweepGeneration is in support of a concurrent
+// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
+// style. We assume, for now, that this generation is always the
+// seniormost generation (modulo the PermGeneration), and for simplicity
+// in the first implementation, that this generation is a single compactible
+// space. Neither of these restrictions appears essential, and will be
+// relaxed in the future when more time is available to implement the
+// greater generality (and there's a need for it).
+//
+// Concurrent mode failures are currently handled by
+// means of a sliding mark-compact.
+
+class CMSAdaptiveSizePolicy;
+class CMSConcMarkingTask;
+class CMSGCAdaptivePolicyCounters;
+class ConcurrentMarkSweepGeneration;
+class ConcurrentMarkSweepPolicy;
+class ConcurrentMarkSweepThread;
+class CompactibleFreeListSpace;
+class FreeChunk;
+class PromotionInfo;
+class ScanMarkedObjectsAgainCarefullyClosure;
+
+
+// A generic CMS bit map. It's the basis for both the CMS marking bit map
+// as well as for the mod union table (in each case only a subset of the
+// methods are used). This is essentially a wrapper around the BitMap class,
+// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
+// we have _shifter == 0. and for the mod union table we have
+// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
+// XXX 64-bit issues in BitMap?
+class CMSBitMap VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
+  HeapWord* _bmStartWord;   // base address of range covered by map
+  size_t    _bmWordSize;    // map size (in #HeapWords covered)
+  const int _shifter;	    // shifts to convert HeapWord to bit position
+  VirtualSpace _virtual_space; // underlying the bit map
+  BitMap    _bm;            // the bit map itself
+ public:
+  Mutex* const _lock;       // mutex protecting _bm;
+
+ public:
+  // constructor
+  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
+
+  // allocates the actual storage for the map
+  bool allocate(MemRegion mr);
+  // field getter
+  Mutex* lock() const { return _lock; }
+  // locking verifier convenience function
+  void assert_locked() const PRODUCT_RETURN;
+
+  // inquiries
+  HeapWord* startWord()   const { return _bmStartWord; }
+  size_t    sizeInWords() const { return _bmWordSize;  }
+  size_t    sizeInBits()  const { return _bm.size();   }
+  // the following is one past the last word in space
+  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
+
+  // reading marks
+  bool isMarked(HeapWord* addr) const;
+  bool par_isMarked(HeapWord* addr) const; // do not lock checks
+  bool isUnmarked(HeapWord* addr) const;
+  bool isAllClear() const;
+
+  // writing marks
+  void mark(HeapWord* addr);
+  // For marking by parallel GC threads;
+  // returns true if we did, false if another thread did
+  bool par_mark(HeapWord* addr);
+
+  // Range variants of mark/clear. The "par_" forms are intended for use
+  // by parallel GC threads and the "large" forms for big regions; see
+  // the .cpp file for the exact locking/atomicity differences.
+  void mark_range(MemRegion mr);
+  void par_mark_range(MemRegion mr);
+  void mark_large_range(MemRegion mr);
+  void par_mark_large_range(MemRegion mr);
+  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
+  void clear_range(MemRegion mr);
+  void par_clear_range(MemRegion mr);
+  void clear_large_range(MemRegion mr);
+  void par_clear_large_range(MemRegion mr);
+  void clear_all();
+  void clear_all_incrementally();  // Not yet implemented!!
+
+  NOT_PRODUCT(
+    // checks the memory region for validity
+    void region_invariant(MemRegion mr);
+  )
+  
+  // iteration
+  void iterate(BitMapClosure* cl) {
+    _bm.iterate(cl);
+  }
+  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
+  void dirty_range_iterate_clear(MemRegionClosure* cl);
+  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
+ 
+  // auxiliary support for iteration
+  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
+  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
+                                            HeapWord* end_addr) const;
+  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
+  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
+                                              HeapWord* end_addr) const;
+  MemRegion getAndClearMarkedRegion(HeapWord* addr);
+  MemRegion getAndClearMarkedRegion(HeapWord* start_addr, 
+                                           HeapWord* end_addr);
+
+  // conversion utilities between heap addresses and bit-map offsets
+  HeapWord* offsetToHeapWord(size_t offset) const;
+  size_t    heapWordToOffset(HeapWord* addr) const;
+  size_t    heapWordDiffToOffsetDiff(size_t diff) const;
+  
+  // debugging
+  // is this address range covered by the bit-map?
+  NOT_PRODUCT(
+    bool covers(MemRegion mr) const;
+    bool covers(HeapWord* start, size_t size = 0) const;
+  )
+  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
+};
+
+// Represents a marking stack used by the CMS collector.
+// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
+class CMSMarkStack: public CHeapObj  {
+  // 
+  friend class CMSCollector;   // to get at expansion stats further below
+  //
+
+  VirtualSpace _virtual_space;  // space for the stack
+  oop*   _base;      // bottom of stack
+  size_t _index;     // one more than last occupied index
+  size_t _capacity;  // max #elements
+  Mutex  _par_lock;  // an advisory lock used in case of parallel access
+  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
+
+ protected:
+  size_t _hit_limit;      // we hit max stack size limit
+  size_t _failed_double;  // we failed expansion before hitting limit
+
+ public:
+  CMSMarkStack():
+    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
+    _hit_limit(0),
+    _failed_double(0) {}
+
+  bool allocate(size_t size);
+
+  size_t capacity() const { return _capacity; }
+
+  // Pop and return the top element, or NULL if the stack is empty.
+  oop pop() {
+    if (!isEmpty()) {
+      return _base[--_index] ;
+    }
+    return NULL;
+  }
+
+  // Push ptr; returns false (without pushing) if the stack is full.
+  bool push(oop ptr) {
+    if (isFull()) {
+      return false;
+    } else {
+      _base[_index++] = ptr;
+      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
+      return true;
+    }
+  }
+
+  bool isEmpty() const { return _index == 0; }
+  bool isFull()  const {
+    assert(_index <= _capacity, "buffer overflow");
+    return _index == _capacity;
+  }
+
+  size_t length() { return _index; }
+
+  // "Parallel versions" of some of the above
+  oop par_pop() {
+    // lock and pop
+    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
+    return pop();
+  }
+
+  bool par_push(oop ptr) {
+    // lock and push
+    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
+    return push(ptr);
+  }
+
+  // Forcibly reset the stack, losing all of its contents.
+  void reset() {
+    _index = 0;
+  }
+
+  // Expand the stack, typically in response to an overflow condition
+  void expand();
+
+  // Compute the least valued stack element.
+  // NOTE(review): compares oops by address, starting from the given low
+  // watermark -- callers presumably rely on address ordering; confirm.
+  oop least_value(HeapWord* low) {
+     oop least = (oop)low;
+     for (size_t i = 0; i < _index; i++) {
+       least = MIN2(least, _base[i]);
+     }
+     return least;
+  }
+
+  // Exposed here to allow stack expansion in || case
+  Mutex* par_lock() { return &_par_lock; }
+};
+
+class CardTableRS;
+class CMSParGCThreadState;
+
+// MemRegion closure over the mod union table bit map _t; presumably
+// records the regions it is applied to in _t (implementation is in the
+// .cpp file -- verify there).
+class ModUnionClosure: public MemRegionClosure {
+ protected:
+  CMSBitMap* _t;   // target bit map (the mod union table)
+ public:
+  ModUnionClosure(CMSBitMap* t): _t(t) { }
+  void do_MemRegion(MemRegion mr);
+};
+
+// Parallel variant of ModUnionClosure, for use by parallel GC threads;
+// presumably uses the par_* bit-map operations (implementation is in the
+// .cpp file -- verify there).
+class ModUnionClosurePar: public ModUnionClosure {
+ public:
+  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
+  void do_MemRegion(MemRegion mr);
+};
+
+// Survivor Chunk Array in support of parallelization of
+// Survivor Space rescan.
+class ChunkArray: public CHeapObj {
+  size_t _index;       // number of samples recorded so far
+  size_t _capacity;    // max #elements the array can hold
+  HeapWord** _array;   // storage for array
+
+ public:
+  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
+  ChunkArray(HeapWord** a, size_t c):
+    _index(0), _capacity(c), _array(a) {}
+
+  HeapWord** array() { return _array; }
+  void set_array(HeapWord** a) { _array = a; }
+
+  size_t capacity() { return _capacity; }
+  void set_capacity(size_t c) { _capacity = c; }
+
+  // One past the last occupied slot (exclusive end of recorded samples).
+  // Fixed: the bound check was '_index < capacity()', which fires
+  // spuriously in debug builds when record_sample() has filled the array
+  // exactly to capacity (a legal state); the check must be inclusive.
+  size_t end() {
+    assert(_index <= capacity(), "_index out of bounds");
+    return _index;
+  }  // exclusive
+
+  HeapWord* nth(size_t n) {
+    assert(n < end(), "Out of bounds access");
+    return _array[n];
+  }
+  
+  void reset() {
+    _index = 0;
+  }
+
+  // Record a sample; samples beyond capacity are silently dropped.
+  void record_sample(HeapWord* p, size_t sz) {
+    // For now we do not do anything with the size
+    if (_index < _capacity) {
+      _array[_index++] = p;
+    }
+  }
+};
+
+// 
+// Timing, allocation and promotion statistics for gc scheduling and incremental
+// mode pacing.  Most statistics are exponential averages.
+// 
+class CMSStats VALUE_OBJ_CLASS_SPEC {
+ private:
+  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.
+
+  // The following are exponential averages with factor alpha:
+  //   avg = (100 - alpha) * avg + alpha * cur_sample
+  // 
+  //   The durations measure:  end_time[n] - start_time[n]
+  //   The periods measure:    start_time[n] - start_time[n-1]
+  //
+  // The cms period and duration include only concurrent collections; time spent
+  // in foreground cms collections due to System.gc() or because of a failure to
+  // keep up are not included.
+  //
+  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
+  // real value, but is used only after the first period.  A value of 100 is
+  // used for the first sample so it gets the entire weight.
+  unsigned int _saved_alpha; // 0-100
+  unsigned int _gc0_alpha;
+  unsigned int _cms_alpha;
+
+  double _gc0_duration;
+  double _gc0_period;
+  size_t _gc0_promoted;		// bytes promoted per gc0
+  double _cms_duration;
+  double _cms_duration_pre_sweep; // time from initiation to start of sweep
+  double _cms_duration_per_mb;
+  double _cms_period;
+  size_t _cms_allocated;	// bytes of direct allocation per gc0 period
+
+  // Timers.
+  elapsedTimer _cms_timer;
+  TimeStamp    _gc0_begin_time;
+  TimeStamp    _cms_begin_time;
+  TimeStamp    _cms_end_time;
+
+  // Snapshots of the amount used in the CMS generation.
+  size_t _cms_used_at_gc0_begin;
+  size_t _cms_used_at_gc0_end;
+  size_t _cms_used_at_cms_begin;
+
+  // Used to prevent the duty cycle from being reduced in the middle of a cms
+  // cycle.
+  bool _allow_duty_cycle_reduction;
+
+  enum {
+    _GC0_VALID = 0x1,
+    _CMS_VALID = 0x2,
+    _ALL_VALID = _GC0_VALID | _CMS_VALID
+  };
+
+  // Bitwise OR of the _*_VALID flags above.
+  unsigned int _valid_bits;
+
+  unsigned int _icms_duty_cycle;	// icms duty cycle (0-100).
+
+ protected:
+
+  // Return a duty cycle that avoids wild oscillations, by limiting the amount
+  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
+  // as a recommended value).
+  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
+					     unsigned int new_duty_cycle);
+  unsigned int icms_update_duty_cycle_impl();
+
+ public:
+  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
+	   unsigned int alpha = CMSExpAvgFactor);
+
+  // Whether or not the statistics contain valid data; higher level statistics
+  // cannot be called until this returns true (they require at least one young
+  // gen and one cms cycle to have completed).
+  bool valid() const;
+
+  // Record statistics.
+  void record_gc0_begin();
+  void record_gc0_end(size_t cms_gen_bytes_used);
+  void record_cms_begin();
+  void record_cms_end();
+
+  // Allow management of the cms timer, which must be stopped/started around
+  // yield points.
+  elapsedTimer& cms_timer()     { return _cms_timer; }
+  void start_cms_timer()        { _cms_timer.start(); }
+  void stop_cms_timer()         { _cms_timer.stop(); }
+
+  // Basic statistics; units are seconds or bytes.
+  double gc0_period() const     { return _gc0_period; }
+  double gc0_duration() const   { return _gc0_duration; }
+  size_t gc0_promoted() const   { return _gc0_promoted; }
+  double cms_period() const          { return _cms_period; }
+  double cms_duration() const        { return _cms_duration; }
+  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
+  size_t cms_allocated() const       { return _cms_allocated; }
+
+  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
+
+  // Seconds since the last background cms cycle began or ended.
+  double cms_time_since_begin() const;
+  double cms_time_since_end() const;
+
+  // Higher level statistics--caller must check that valid() returns true before
+  // calling.
+
+  // Returns bytes promoted per second of wall clock time.
+  double promotion_rate() const;
+
+  // Returns bytes directly allocated per second of wall clock time.
+  double cms_allocation_rate() const;
+
+  // Rate at which space in the cms generation is being consumed (sum of the
+  // above two).
+  double cms_consumption_rate() const;
+
+  // Returns an estimate of the number of seconds until the cms generation will
+  // fill up, assuming no collection work is done.
+  double time_until_cms_gen_full() const;
+
+  // Returns an estimate of the number of seconds remaining until
+  // the cms generation collection should start.
+  double time_until_cms_start() const;
+
+  // End of higher level statistics.
+
+  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
+  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
+
+  // Update the duty cycle and return the new value.
+  unsigned int icms_update_duty_cycle();
+
+  // Debugging.
+  void print_on(outputStream* st) const PRODUCT_RETURN;
+  void print() const { print_on(gclog_or_tty); }
+};
+
+// A closure related to weak references processing which
+// we embed in the CMSCollector, since we need to pass
+// it to the reference processor for secondary filtering
+// of references based on reachability of referent;
+// see role of _is_alive_non_header closure in the
+// ReferenceProcessor class.
+// For objects in the CMS generation, this closure checks
+// if the object is "live" (reachable). Used in weak
+// reference processing.
+class CMSIsAliveClosure: public BoolObjectClosure {
+  MemRegion  _span;
+  const CMSBitMap* _bit_map;
+
+  friend class CMSCollector;
+ protected:
+  // Used by CMSCollector (a friend) to install the span when the
+  // one-argument constructor below was used.
+  void set_span(MemRegion span) { _span = span; }
+ public:
+  // NOTE(review): this constructor leaves _span unset; the owner is
+  // expected to call set_span() before the closure is used.
+  CMSIsAliveClosure(CMSBitMap* bit_map):
+    _bit_map(bit_map) { }
+
+  CMSIsAliveClosure(MemRegion span,
+                    CMSBitMap* bit_map):
+    _span(span),
+    _bit_map(bit_map) { }
+  // This closure is only ever consulted through do_object_b().
+  void do_object(oop obj) {
+    assert(false, "not to be invoked");
+  }
+  bool do_object_b(oop obj);
+};
+
+
+// Implements AbstractRefProcTaskExecutor for CMS.
+class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+public:
+
+  CMSRefProcTaskExecutor(CMSCollector& collector)
+    : _collector(collector)
+  { }
+  
+  // Executes a task using worker threads.  
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+private:
+  CMSCollector& _collector;   // collector on whose behalf tasks execute
+};
+
+
+class CMSCollector: public CHeapObj {
+  friend class VMStructs;
+  friend class ConcurrentMarkSweepThread;
+  friend class ConcurrentMarkSweepGeneration;
+  friend class CompactibleFreeListSpace;
+  friend class CMSParRemarkTask;
+  friend class CMSConcMarkingTask;
+  friend class CMSRefProcTaskProxy;
+  friend class CMSRefProcTaskExecutor;
+  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
+  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
+  friend class PushOrMarkClosure;             // to access _restart_addr
+  friend class Par_PushOrMarkClosure;             // to access _restart_addr
+  friend class MarkFromRootsClosure;          //  -- ditto --
+                                              // ... and for clearing cards
+  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
+                                              // ... and for clearing cards
+  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
+  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
+  friend class PushAndMarkVerifyClosure;      //  -- ditto --
+  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
+  friend class PushAndMarkClosure;            //  -- ditto --
+  friend class Par_PushAndMarkClosure;        //  -- ditto --
+  friend class CMSKeepAliveClosure;           //  -- ditto --
+  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
+  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
+  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
+  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
+  friend class VM_CMS_Operation;
+  friend class VM_CMS_Initial_Mark_Operation;
+  friend class VM_CMS_Final_Remark_Operation;
+
+ private:
+  jlong _time_of_last_gc;
+  void update_time_of_last_gc(jlong now) {
+    _time_of_last_gc = now;
+  }
+  
+  OopTaskQueueSet* _task_queues;
+
+  // Overflow list of grey objects, threaded through mark-word
+  // Manipulated with CAS in the parallel/multi-threaded case.
+  oop _overflow_list;
+  // The following array-pair keeps track of mark words
+  // displaced for accommodating overflow list above.
+  // This code will likely be revisited under RFE#4922830.
+  GrowableArray<oop>*     _preserved_oop_stack; 
+  GrowableArray<markOop>* _preserved_mark_stack; 
+
+  int*             _hash_seed;
+
+  // In support of multi-threaded concurrent phases
+  YieldingFlexibleWorkGang* _conc_workers;
+
+  // Performance Counters
+  CollectorCounters* _gc_counters;
+
+  // Initialization Errors
+  bool _completed_initialization;
+
+  // In support of ExplicitGCInvokesConcurrent
+  static   bool _full_gc_requested;
+  unsigned int  _collection_count_start;
+  // Should we unload classes this concurrent cycle?
+  // Set in response to a concurrent full gc request.
+  bool _unload_classes;
+  bool _unloaded_classes_last_cycle;
+  // Did we (allow) unload classes in the previous concurrent cycle?
+  bool cms_unloaded_classes_last_cycle() const {
+    return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
+  }
+
+  // Verification support
+  CMSBitMap     _verification_mark_bm;
+  void verify_after_remark_work_1();
+  void verify_after_remark_work_2();
+
+  // true if any verification flag is on.
+  bool _verifying;
+  bool verifying() const { return _verifying; }
+  void set_verifying(bool v) { _verifying = v; }
+
+  // Collector policy
+  ConcurrentMarkSweepPolicy* _collector_policy;
+  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
+
+  // Check whether the gc time limit has been 
+  // exceeded and set the size policy flag
+  // appropriately.
+  void check_gc_time_limit();
+  // XXX Move these to CMSStats ??? FIX ME !!!
+  elapsedTimer _sweep_timer;
+  AdaptivePaddedAverage _sweep_estimate;
+
+ protected:
+  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
+  ConcurrentMarkSweepGeneration* _permGen; // perm gen
+  MemRegion                      _span;    // span covering above two
+  CardTableRS*                   _ct;      // card table
+
+  // CMS marking support structures
+  CMSBitMap     _markBitMap;
+  CMSBitMap     _modUnionTable;
+  CMSMarkStack  _markStack;
+  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
+                                          // to revisit
+  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
+
+  HeapWord*     _restart_addr; // in support of marking stack overflow
+  void          lower_restart_addr(HeapWord* low);
+
+  // Counters in support of marking stack / work queue overflow handling:
+  // a non-zero value indicates certain types of overflow events during
+  // the current CMS cycle and could lead to stack resizing efforts at
+  // an opportune future time.
+  size_t        _ser_pmc_preclean_ovflw;
+  size_t        _ser_pmc_remark_ovflw;
+  size_t        _par_pmc_remark_ovflw;
+  size_t        _ser_kac_ovflw;
+  size_t        _par_kac_ovflw;
+
+  // ("Weak") Reference processing support
+  ReferenceProcessor*            _ref_processor;
+  CMSIsAliveClosure              _is_alive_closure;
+      // keep this textually after _markBitMap; c'tor dependency
+
+  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
+  ModUnionClosure    _modUnionClosure;
+  ModUnionClosurePar _modUnionClosurePar;
+
+  // CMS abstract state machine
+  // initial_state: Idling
+  // next_state(Idling)            = {Marking}
+  // next_state(Marking)           = {Precleaning, Sweeping}
+  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
+  // next_state(AbortablePreclean) = {FinalMarking}
+  // next_state(FinalMarking)      = {Sweeping}
+  // next_state(Sweeping)          = {Resizing}
+  // next_state(Resizing)          = {Resetting}
+  // next_state(Resetting)         = {Idling}
+  // The numeric values below are chosen so that:
+  // . _collectorState <= Idling ==  post-sweep && pre-mark
+  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
+  //                                            precleaning || abortablePreclean
+  enum CollectorState {
+    Resizing            = 0,
+    Resetting           = 1,
+    Idling              = 2,
+    InitialMarking      = 3,
+    Marking             = 4,
+    Precleaning         = 5,
+    AbortablePreclean   = 6,
+    FinalMarking        = 7,
+    Sweeping            = 8 
+  };
+  static CollectorState _collectorState;
+
+  // State related to prologue/epilogue invocation for my generations
+  bool _between_prologue_and_epilogue;
+
+  // Signalling/State related to coordination between fore- and background GC
+  // Note: When the baton has been passed from background GC to foreground GC,
+  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
+  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
+                                 // wants to go active
+  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
+                                 // yet passed the baton to the foreground GC
+
+  // Support for CMSScheduleRemark (abortable preclean)
+  bool _abort_preclean;
+  bool _start_sampling;
+
+  int    _numYields;
+  size_t _numDirtyCards;
+  uint   _sweepCount;
+  // number of full gc's since the last concurrent gc.
+  uint	 _full_gcs_since_conc_gc;
+
+  // if occupancy exceeds this, start a new gc cycle
+  double _initiatingOccupancy;
+  // occupancy used for bootstrapping stats
+  double _bootstrap_occupancy;
+
+  // timer
+  elapsedTimer _timer;
+
+  // Timing, allocation and promotion statistics, used for scheduling.
+  CMSStats      _stats;
+
+  // Allocation limits installed in the young gen, used only in
+  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
+  // these limits, the cms generation is notified and the cms thread is started
+  // or stopped, respectively.
+  HeapWord*	_icms_start_limit;
+  HeapWord*	_icms_stop_limit;
+
+  enum CMS_op_type {
+    CMS_op_checkpointRootsInitial,
+    CMS_op_checkpointRootsFinal
+  };
+
+  void do_CMS_operation(CMS_op_type op);
+  bool stop_world_and_do(CMS_op_type op);
+
+  OopTaskQueueSet* task_queues() { return _task_queues; }
+  int*             hash_seed(int i) { return &_hash_seed[i]; }
+  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
+
+  // Support for parallelizing Eden rescan in CMS remark phase
+  void sample_eden(); // ... sample Eden space top
+
+ private:
+  // Support for parallelizing young gen rescan in CMS remark phase
+  Generation* _young_gen;  // the younger gen
+  HeapWord** _top_addr;    // ... Top of Eden
+  HeapWord** _end_addr;    // ... End of Eden
+  HeapWord** _eden_chunk_array; // ... Eden partitioning array
+  size_t     _eden_chunk_index; // ... top (exclusive) of array
+  size_t     _eden_chunk_capacity;  // ... max entries in array
+
+  // Support for parallelizing survivor space rescan 
+  HeapWord** _survivor_chunk_array;
+  size_t     _survivor_chunk_index;
+  size_t     _survivor_chunk_capacity;
+  size_t*    _cursor;
+  ChunkArray* _survivor_plab_array;
+
+  // Support for marking stack overflow handling
+  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
+  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
+  void push_on_overflow_list(oop p);
+  void par_push_on_overflow_list(oop p);
+  // the following is, obviously, not, in general, "MT-stable"
+  bool overflow_list_is_empty() { return _overflow_list == NULL; }
+  
+  void preserve_mark_if_necessary(oop p);
+  void par_preserve_mark_if_necessary(oop p);
+  void preserve_mark_work(oop p, markOop m);
+  void restore_preserved_marks_if_any();
+  NOT_PRODUCT(bool no_preserved_marks();)
+  // in support of testing overflow code
+  NOT_PRODUCT(int _overflow_counter;)
+  NOT_PRODUCT(bool simulate_overflow();)       // sequential
+  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
+
+  int _roots_scanning_options;
+  int roots_scanning_options() const      { return _roots_scanning_options; }
+  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
+  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }
+
+  // CMS work methods
+  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
+
+  // a return value of false indicates failure due to stack overflow
+  bool markFromRootsWork(bool asynch);  // concurrent marking work
+
+ public:   // FIX ME!!! only for testing
+  bool do_marking_st(bool asynch);      // single-threaded marking
+  bool do_marking_mt(bool asynch);      // multi-threaded  marking
+
+ private:
+
+  // concurrent precleaning work
+  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
+                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
+  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+                             ScanMarkedObjectsAgainCarefullyClosure* cl);
+  // Does precleaning work, returning a quantity indicative of
+  // the amount of "useful work" done.
+  size_t preclean_work(bool clean_refs, bool clean_survivors);
+  void abortable_preclean(); // Preclean while looking for possible abort
+  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
+  // Helper function for above; merge-sorts the per-thread plab samples
+  void merge_survivor_plab_arrays(ContiguousSpace* surv);
+  // Resets (i.e. clears) the per-thread plab sample vectors
+  void reset_survivor_plab_arrays();
+
+  // final (second) checkpoint work
+  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
+                                bool init_mark_was_synchronous);
+  // work routine for parallel version of remark
+  void do_remark_parallel();
+  // work routine for non-parallel version of remark
+  void do_remark_non_parallel();
+  // reference processing work routine (during second checkpoint)
+  void refProcessingWork(bool asynch, bool clear_all_soft_refs);
+
+  // concurrent sweeping work
+  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
+
+  // (concurrent) resetting of support data structures
+  void reset(bool asynch);
+
+  // Clear _expansion_cause fields of constituent generations
+  void clear_expansion_cause();
+
+  // An auxiliary method used to record the ends of
+  // used regions of each generation to limit the extent of sweep
+  void save_sweep_limits();
+
+  // Resize the generations included in the collector.
+  void compute_new_size();
+
+  // A work method used by foreground collection to determine
+  // what type of collection (compacting or not, continuing or fresh)
+  // it should do.
+  void decide_foreground_collection_type(bool clear_all_soft_refs,
+    bool* should_compact, bool* should_start_over);
+
+  // A work method used by the foreground collector to do
+  // a mark-sweep-compact.
+  void do_compaction_work(bool clear_all_soft_refs);
+
+  // A work method used by the foreground collector to do
+  // a mark-sweep, after taking over from a possibly on-going
+  // concurrent mark-sweep collection.
+  void do_mark_sweep_work(bool clear_all_soft_refs,
+    CollectorState first_state, bool should_start_over);
+
+  // If the background GC is active, acquire control from the background
+  // GC and do the collection.
+  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
+
+  // For synchronizing passing of control from background to foreground
+  // GC.  waitForForegroundGC() is called by the background
+  // collector.  It if had to wait for a foreground collection,
+  // it returns true and the background collection should assume
+  // that the collection was finished by the foreground
+  // collector.
+  bool waitForForegroundGC();
+
+  // Incremental mode triggering:  recompute the icms duty cycle and set the
+  // allocation limits in the young gen.
+  void icms_update_allocation_limits();
+
+  size_t block_size_using_printezis_bits(HeapWord* addr) const;
+  size_t block_size_if_printezis_bits(HeapWord* addr) const;
+  HeapWord* next_card_start_after_block(HeapWord* addr) const;
+
+  void setup_cms_unloading_and_verification_state();
+ public:
+  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
+               ConcurrentMarkSweepGeneration* permGen,
+               CardTableRS*                   ct,
+	       ConcurrentMarkSweepPolicy*     cp);
+  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
+
+  ReferenceProcessor* ref_processor() { return _ref_processor; }
+  void ref_processor_init();
+
+  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
+  static CollectorState abstract_state() { return _collectorState;  }
+  double initiatingOccupancy() const { return _initiatingOccupancy; }
+
+  bool should_abort_preclean() const; // Whether preclean should be aborted.
+  size_t get_eden_used() const;
+  size_t get_eden_capacity() const;
+
+  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
+
+  // locking checks
+  NOT_PRODUCT(static bool have_cms_token();)
+
+  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
+  bool shouldConcurrentCollect();
+
+  void collect(bool   full,
+               bool   clear_all_soft_refs,
+               size_t size,
+               bool   tlab);
+  void collect_in_background(bool clear_all_soft_refs);
+  void collect_in_foreground(bool clear_all_soft_refs);
+
+  // In support of ExplicitGCInvokesConcurrent
+  static void request_full_gc(unsigned int full_gc_count);
+  // Should we unload classes in a particular concurrent cycle?
+  bool cms_should_unload_classes() const {
+    assert(!_unload_classes ||  ExplicitGCInvokesConcurrentAndUnloadsClasses,
+           "Inconsistency; see CR 6541037");
+    return _unload_classes || CMSClassUnloadingEnabled;
+  }
+
+  void direct_allocated(HeapWord* start, size_t size);
+
+  // Object is dead if not marked and current phase is sweeping.
+  bool is_dead_obj(oop obj) const;
+
+  // After a promotion (of "start"), do any necessary marking.
+  // If "par", then it's being done by a parallel GC thread.
+  // The last two args indicate if we need precise marking
+  // and if so the size of the object so it can be dirtied
+  // in its entirety.
+  void promoted(bool par, HeapWord* start,
+                bool is_obj_array, size_t obj_size);
+
+  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
+				     size_t word_size);
+
+  void getFreelistLocks() const;
+  void releaseFreelistLocks() const;
+  bool haveFreelistLocks() const;
+
+  // GC prologue and epilogue
+  void gc_prologue(bool full);
+  void gc_epilogue(bool full);
+
+  jlong time_of_last_gc(jlong now) {
+    if (_collectorState <= Idling) {
+      // gc not in progress
+      return _time_of_last_gc;
+    } else {
+      // collection in progress
+      return now;
+    }
+  }
+
+  // Support for parallel remark of survivor space
+  void* get_data_recorder(int thr_num);
+
+  CMSBitMap* markBitMap()  { return &_markBitMap; }
+  void directAllocated(HeapWord* start, size_t size);
+
+  // main CMS steps and related support
+  void checkpointRootsInitial(bool asynch);
+  bool markFromRoots(bool asynch);  // a return value of false indicates failure
+                                    // due to stack overflow
+  void preclean();
+  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
+                            bool init_mark_was_synchronous);
+  void sweep(bool asynch);
+
+  // Check that the currently executing thread is the expected
+  // one (foreground collector or background collector).
+  void check_correct_thread_executing()        PRODUCT_RETURN;
+  // XXXPERM void print_statistics()           PRODUCT_RETURN;
+
+  bool is_cms_reachable(HeapWord* addr);
+
+  // Performance Counter Support
+  CollectorCounters* counters()    { return _gc_counters; }
+
+  // timer stuff
+  void    startTimer() { _timer.start();   }
+  void    stopTimer()  { _timer.stop();    }
+  void    resetTimer() { _timer.reset();   }
+  double  timerValue() { return _timer.seconds(); }
+
+  int  yields()          { return _numYields; }
+  void resetYields()     { _numYields = 0;    }
+  void incrementYields() { _numYields++;      }
+  void resetNumDirtyCards()               { _numDirtyCards = 0; }
+  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
+  size_t  numDirtyCards()                 { return _numDirtyCards; }
+
+  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
+  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
+  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
+  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
+  uint  sweepCount() const             { return _sweepCount; }
+  void incrementSweepCount()           { _sweepCount++; }
+
+  // Timers/stats for gc scheduling and incremental mode pacing.
+  CMSStats& stats() { return _stats; }
+
+  // Convenience methods that check whether CMSIncrementalMode is enabled and
+  // forward to the corresponding methods in ConcurrentMarkSweepThread.
+  static void start_icms();
+  static void stop_icms();    // Called at the end of the cms cycle.
+  static void disable_icms(); // Called before a foreground collection.
+  static void enable_icms();  // Called after a foreground collection.
+  void icms_wait();	     // Called at yield points.
+
+  // Adaptive size policy
+  CMSAdaptiveSizePolicy* size_policy();
+  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
+
+  // debugging
+  void verify(bool);
+  bool verify_after_remark();
+  void verify_ok_to_terminate() const PRODUCT_RETURN;
+
+  // convenience methods in support of debugging
+  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
+  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
+
+  // accessors
+  CMSMarkStack* verification_mark_stack() { return &_markStack; }
+  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
+
+  // Get the bit map with a perm gen "deadness" information.
+  CMSBitMap* perm_gen_verify_bit_map()       { return &_perm_gen_verify_bit_map; }
+
+  // Initialization errors
+  bool completed_initialization() { return _completed_initialization; }
+};
+
+// Enumerates the reasons for which the CMS generation may be expanded.
+// AllStatic: never instantiated; exists only for the Cause enum and its
+// string representation.  (Enumerator order is significant -- values are
+// implicit -- so do not reorder.)
+class CMSExpansionCause : public AllStatic  {
+ public:
+  enum Cause {
+    _no_expansion,
+    _satisfy_free_ratio,
+    _satisfy_promotion,
+    _satisfy_allocation,
+    _allocate_par_lab,
+    _allocate_par_spooling_space,
+    _adaptive_size_policy
+  };
+  // Return a string describing the cause of the expansion.
+  static const char* to_string(CMSExpansionCause::Cause cause);
+};
+
+// The concurrent mark-sweep old (and perm) generation.  The collection
+// work itself is delegated to the shared CMSCollector (see collector()
+// and the many forwarding methods below); this class mainly manages the
+// underlying CompactibleFreeListSpace and generation-level bookkeeping.
+class ConcurrentMarkSweepGeneration: public CardGeneration {
+  friend class VMStructs;
+  friend class ConcurrentMarkSweepThread;
+  friend class ConcurrentMarkSweep;
+  friend class CMSCollector;
+ protected:
+  static CMSCollector*       _collector; // the collector that collects us
+  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)
+
+  // Performance Counters
+  GenerationCounters*      _gen_counters;
+  GSpaceCounters*          _space_counters;
+
+  // Words directly allocated, used by CMSStats.
+  size_t _direct_allocated_words;
+
+  // Non-product stat counters
+  NOT_PRODUCT(
+    int _numObjectsPromoted;
+    int _numWordsPromoted;
+    int _numObjectsAllocated;
+    int _numWordsAllocated;
+  )
+
+  // Used for sizing decisions
+  bool _incremental_collection_failed;
+  bool incremental_collection_failed() {
+    return _incremental_collection_failed;
+  }
+  void set_incremental_collection_failed() {
+    _incremental_collection_failed = true;
+  }
+  void clear_incremental_collection_failed() {
+    _incremental_collection_failed = false;
+  }
+
+ private:
+  // For parallel young-gen GC support.
+  CMSParGCThreadState** _par_gc_thread_states;
+
+  // Reason generation was expanded
+  CMSExpansionCause::Cause _expansion_cause;
+
+  // accessors
+  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
+  CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
+
+  // In support of MinChunkSize being larger than min object size
+  const double _dilatation_factor;
+
+  // Which kind of collection performed; used by rotate_debug_collection_type()
+  // and debug_collection_type() below for debugging only.
+  enum CollectionTypes {
+    Concurrent_collection_type		= 0,
+    MS_foreground_collection_type	= 1,
+    MSC_foreground_collection_type	= 2,
+    Unknown_collection_type		= 3
+  };
+
+  CollectionTypes _debug_collection_type;
+
+ protected:
+  // Grow generation by specified size (returns false if unable to grow)
+  bool grow_by(size_t bytes);
+  // Grow generation to reserved size.
+  bool grow_to_reserved();
+  // Shrink generation by specified size (returns false if unable to shrink)
+  virtual void shrink_by(size_t bytes);
+
+  // Update statistics for GC
+  virtual void update_gc_stats(int level, bool full);
+
+  // Maximum available space in the generation (including uncommitted)
+  // space.
+  size_t max_available() const;
+
+ public:
+  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
+                                int level, CardTableRS* ct,
+				bool use_adaptive_freelists,
+                                FreeBlockDictionary::DictionaryChoice);
+
+  // Accessors
+  CMSCollector* collector() const { return _collector; }
+  // The shared collector may be installed exactly once.
+  static void set_collector(CMSCollector* collector) {
+    assert(_collector == NULL, "already set");
+    _collector = collector;
+  }
+  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
+  
+  Mutex* freelistLock() const;
+
+  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
+
+  // Adaptive size policy
+  CMSAdaptiveSizePolicy* size_policy();
+
+  bool refs_discovery_is_atomic() const { return false; }
+  bool refs_discovery_is_mt()     const {
+    // Note: CMS does MT-discovery during the parallel-remark
+    // phases. Use ReferenceProcessorMTMutator to make refs
+    // discovery MT-safe during such phases or other parallel
+    // discovery phases in the future. This may all go away
+    // if/when we decide that refs discovery is sufficiently
+    // rare that the cost of the CAS's involved is in the
+    // noise. That's a measurement that should be done, and
+    // the code simplified if that turns out to be the case.
+    return false;
+  }
+
+  // Override
+  virtual void ref_processor_init();
+
+  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
+
+  // Space enquiries
+  size_t capacity() const;
+  size_t used() const;
+  size_t free() const;
+  double occupancy()      { return ((double)used())/((double)capacity()); }
+  size_t contiguous_available() const;
+  size_t unsafe_max_alloc_nogc() const;
+
+  // over-rides
+  MemRegion used_region() const;
+  MemRegion used_region_at_save_marks() const;
+
+  // Does a "full" (forced) collection invoked on this generation collect
+  // all younger generations as well? Note that the second conjunct is a
+  // hack to allow the collection of the younger gen first if the flag is
+  // set. This is better than using the policy's should_collect_gen0_first()
+  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
+  virtual bool full_collects_younger_generations() const {
+    return UseCMSCompactAtFullCollection && !CollectGen0First;
+  }
+
+  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
+
+  // Support for compaction
+  CompactibleSpace* first_compaction_space() const;
+  // Adjust quantities in the generation affected by
+  // the compaction.
+  void reset_after_compaction();
+
+  // Allocation support
+  HeapWord* allocate(size_t size, bool tlab);
+  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
+  oop       promote(oop obj, size_t obj_size, oop* ref);
+  // Parallel allocation simply forwards to the serial path.
+  HeapWord* par_allocate(size_t size, bool tlab) {
+    return allocate(size, tlab);
+  }
+
+  // Incremental mode triggering.
+  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
+				     size_t word_size);
+
+  // Used by CMSStats to track direct allocation.  The value is sampled and
+  // reset after each young gen collection.
+  size_t direct_allocated_words() const { return _direct_allocated_words; }
+  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
+
+  // Overrides for parallel promotion.
+  virtual oop par_promote(int thread_num,
+			  oop obj, markOop m, size_t word_sz);
+  // This one should not be called for CMS.
+  virtual void par_promote_alloc_undo(int thread_num,
+				      HeapWord* obj, size_t word_sz);
+  virtual void par_promote_alloc_done(int thread_num);
+  virtual void par_oop_since_save_marks_iterate_done(int thread_num);
+
+  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
+    bool younger_handles_promotion_failure) const;
+
+  bool should_collect(bool full, size_t size, bool tlab);
+    // XXXPERM
+  bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
+  void collect(bool   full,
+               bool   clear_all_soft_refs,
+               size_t size,
+               bool   tlab);
+
+  HeapWord* expand_and_allocate(size_t word_size,
+				bool tlab,
+				bool parallel = false);
+
+  // GC prologue and epilogue
+  void gc_prologue(bool full);
+  void gc_prologue_work(bool full, bool registerClosure,
+                        ModUnionClosure* modUnionClosure);
+  void gc_epilogue(bool full);
+  void gc_epilogue_work(bool full);
+
+  // Time since last GC of this generation; both delegate to the collector.
+  jlong time_of_last_gc(jlong now) {
+    return collector()->time_of_last_gc(now);
+  }
+  void update_time_of_last_gc(jlong now) {
+    collector()-> update_time_of_last_gc(now);
+  }
+
+  // Allocation failure
+  void expand(size_t bytes, size_t expand_bytes, 
+    CMSExpansionCause::Cause cause);
+  void shrink(size_t bytes);
+  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
+  bool expand_and_ensure_spooling_space(PromotionInfo* promo);
+
+  // Iteration support and related enquiries
+  void save_marks();
+  bool no_allocs_since_save_marks();
+  void object_iterate_since_last_GC(ObjectClosure* cl);
+  void younger_refs_iterate(OopsInGenClosure* cl);
+
+  // Iteration support specific to CMS generations
+  void save_sweep_limit();
+
+  // More iteration support
+  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
+  virtual void oop_iterate(OopClosure* cl);
+  virtual void object_iterate(ObjectClosure* cl);
+
+  // Need to declare the full complement of closures, whether we'll
+  // override them or not, or get message from the compiler:
+  //   oop_since_save_marks_iterate_nv hides virtual function...
+  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
+    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
+  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
+
+  // Smart allocation  XXX -- move to CFLSpace?
+  void setNearLargestChunk();
+  bool isNearLargestChunk(HeapWord* addr);
+
+  // Get the chunk at the end of the space.  Delegates to
+  // the space.
+  FreeChunk* find_chunk_at_end(); 
+
+  // Overriding of unused functionality (sharing not yet supported with CMS)
+  void pre_adjust_pointers();
+  void post_compact();
+
+  // Debugging
+  void prepare_for_verify();
+  void verify(bool allow_dirty);
+  void print_statistics()               PRODUCT_RETURN;
+
+  // Performance Counters support
+  virtual void update_counters();
+  virtual void update_counters(size_t used);
+  void initialize_performance_counters();
+  CollectorCounters* counters()  { return collector()->counters(); }
+
+  // Support for parallel remark of survivor space
+  void* get_data_recorder(int thr_num) {
+    // Delegate to collector
+    return collector()->get_data_recorder(thr_num);
+  }
+
+  // Printing
+  const char* name() const;
+  virtual const char* short_name() const { return "CMS"; }
+  void        print() const;
+  void printOccupancy(const char* s);
+  bool must_be_youngest() const { return false; }
+  bool must_be_oldest()   const { return true; }
+
+  void compute_new_size();
+
+  // Debugging-only: which collection type was last performed.
+  CollectionTypes debug_collection_type() { return _debug_collection_type; }
+  void rotate_debug_collection_type();
+};
+
+// Adaptive-size-policy-aware variant of ConcurrentMarkSweepGeneration:
+// resizing decisions are driven by CMSAdaptiveSizePolicy (see
+// cms_size_policy()/resize()) rather than the fixed-policy paths.
+class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
+
+  // Return the size policy from the heap's collector
+  // policy casted to CMSAdaptiveSizePolicy*.
+  CMSAdaptiveSizePolicy* cms_size_policy() const;
+
+  // Resize the generation based on the adaptive size
+  // policy.
+  void resize(size_t cur_promo, size_t desired_promo);
+
+  // Return the GC counters from the collector policy
+  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
+
+  virtual void shrink_by(size_t bytes);
+
+ public:
+  virtual void compute_new_size();
+  // Constructor just forwards all arguments to the base generation.
+  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
+                                  int level, CardTableRS* ct,
+				  bool use_adaptive_freelists,
+                                  FreeBlockDictionary::DictionaryChoice 
+				    dictionaryChoice) :
+    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
+      use_adaptive_freelists, dictionaryChoice) {}
+
+  virtual const char* short_name() const { return "ASCMS"; }
+  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
+
+  virtual void update_counters();
+  virtual void update_counters(size_t used);
+};
+
+//
+// Closures of various sorts used by CMS to accomplish its work
+//
+
+// This closure is used to check that a certain set of oops is empty.
+class FalseClosure: public OopClosure {
+ public:
+  // Any invocation at all means the oop set was not empty -- fail hard.
+  void do_oop(oop* p) {
+    guarantee(false, "Should be an empty set");
+  }
+};
+
+// This closure is used to do concurrent marking from the roots
+// following the first checkpoint. 
+class MarkFromRootsClosure: public BitMapClosure {
+  CMSCollector*  _collector;
+  MemRegion      _span;
+  CMSBitMap*     _bitMap;
+  CMSBitMap*     _mut;
+  CMSMarkStack*  _markStack;
+  CMSMarkStack*  _revisitStack;
+  bool           _yield;          // whether to yield (see do_yield_check)
+  int            _skipBits;
+  HeapWord*      _finger;
+  HeapWord*      _threshold;
+  DEBUG_ONLY(bool _verifying;)    // debug builds only; set from ctor arg
+
+ public:
+  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
+                       CMSBitMap* bitMap,
+                       CMSMarkStack*  markStack,
+                       CMSMarkStack*  revisitStack,
+                       bool should_yield, bool verifying = false);
+  void do_bit(size_t offset);
+  void reset(HeapWord* addr);
+  inline void do_yield_check();
+
+ private:
+  void scanOopsInOop(HeapWord* ptr);
+  void do_yield_work();
+};
+
+// This closure is used to do concurrent multi-threaded
+// marking from the roots following the first checkpoint. 
+// XXX This should really be a subclass of the serial version
+// above, but I have not had the time to refactor things cleanly.
+// That will be done for Dolphin.
+class Par_MarkFromRootsClosure: public BitMapClosure {
+  CMSCollector*  _collector;
+  MemRegion      _whole_span;
+  MemRegion      _span;
+  CMSBitMap*     _bit_map;
+  CMSBitMap*     _mut;
+  OopTaskQueue*  _work_queue;      // per-thread queue (parallel analogue of
+                                   // the serial closure's mark stack)
+  CMSMarkStack*  _overflow_stack;
+  CMSMarkStack*  _revisit_stack;
+  bool           _yield;           // whether to yield (see do_yield_check)
+  int            _skip_bits;
+  HeapWord*      _finger;
+  HeapWord*      _threshold;
+  CMSConcMarkingTask* _task;       // owning parallel marking task
+ public:
+  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
+                       MemRegion span,
+                       CMSBitMap* bit_map,
+                       OopTaskQueue* work_queue,
+                       CMSMarkStack*  overflow_stack,
+                       CMSMarkStack*  revisit_stack,
+                       bool should_yield);
+  void do_bit(size_t offset);
+  inline void do_yield_check();
+
+ private:
+  void scan_oops_in_oop(HeapWord* ptr);
+  void do_yield_work();
+  bool get_work_from_overflow_stack();
+};
+
+// The following closures are used to do certain kinds of verification of
+// CMS marking.
+// Oop closure used for verification of CMS marking (see the comment
+// preceding this class); carries both a verification bit map and the
+// collector's own bit map.
+class PushAndMarkVerifyClosure: public OopClosure {
+  CMSCollector*    _collector;
+  MemRegion        _span;
+  CMSBitMap*       _verification_bm;
+  CMSBitMap*       _cms_bm;
+  CMSMarkStack*    _mark_stack;
+ public:
+  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
+                           MemRegion span,
+                           CMSBitMap* verification_bm,
+                           CMSBitMap* cms_bm,
+                           CMSMarkStack*  mark_stack);
+  void do_oop(oop* p);
+  // Deal with a stack overflow condition
+  void handle_stack_overflow(HeapWord* lost);
+};
+
+// Bit-map closure used for verification of CMS marking; embeds a
+// PushAndMarkVerifyClosure (above) to do the per-oop work.
+class MarkFromRootsVerifyClosure: public BitMapClosure {
+  CMSCollector*  _collector;
+  MemRegion      _span;
+  CMSBitMap*     _verification_bm;
+  CMSBitMap*     _cms_bm;
+  CMSMarkStack*  _mark_stack;
+  HeapWord*      _finger;
+  PushAndMarkVerifyClosure _pam_verify_closure;
+ public:
+  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
+                             CMSBitMap* verification_bm,
+                             CMSBitMap* cms_bm,
+                             CMSMarkStack*  mark_stack);
+  void do_bit(size_t offset);
+  void reset(HeapWord* addr);
+};
+
+
+// This closure is used to check that a certain set of bits is
+// "empty" (i.e. the bit vector doesn't have any 1-bits).
+class FalseBitMapClosure: public BitMapClosure {
+ public:
+  // Any invocation at all means a 1-bit was found -- fail hard.
+  void do_bit(size_t offset) {
+    guarantee(false, "Should not have a 1 bit"); 
+  }
+};
+
+// This closure is used during the second checkpointing phase
+// to rescan the marked objects on the dirty cards in the mod
+// union table and the card table proper. It's invoked via
+// MarkFromDirtyCardsClosure below. It uses either
+// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
+// declared in genOopClosures.hpp to accomplish some of its work.
+// In the parallel case the bitMap is shared, so access to
+// it needs to be suitably synchronized for updates by embedded
+// closures that update it; however, this closure itself only
+// reads the bit_map and because it is idempotent, is immune to
+// reading stale values.
+class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
+  // These fields are retained only in debug builds, for assertion
+  // checking; which union member is valid is determined by _parallel.
+  #ifdef ASSERT
+    CMSCollector*          _collector;
+    MemRegion              _span;
+    union {
+      CMSMarkStack*        _mark_stack;   // serial case
+      OopTaskQueue*        _work_queue;   // parallel case
+    };
+  #endif // ASSERT
+  bool                       _parallel;   // discriminates the unions below/above
+  CMSBitMap*                 _bit_map;
+  union {
+    MarkRefsIntoAndScanClosure*     _scan_closure;      // serial case
+    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;  // parallel case
+  };
+
+ public:
+  // Serial constructor: uses a mark stack and the serial scan closure.
+  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
+                                MemRegion span,
+                                ReferenceProcessor* rp,
+                                CMSBitMap* bit_map,
+                                CMSMarkStack*  mark_stack,
+                                CMSMarkStack*  revisit_stack,
+                                MarkRefsIntoAndScanClosure* cl):
+    #ifdef ASSERT
+      _collector(collector),
+      _span(span),
+      _mark_stack(mark_stack),
+    #endif // ASSERT
+    _parallel(false),
+    _bit_map(bit_map),
+    _scan_closure(cl) { }
+
+  // Parallel constructor: uses a work queue and the parallel scan closure.
+  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
+                                MemRegion span,
+                                ReferenceProcessor* rp,
+                                CMSBitMap* bit_map,
+                                OopTaskQueue* work_queue,
+                                CMSMarkStack* revisit_stack,
+                                Par_MarkRefsIntoAndScanClosure* cl):
+    #ifdef ASSERT
+      _collector(collector),
+      _span(span),
+      _work_queue(work_queue),
+    #endif // ASSERT
+    _parallel(true),
+    _bit_map(bit_map),
+    _par_scan_closure(cl) { }
+                                
+  // Only the bounded form do_object_bm() is supported; the unbounded
+  // entry points below fail deliberately if called.
+  void do_object(oop obj) {
+    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
+  }
+  bool do_object_b(oop obj) {
+    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
+    return false;
+  }
+  bool do_object_bm(oop p, MemRegion mr);
+};
+
+// This closure is used during the second checkpointing phase
+// to rescan the marked objects on the dirty cards in the mod
+// union table and the card table proper. It invokes
+// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
+// In the parallel case, the bit map is shared and requires
+// synchronized access.
+class MarkFromDirtyCardsClosure: public MemRegionClosure {
+  CompactibleFreeListSpace*      _space;
+  ScanMarkedObjectsAgainClosure  _scan_cl;
+  size_t                         _num_dirty_cards;  // running count, see do_MemRegion
+
+ public:
+  // Serial constructor: forwards a mark stack to the embedded scan closure.
+  MarkFromDirtyCardsClosure(CMSCollector* collector,
+                            MemRegion span,
+                            CompactibleFreeListSpace* space,
+                            CMSBitMap* bit_map,
+                            CMSMarkStack* mark_stack,
+                            CMSMarkStack* revisit_stack,
+                            MarkRefsIntoAndScanClosure* cl):
+    _space(space),
+    _num_dirty_cards(0),
+    _scan_cl(collector, span, collector->ref_processor(), bit_map,
+                 mark_stack, revisit_stack, cl) { }
+
+  // Parallel constructor: forwards a work queue instead.
+  MarkFromDirtyCardsClosure(CMSCollector* collector,
+                            MemRegion span,
+                            CompactibleFreeListSpace* space,
+                            CMSBitMap* bit_map,
+                            OopTaskQueue* work_queue,
+                            CMSMarkStack* revisit_stack,
+                            Par_MarkRefsIntoAndScanClosure* cl):
+    _space(space),
+    _num_dirty_cards(0),
+    _scan_cl(collector, span, collector->ref_processor(), bit_map,
+             work_queue, revisit_stack, cl) { }
+
+  void do_MemRegion(MemRegion mr);
+  void set_space(CompactibleFreeListSpace* space) { _space = space; }
+  size_t num_dirty_cards() { return _num_dirty_cards; }
+};
+
+// This closure is used in the non-product build to check
+// that there are no MemRegions with a certain property.
+class FalseMemRegionClosure: public MemRegionClosure {
+  // First check that the region is well-formed (non-empty), then fail
+  // unconditionally: being invoked at all violates the invariant.
+  void do_MemRegion(MemRegion mr) {
+    guarantee(!mr.is_empty(), "Shouldn't be empty");
+    guarantee(false, "Should never be here");
+  }
+};
+
+// This closure is used during the precleaning phase
+// to "carefully" rescan marked objects on dirty cards.
+// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
+// to accomplish some of its work.
+class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
+  CMSCollector*                  _collector;
+  MemRegion                      _span;
+  bool                           _yield;
+  Mutex*                         _freelistLock;   // NOTE: not initialized by the
+                                                  // constructor; must be installed
+                                                  // via setFreelistLock() before use
+  CMSBitMap*                     _bitMap;
+  CMSMarkStack*                  _markStack;
+  MarkRefsIntoAndScanClosure*    _scanningClosure;
+
+ public:
+  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
+                                         MemRegion     span,
+                                         CMSBitMap* bitMap,
+                                         CMSMarkStack*  markStack,
+                                         CMSMarkStack*  revisitStack,
+                                         MarkRefsIntoAndScanClosure* cl,
+                                         bool should_yield):
+    _collector(collector),
+    _span(span),
+    _yield(should_yield),
+    _bitMap(bitMap),
+    _markStack(markStack),
+    _scanningClosure(cl) {
+  }
+  
+  // Only the bounded "careful" form do_object_careful_m() is supported;
+  // the other entry points fail deliberately if called.
+  void do_object(oop p) {
+    guarantee(false, "call do_object_careful instead");
+  }
+
+  size_t      do_object_careful(oop p) {
+    guarantee(false, "Unexpected caller");
+    return 0;
+  }
+
+  size_t      do_object_careful_m(oop p, MemRegion mr);
+
+  // Install the free list lock; also propagated to the scanning closure.
+  void setFreelistLock(Mutex* m) {
+    _freelistLock = m;
+    _scanningClosure->set_freelistLock(m);
+  }
+
+ private:
+  inline bool do_yield_check();
+
+  void do_yield_work();
+};
+
+// "Careful" object closure used to preclean survivor spaces; the per-oop
+// work is delegated to the supplied PushAndMarkClosure.
+class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
+  CMSCollector*                  _collector;
+  MemRegion                      _span;
+  bool                           _yield;
+  CMSBitMap*                     _bit_map;
+  CMSMarkStack*                  _mark_stack;
+  PushAndMarkClosure*            _scanning_closure;
+  unsigned int                   _before_count;
+
+ public:
+  SurvivorSpacePrecleanClosure(CMSCollector* collector,
+                               MemRegion     span,
+                               CMSBitMap*    bit_map,
+                               CMSMarkStack* mark_stack,
+                               PushAndMarkClosure* cl,
+                               unsigned int  before_count,
+                               bool          should_yield):
+    _collector(collector),
+    _span(span),
+    _yield(should_yield),
+    _bit_map(bit_map),
+    _mark_stack(mark_stack),
+    _scanning_closure(cl),
+    _before_count(before_count)
+  { }
+
+  // Only the unbounded "careful" form do_object_careful() is supported;
+  // the other entry points fail deliberately if called.
+  void do_object(oop p) {
+    guarantee(false, "call do_object_careful instead");
+  }
+
+  size_t      do_object_careful(oop p);
+
+  size_t      do_object_careful_m(oop p, MemRegion mr) {
+    guarantee(false, "Unexpected caller");
+    return 0;
+  }
+
+ private:
+  inline void do_yield_check();
+  void do_yield_work();
+};
+
+// This closure is used to accomplish the sweeping work
+// after the second checkpoint but before the concurrent reset
+// phase.
+// 
+// Terminology
+//   left hand chunk (LHC) - block of one or more chunks currently being
+//     coalesced.  The LHC is available for coalescing with a new chunk.
+//   right hand chunk (RHC) - block that is currently being swept that is
+//     free or garbage that can be coalesced with the LHC.
+// _inFreeRange is true if there is currently a LHC
+// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
+// _freeRangeInFreeLists is true if the LHC is in the free lists.
+// _freeFinger is the address of the current LHC
+class SweepClosure: public BlkClosureCareful {
+  CMSCollector*                  _collector;  // collector doing the work
+  ConcurrentMarkSweepGeneration* _g;	// Generation being swept
+  CompactibleFreeListSpace*      _sp;	// Space being swept
+  HeapWord*                      _limit;	// NOTE(review): presumably the end of
+						// the range to sweep -- confirm
+						// against uses in the .cpp
+  Mutex*                         _freelistLock;	// Free list lock (in space)
+  CMSBitMap*                     _bitMap;	// Marking bit map (in 
+						// generation)
+  bool                           _inFreeRange;	// Indicates if we are in the
+						// midst of a free run
+  bool				 _freeRangeInFreeLists;	
+					// Often, we have just found
+					// a free chunk and started
+					// a new free range; we do not
+					// eagerly remove this chunk from
+					// the free lists unless there is
+					// a possibility of coalescing.
+					// When true, this flag indicates
+					// that the _freeFinger below
+					// points to a potentially free chunk
+					// that may still be in the free lists
+  bool				 _lastFreeRangeCoalesced;
+					// free range contains chunks
+					// coalesced
+  bool                           _yield;	
+					// Whether sweeping should be 
+					// done with yields. For instance 
+					// when done by the foreground 
+					// collector we shouldn't yield.
+  HeapWord*                      _freeFinger;	// When _inFreeRange is set, the
+						// pointer to the "left hand 
+						// chunk"
+  size_t			 _freeRangeSize; 
+					// When _inFreeRange is set, this 
+					// indicates the accumulated size 
+					// of the "left hand chunk"
+  // Non-product sweep statistics.
+  NOT_PRODUCT(
+    size_t		         _numObjectsFreed;
+    size_t		         _numWordsFreed;
+    size_t			 _numObjectsLive;
+    size_t			 _numWordsLive;
+    size_t			 _numObjectsAlreadyFree;
+    size_t			 _numWordsAlreadyFree;
+    FreeChunk*			 _last_fc;
+  )
+ private:
+  // Code that is common to a free chunk or garbage when
+  // encountered during sweeping.
+  void doPostIsFreeOrGarbageChunk(FreeChunk *fc, 
+				  size_t chunkSize);
+  // Process a free chunk during sweeping.
+  void doAlreadyFreeChunk(FreeChunk *fc);
+  // Process a garbage chunk during sweeping.
+  size_t doGarbageChunk(FreeChunk *fc);
+  // Process a live chunk during sweeping.
+  size_t doLiveChunk(FreeChunk* fc);
+
+  // Accessors.
+  HeapWord* freeFinger() const	 	{ return _freeFinger; }
+  void set_freeFinger(HeapWord* v)  	{ _freeFinger = v; }
+  size_t freeRangeSize() const	 	{ return _freeRangeSize; }
+  void set_freeRangeSize(size_t v)  	{ _freeRangeSize = v; }
+  bool inFreeRange() 	const	 	{ return _inFreeRange; }
+  void set_inFreeRange(bool v)  	{ _inFreeRange = v; }
+  bool lastFreeRangeCoalesced()	const	 { return _lastFreeRangeCoalesced; }
+  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
+  bool freeRangeInFreeLists() const	{ return _freeRangeInFreeLists; }
+  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
+
+  // Initialize a free range.
+  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
+  // Return this chunk to the free lists.
+  void flushCurFreeChunk(HeapWord* chunk, size_t size);
+
+  // Check if we should yield and do so when necessary.
+  inline void do_yield_check(HeapWord* addr);
+
+  // Yield
+  void do_yield_work(HeapWord* addr);
+
+  // Debugging/Printing
+  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+
+ public:
+  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
+               CMSBitMap* bitMap, bool should_yield);
+  ~SweepClosure();
+
+  size_t       do_blk_careful(HeapWord* addr);
+};
+
+// Closures related to weak references processing
+
+// During CMS' weak reference processing, this is a
+// work-routine/closure used to complete transitive
+// marking of objects as live after a certain point
+// in which an initial set has been completely accumulated.
+class CMSDrainMarkingStackClosure: public VoidClosure {
+  CMSCollector*        _collector;
+  MemRegion            _span;
+  CMSMarkStack*        _mark_stack;
+  CMSBitMap*           _bit_map;
+  CMSKeepAliveClosure* _keep_alive;  // applied while draining the stack
+ public:
+  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
+                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                      CMSKeepAliveClosure* keep_alive):
+    _collector(collector),
+    _span(span),
+    _bit_map(bit_map),
+    _mark_stack(mark_stack),
+    _keep_alive(keep_alive) { }
+
+  void do_void();
+};
+
+// A parallel version of CMSDrainMarkingStackClosure above.
+class CMSParDrainMarkingStackClosure: public VoidClosure {
+  CMSCollector*           _collector;
+  MemRegion               _span;
+  OopTaskQueue*           _work_queue;
+  CMSBitMap*              _bit_map;
+  CMSInnerParMarkAndPushClosure _mark_and_push;  // built from the ctor args
+
+ public:
+  CMSParDrainMarkingStackClosure(CMSCollector* collector,
+                                 MemRegion span, CMSBitMap* bit_map,
+                                 OopTaskQueue* work_queue):
+    _collector(collector),
+    _span(span),
+    _bit_map(bit_map),
+    _work_queue(work_queue),
+    _mark_and_push(collector, span, bit_map, work_queue) { }
+
+ // Note: this second 'public' label is redundant (the section above is
+ // already public) but harmless.
+ public:
+  void trim_queue(uint max);
+  void do_void();
+};
+
+// Allow yielding or short-circuiting of reference list
+// precleaning work.
+class CMSPrecleanRefsYieldClosure: public YieldClosure {
+  CMSCollector* _collector;   // collector whose state drives the yield decision
+  void do_yield_work();
+ public:
+  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
+    _collector(collector) {}
+  virtual bool should_return();
+};
+
+
+// Convenience class that locks free list locks for given CMS collector
+// Scoped (RAII) helper: acquires the collector's free list locks in the
+// constructor and releases them in the destructor.
+class FreelistLocker: public StackObj {
+ private:
+  CMSCollector* _collector;
+ public:
+  FreelistLocker(CMSCollector* collector):
+    _collector(collector) {
+    _collector->getFreelistLocks();
+  }
+
+  ~FreelistLocker() {
+    _collector->releaseFreelistLocks();
+  }
+};
+
+// Mark all dead objects in a given space.
+class MarkDeadObjectsClosure: public BlkClosure {
+  const CMSCollector*             _collector;
+  const CompactibleFreeListSpace* _sp;
+  CMSBitMap*                      _live_bit_map;  // input: liveness information
+  CMSBitMap*                      _dead_bit_map;  // output: dead objects recorded here
+public:
+  MarkDeadObjectsClosure(const CMSCollector* collector,
+                         const CompactibleFreeListSpace* sp, 
+                         CMSBitMap *live_bit_map,
+                         CMSBitMap *dead_bit_map) :
+    _collector(collector),
+    _sp(sp),
+    _live_bit_map(live_bit_map),
+    _dead_bit_map(dead_bit_map) {}
+  size_t do_blk(HeapWord* addr);
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,510 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)concurrentMarkSweepGeneration.inline.hpp	1.47 07/05/17 15:52:12 JVM"
+#endif
+/*
+ * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+inline void CMSBitMap::clear_all() {
+  assert_locked();
+  // CMS bitmaps usually cover large memory regions
+  _bm.clear_large();
+  return;
+}
+
+inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
+  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
+}
+
+inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
+  return _bmStartWord + (offset << _shifter);
+}
+
+inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
+  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
+  return diff >> _shifter;
+}
+
+inline void CMSBitMap::mark(HeapWord* addr) {
+  assert_locked();
+  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+         "outside underlying space?");
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMSBitMap::par_mark(HeapWord* addr) {
+  assert_locked();
+  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+         "outside underlying space?");
+  return _bm.par_at_put(heapWordToOffset(addr), true);
+}
+
+inline void CMSBitMap::par_clear(HeapWord* addr) {
+  assert_locked();
+  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+         "outside underlying space?");
+  _bm.par_at_put(heapWordToOffset(addr), false);
+}
+
+inline void CMSBitMap::mark_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size is usually just 1 bit.
+  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                BitMap::small_range);
+}
+
+inline void CMSBitMap::clear_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size is usually just 1 bit.
+  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                  BitMap::small_range);
+}
+
+inline void CMSBitMap::par_mark_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size is usually just 1 bit.
+  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                    BitMap::small_range);
+}
+
+inline void CMSBitMap::par_clear_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size is usually just 1 bit.
+  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                      BitMap::small_range);
+}
+
+inline void CMSBitMap::mark_large_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size must be greater than 32 bytes.
+  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                BitMap::large_range);
+}
+
+inline void CMSBitMap::clear_large_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size must be greater than 32 bytes.
+  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                  BitMap::large_range);
+}
+
+inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size must be greater than 32 bytes.
+  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                    BitMap::large_range);
+}
+
+inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
+  NOT_PRODUCT(region_invariant(mr));
+  // Range size must be greater than 32 bytes.
+  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
+                      BitMap::large_range);
+}
+
+// Starting at "addr" (inclusive) return a memory region
+// corresponding to the first maximally contiguous marked ("1") region.
+inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
+  return getAndClearMarkedRegion(addr, endWord());
+}
+
+// Starting at "start_addr" (inclusive) return a memory region
+// corresponding to the first maximal contiguous marked ("1") region
+// strictly less than end_addr.
+inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
+                                                    HeapWord* end_addr) {
+  HeapWord *start, *end;
+  assert_locked();
+  start = getNextMarkedWordAddress  (start_addr, end_addr);
+  end   = getNextUnmarkedWordAddress(start,      end_addr);
+  assert(start <= end, "Consistency check");
+  MemRegion mr(start, end);
+  if (!mr.is_empty()) {
+    clear_range(mr);
+  }
+  return mr;
+}
+
+inline bool CMSBitMap::isMarked(HeapWord* addr) const {
+  assert_locked();
+  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+         "outside underlying space?");
+  return _bm.at(heapWordToOffset(addr));
+}
+
+// The same as isMarked() but without a lock check.
+inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
+  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+         "outside underlying space?");
+  return _bm.at(heapWordToOffset(addr));
+}
+
+
+inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
+  assert_locked();
+  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
+         "outside underlying space?");
+  return !_bm.at(heapWordToOffset(addr));
+}
+
+// Return the HeapWord address corresponding to next "1" bit
+// (inclusive).
+inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
+  return getNextMarkedWordAddress(addr, endWord());
+}
+
+// Return the least HeapWord address corresponding to next "1" bit
+// starting at start_addr (inclusive) but strictly less than end_addr.
+inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
+  HeapWord* start_addr, HeapWord* end_addr) const {
+  assert_locked();
+  size_t nextOffset = _bm.get_next_one_offset(
+                        heapWordToOffset(start_addr),
+                        heapWordToOffset(end_addr));
+  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
+  assert(nextAddr >= start_addr &&
+         nextAddr <= end_addr, "get_next_one postcondition");
+  assert((nextAddr == end_addr) ||
+         isMarked(nextAddr), "get_next_one postcondition");
+  return nextAddr;
+}
+
+
+// Return the HeapWord address corresponding to the next "0" bit
+// (inclusive).
+inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
+  return getNextUnmarkedWordAddress(addr, endWord());
+}
+
+// Return the HeapWord address corresponding to the next "0" bit
+// (inclusive).
+inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
+  HeapWord* start_addr, HeapWord* end_addr) const {
+  assert_locked();
+  size_t nextOffset = _bm.get_next_zero_offset(
+                        heapWordToOffset(start_addr),
+                        heapWordToOffset(end_addr));
+  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
+  assert(nextAddr >= start_addr &&
+         nextAddr <= end_addr, "get_next_zero postcondition");
+  assert((nextAddr == end_addr) ||
+          isUnmarked(nextAddr), "get_next_zero postcondition");
+  return nextAddr;
+}
+
+inline bool CMSBitMap::isAllClear() const {
+  assert_locked();
+  return getNextMarkedWordAddress(startWord()) >= endWord();
+}
+
+inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
+                            HeapWord* right) {
+  assert_locked();
+  left = MAX2(_bmStartWord, left);
+  right = MIN2(_bmStartWord + _bmWordSize, right);
+  if (right > left) {
+    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
+  }
+}
+
+inline void CMSCollector::start_icms() {
+  if (CMSIncrementalMode) {
+    ConcurrentMarkSweepThread::start_icms();
+  }
+}
+
+inline void CMSCollector::stop_icms() {
+  if (CMSIncrementalMode) {
+    ConcurrentMarkSweepThread::stop_icms();
+  }
+}
+
+inline void CMSCollector::disable_icms() {
+  if (CMSIncrementalMode) {
+    ConcurrentMarkSweepThread::disable_icms();
+  }
+}
+
+inline void CMSCollector::enable_icms() {
+  if (CMSIncrementalMode) {
+    ConcurrentMarkSweepThread::enable_icms();
+  }
+}
+
+inline void CMSCollector::icms_wait() {
+  if (CMSIncrementalMode) {
+    cmsThread()->icms_wait();
+  }
+}
+
+inline void CMSCollector::save_sweep_limits() {
+  _cmsGen->save_sweep_limit();
+  _permGen->save_sweep_limit();
+}
+
+inline bool CMSCollector::is_dead_obj(oop obj) const {
+  HeapWord* addr = (HeapWord*)obj;
+  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
+	  && _cmsGen->cmsSpace()->block_is_obj(addr))
+	 ||
+         (_permGen->cmsSpace()->is_in_reserved(addr)
+	  && _permGen->cmsSpace()->block_is_obj(addr)),
+	 "must be object");
+  return  cms_should_unload_classes() &&
+          _collectorState == Sweeping &&
+         !_markBitMap.isMarked(addr);
+}
+
+inline bool CMSCollector::should_abort_preclean() const {
+  // We are in the midst of an "abortable preclean" and either
+  // scavenge is done or foreground GC wants to take over collection
+  return _collectorState == AbortablePreclean &&
+         (_abort_preclean || _foregroundGCIsActive ||
+          GenCollectedHeap::heap()->incremental_collection_will_fail());
+}
+
+inline size_t CMSCollector::get_eden_used() const {
+  return _young_gen->as_DefNewGeneration()->eden()->used();
+}
+
+inline size_t CMSCollector::get_eden_capacity() const {
+  return _young_gen->as_DefNewGeneration()->eden()->capacity();
+}
+
+inline bool CMSStats::valid() const {
+  return _valid_bits == _ALL_VALID;
+}
+
+inline void CMSStats::record_gc0_begin() {
+  if (_gc0_begin_time.is_updated()) {
+    float last_gc0_period = _gc0_begin_time.seconds();
+    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period, 
+      last_gc0_period, _gc0_alpha);
+    _gc0_alpha = _saved_alpha;
+    _valid_bits |= _GC0_VALID;
+  }
+  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
+
+  _gc0_begin_time.update();
+}
+
+inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
+  float last_gc0_duration = _gc0_begin_time.seconds();
+  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration, 
+    last_gc0_duration, _gc0_alpha);
+
+  // Amount promoted.
+  _cms_used_at_gc0_end = cms_gen_bytes_used;
+
+  size_t promoted_bytes = 0;
+  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
+    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
+  } 
+
+  // If the younger gen collections were skipped, then the
+  // number of promoted bytes will be 0 and adding it to the
+  // average will incorrectly lessen the average.  It is, however,
+  // also possible that no promotion was needed.
+  // 
+  // _gc0_promoted used to be calculated as
+  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
+  //  promoted_bytes, _gc0_alpha);
+  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
+  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
+
+  // Amount directly allocated.
+  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
+  _cms_gen->reset_direct_allocated_words();
+  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated, 
+    allocated_bytes, _gc0_alpha);
+}
+
+inline void CMSStats::record_cms_begin() {
+  _cms_timer.stop();
+
+  // This is just an approximate value, but is good enough.
+  _cms_used_at_cms_begin = _cms_used_at_gc0_end;
+
+  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period, 
+    (float) _cms_timer.seconds(), _cms_alpha);
+  _cms_begin_time.update();
+
+  _cms_timer.reset();
+  _cms_timer.start();
+}
+
+inline void CMSStats::record_cms_end() {
+  _cms_timer.stop();
+
+  float cur_duration = _cms_timer.seconds();
+  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration, 
+    cur_duration, _cms_alpha);
+
+  // Avoid division by 0.
+  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
+  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
+				 cur_duration / cms_used_mb,
+				 _cms_alpha);
+
+  _cms_end_time.update();
+  _cms_alpha = _saved_alpha;
+  _allow_duty_cycle_reduction = true;
+  _valid_bits |= _CMS_VALID;
+
+  _cms_timer.start();
+}
+
+inline double CMSStats::cms_time_since_begin() const {
+  return _cms_begin_time.seconds();
+}
+
+inline double CMSStats::cms_time_since_end() const {
+  return _cms_end_time.seconds();
+}
+
+inline double CMSStats::promotion_rate() const {
+  assert(valid(), "statistics not valid yet");
+  return gc0_promoted() / gc0_period();
+}
+
+inline double CMSStats::cms_allocation_rate() const {
+  assert(valid(), "statistics not valid yet");
+  return cms_allocated() / gc0_period();
+}
+
+inline double CMSStats::cms_consumption_rate() const {
+  assert(valid(), "statistics not valid yet");
+  return (gc0_promoted() + cms_allocated()) / gc0_period();
+}
+
+inline unsigned int CMSStats::icms_update_duty_cycle() {
+  // Update the duty cycle only if pacing is enabled and the stats are valid
+  // (after at least one young gen gc and one cms cycle have completed).
+  if (CMSIncrementalPacing && valid()) {
+    return icms_update_duty_cycle_impl();
+  }
+  return _icms_duty_cycle;
+}
+
+inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
+  cmsSpace()->save_sweep_limit();
+}
+
+inline size_t ConcurrentMarkSweepGeneration::capacity() const {
+  return _cmsSpace->capacity();
+}
+
+inline size_t ConcurrentMarkSweepGeneration::used() const {
+  return _cmsSpace->used();
+}
+
+inline size_t ConcurrentMarkSweepGeneration::free() const {
+  return _cmsSpace->free();
+}
+
+inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
+  return _cmsSpace->used_region();
+}
+
+inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
+  return _cmsSpace->used_region_at_save_marks();
+}
+
+inline void MarkFromRootsClosure::do_yield_check() {
+  if (ConcurrentMarkSweepThread::should_yield() &&
+      !_collector->foregroundGCIsActive() &&
+      _yield) {
+    do_yield_work();
+  }
+}
+
+inline void Par_MarkFromRootsClosure::do_yield_check() {
+  if (ConcurrentMarkSweepThread::should_yield() &&
+      !_collector->foregroundGCIsActive() &&
+      _yield) {
+    do_yield_work();
+  }
+}
+
+// Return value of "true" indicates that the on-going preclean
+// should be aborted.
+inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
+  if (ConcurrentMarkSweepThread::should_yield() &&
+      !_collector->foregroundGCIsActive() &&
+      _yield) {
+    // Sample young gen size before and after yield
+    _collector->sample_eden(); 
+    do_yield_work();
+    _collector->sample_eden();
+    return _collector->should_abort_preclean();
+  }
+  return false;
+}
+
+inline void SurvivorSpacePrecleanClosure::do_yield_check() {
+  if (ConcurrentMarkSweepThread::should_yield() &&
+      !_collector->foregroundGCIsActive() &&
+      _yield) {
+    // Sample young gen size before and after yield
+    _collector->sample_eden();
+    do_yield_work();
+    _collector->sample_eden();
+  }
+}
+
+inline void SweepClosure::do_yield_check(HeapWord* addr) {
+  if (ConcurrentMarkSweepThread::should_yield() &&
+      !_collector->foregroundGCIsActive() &&
+      _yield) {
+    do_yield_work(addr);
+  }
+}
+
+inline void MarkRefsIntoAndScanClosure::do_yield_check() {
+  // The conditions are ordered for the remarking phase
+  // when _yield is false.
+  if (_yield &&
+      !_collector->foregroundGCIsActive() &&
+      ConcurrentMarkSweepThread::should_yield()) {
+    do_yield_work();
+  }
+}
+
+
+inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
+  // Align the end of mr so it's at a card boundary.
+  // This is superfluous except at the end of the space;
+  // we should do better than this XXX
+  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
+                 CardTableModRefBS::card_size /* bytes */));
+  _t->mark_range(mr2);
+}
+
+inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
+  // Align the end of mr so it's at a card boundary.
+  // This is superfluous except at the end of the space;
+  // we should do better than this XXX
+  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
+                 CardTableModRefBS::card_size /* bytes */));
+  _t->par_mark_range(mr2);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,353 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)concurrentMarkSweepThread.cpp	1.48 07/05/05 17:06:45 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_concurrentMarkSweepThread.cpp.incl"
+
+// ======= Concurrent Mark Sweep Thread ========
+
+// The CMS thread is created when Concurrent Mark Sweep is used in the
+// older of two generations in a generational memory system.
+
+ConcurrentMarkSweepThread*
+     ConcurrentMarkSweepThread::_cmst     = NULL;
+CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
+bool ConcurrentMarkSweepThread::_should_terminate = false;
+int  ConcurrentMarkSweepThread::_CMS_flag         = CMS_nil;
+
+volatile jint ConcurrentMarkSweepThread::_pending_yields      = 0;
+volatile jint ConcurrentMarkSweepThread::_pending_decrements  = 0;
+
+volatile bool ConcurrentMarkSweepThread::_icms_enabled   = false;
+volatile bool ConcurrentMarkSweepThread::_should_run     = false;
+// When icms is enabled, the icms thread is stopped until explicitly
+// started.
+volatile bool ConcurrentMarkSweepThread::_should_stop    = true;
+
+SurrogateLockerThread*
+     ConcurrentMarkSweepThread::_slt = NULL;
+SurrogateLockerThread::SLT_msg_type
+     ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
+Monitor*
+     ConcurrentMarkSweepThread::_sltMonitor = NULL;
+
+ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
+  : ConcurrentGCThread() {
+  assert(UseConcMarkSweepGC,  "UseConcMarkSweepGC should be set");
+  assert(_cmst == NULL, "CMS thread already created");
+  _cmst = this;
+  assert(_collector == NULL, "Collector already set");
+  _collector = collector;
+
+  set_name("Concurrent Mark-Sweep GC Thread");
+
+  if (os::create_thread(this, os::cgc_thread)) {
+    // XXX: need to set this to low priority
+    // unless "aggressive mode" set; priority
+    // should be just less than that of VMThread.
+    os::set_priority(this, NearMaxPriority);
+    if (!DisableStartThread) {
+      os::start_thread(this);
+    }
+  }
+  _sltMonitor = SLT_lock;
+  set_icms_enabled(CMSIncrementalMode);
+}
+
+void ConcurrentMarkSweepThread::run() {
+  assert(this == cmst(), "just checking");
+
+  this->record_stack_base_and_size();
+  this->initialize_thread_local_storage();
+  this->set_active_handles(JNIHandleBlock::allocate_block());
+  // From this time Thread::current() should be working.
+  assert(this == Thread::current(), "just checking");
+  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
+    warning("Couldn't bind CMS thread to processor %u", CPUForCMSThread);
+  }
+  // Wait until Universe::is_fully_initialized()
+  {
+    CMSLoopCountWarn loopX("CMS::run", "waiting for "
+                           "Universe::is_fully_initialized()", 2);
+    MutexLockerEx x(CGC_lock, true);
+    set_CMS_flag(CMS_cms_wants_token);
+    // Wait until Universe is initialized and all initialization is completed.
+    while (!is_init_completed() && !Universe::is_fully_initialized() &&
+           !_should_terminate) {
+      CGC_lock->wait(true, 200);
+      loopX.tick();
+    }
+    // Wait until the surrogate locker thread that will do
+    // pending list locking on our behalf has been created.
+    // We cannot start the SLT thread ourselves since we need
+    // to be a JavaThread to do so.
+    CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
+    while (_slt == NULL && !_should_terminate) {
+      CGC_lock->wait(true, 200);
+      loopY.tick();
+    }
+    clear_CMS_flag(CMS_cms_wants_token);
+  }
+
+  while (!_should_terminate) {
+    sleepBeforeNextCycle();
+    if (_should_terminate) break;
+    _collector->collect_in_background(false);  // !clear_all_soft_refs
+  }
+  assert(_should_terminate, "just checking");
+  // Check that the state of any protocol for synchronization
+  // between background (CMS) and foreground collector is "clean"
+  // (i.e. will not potentially block the foreground collector,
+  // requiring action by us).
+  verify_ok_to_terminate();
+  // Signal that it is terminated
+  {
+    MutexLockerEx mu(Terminator_lock,
+                     Mutex::_no_safepoint_check_flag);
+    assert(_cmst == this, "Weird!");
+    _cmst = NULL;
+    Terminator_lock->notify();
+  }
+  
+  // Thread destructor usually does this..
+  ThreadLocalStorage::set_thread(NULL);
+}
+
+#ifndef PRODUCT
+void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
+  assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
+           cms_thread_wants_cms_token()),
+         "Must renounce all worldly possessions and desires for nirvana");
+  _collector->verify_ok_to_terminate();
+}
+#endif
+
+// create and start a new ConcurrentMarkSweep Thread for given CMS generation
+ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
+  if (!_should_terminate) {
+    assert(cmst() == NULL, "start() called twice?");
+    ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
+    assert(cmst() == th, "Where did the just-created CMS thread go?");
+    return th;
+  }
+  return NULL;
+}
+
+void ConcurrentMarkSweepThread::stop() {
+  if (CMSIncrementalMode) {
+    // Disable incremental mode and wake up the thread so it notices the change.
+    disable_icms();
+    start_icms();
+  }
+  // it is ok to take late safepoints here, if needed
+  {
+    MutexLockerEx x(Terminator_lock);
+    _should_terminate = true;  
+  }
+  { // Now post a notify on CGC_lock so as to nudge
+    // CMS thread(s) that might be slumbering in
+    // sleepBeforeNextCycle.
+    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+    CGC_lock->notify_all();
+  }
+  { // Now wait until (all) CMS thread(s) have exited
+    MutexLockerEx x(Terminator_lock);
+    while(cmst() != NULL) {
+      Terminator_lock->wait();
+    }
+  }  
+}
+
+void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
+  assert(tc != NULL, "Null ThreadClosure");
+  if (_cmst != NULL) {
+    tc->do_thread(_cmst);
+  }
+  assert(Universe::is_fully_initialized(), 
+         "Called too early, make sure heap is fully initialized");
+  if (_collector != NULL) {
+    AbstractWorkGang* gang = _collector->conc_workers();
+    if (gang != NULL) {
+      gang->threads_do(tc);
+    }
+  }
+}
+
+void ConcurrentMarkSweepThread::print_on(outputStream* st) const {
+  st->print("\"%s\" ", name());
+  Thread::print_on(st);
+  st->cr();
+}
+
+void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
+  if (_cmst != NULL) {
+    _cmst->print_on(st);
+  }
+  if (_collector != NULL) {
+    AbstractWorkGang* gang = _collector->conc_workers();
+    if (gang != NULL) {
+      gang->print_worker_threads_on(st);
+    }
+  }
+}
+
+void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
+  assert(UseConcMarkSweepGC, "just checking");
+
+  MutexLockerEx x(CGC_lock,
+                  Mutex::_no_safepoint_check_flag);
+  if (!is_cms_thread) {
+    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
+    CMSSynchronousYieldRequest yr;
+    while (CMS_flag_is_set(CMS_cms_has_token)) {
+      // indicate that we want to get the token
+      set_CMS_flag(CMS_vm_wants_token);
+      CGC_lock->wait(true);
+    }
+    // claim the token and proceed
+    clear_CMS_flag(CMS_vm_wants_token);
+    set_CMS_flag(CMS_vm_has_token);
+  } else {
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "Not a CMS thread");
+    // The following barrier assumes there's only one CMS thread.
+    // This will need to be modified if there are more CMS threads than one.
+    while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
+      set_CMS_flag(CMS_cms_wants_token);
+      CGC_lock->wait(true);
+    }
+    // claim the token
+    clear_CMS_flag(CMS_cms_wants_token);
+    set_CMS_flag(CMS_cms_has_token);
+  }
+}
+
+void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
+  assert(UseConcMarkSweepGC, "just checking");
+
+  MutexLockerEx x(CGC_lock,
+                  Mutex::_no_safepoint_check_flag);
+  if (!is_cms_thread) {
+    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
+    assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
+    clear_CMS_flag(CMS_vm_has_token);
+    if (CMS_flag_is_set(CMS_cms_wants_token)) {
+      // wake-up a waiting CMS thread
+      CGC_lock->notify();
+    }
+    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
+           "Should have been cleared");
+  } else {
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "Not a CMS thread");
+    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
+    clear_CMS_flag(CMS_cms_has_token);
+    if (CMS_flag_is_set(CMS_vm_wants_token)) {
+      // wake-up a waiting VM thread
+      CGC_lock->notify();
+    }
+    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
+           "Should have been cleared");
+  }
+}
+
+// Wait until the next synchronous GC or a timeout, whichever is earlier.
+void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
+  MutexLockerEx x(CGC_lock,
+                  Mutex::_no_safepoint_check_flag);
+  set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
+  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
+  clear_CMS_flag(CMS_cms_wants_token);
+  assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
+         "Should not be set");
+}
+
+void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
+  while (!_should_terminate) {
+    if (CMSIncrementalMode) {
+      icms_wait();
+      return;
+    } else {
+      // Wait until the next synchronous GC or a timeout, whichever is earlier
+      wait_on_cms_lock(CMSWaitDuration);
+    }
+    // Check if we should start a CMS collection cycle
+    if (_collector->shouldConcurrentCollect()) {
+      return;
+    }
+    // .. collection criterion not yet met, let's go back 
+    // and wait some more
+  }
+}
+
+// Incremental CMS
+void ConcurrentMarkSweepThread::start_icms() {
+  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
+  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
+  trace_state("start_icms");
+  _should_run = true;
+  iCMS_lock->notify_all();
+}
+
+void ConcurrentMarkSweepThread::stop_icms() {
+  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
+  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
+  if (!_should_stop) {
+    trace_state("stop_icms");
+    _should_stop = true;
+    _should_run = false;
+    asynchronous_yield_request();
+    iCMS_lock->notify_all();
+  }
+}
+
+void ConcurrentMarkSweepThread::icms_wait() {
+  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
+  if (_should_stop && icms_enabled()) {
+    MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
+    trace_state("pause_icms");
+    _collector->stats().stop_cms_timer();
+    while(!_should_run && icms_enabled()) {
+      iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
+    }
+    _collector->stats().start_cms_timer();
+    _should_stop = false;
+    trace_state("pause_icms end");
+  }
+}
+
+// Note: this method, although exported by the ConcurrentMarkSweepThread,
+// which is a non-JavaThread, can only be called by a JavaThread.
+// Currently this is done at vm creation time (post-vm-init) by the
+// main/Primordial (Java)Thread.
+// XXX Consider changing this in the future to allow the CMS thread
+// itself to create this thread?
+void ConcurrentMarkSweepThread::makeSurrogateLockerThread(TRAPS) {
+  assert(UseConcMarkSweepGC, "SLT thread needed only for CMS GC");
+  assert(_slt == NULL, "SLT already created");
+  _slt = SurrogateLockerThread::make(THREAD);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,232 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)concurrentMarkSweepThread.hpp	1.38 07/05/05 17:06:46 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class ConcurrentMarkSweepGeneration;
+class CMSCollector;
+
+// The Concurrent Mark Sweep GC Thread (could be several in the future).
+class ConcurrentMarkSweepThread: public ConcurrentGCThread {
+  friend class VMStructs;
+  friend class ConcurrentMarkSweepGeneration;   // XXX should remove friendship
+  friend class CMSCollector;
+ public:
+  virtual void run();
+
+ private:
+  static ConcurrentMarkSweepThread*     _cmst;
+  static CMSCollector*                  _collector;
+  static SurrogateLockerThread*         _slt;
+  static SurrogateLockerThread::SLT_msg_type _sltBuffer;
+  static Monitor*                       _sltMonitor;
+
+  ConcurrentMarkSweepThread*            _next;
+
+  static bool _should_terminate;
+
+  enum CMS_flag_type {
+    CMS_nil             = NoBits,
+    CMS_cms_wants_token = nth_bit(0),
+    CMS_cms_has_token   = nth_bit(1),
+    CMS_vm_wants_token  = nth_bit(2),
+    CMS_vm_has_token    = nth_bit(3)
+  };
+
+  static int _CMS_flag;
+
+  static bool CMS_flag_is_set(int b)        { return (_CMS_flag & b) != 0;   }
+  static bool set_CMS_flag(int b)           { return (_CMS_flag |= b) != 0;  }
+  static bool clear_CMS_flag(int b)         { return (_CMS_flag &= ~b) != 0; }
+  void sleepBeforeNextCycle();
+
+  // CMS thread should yield for a young gen collection, direct allocation,
+  // and iCMS activity.
+  static char _pad_1[64 - sizeof(jint)];    // prevent cache-line sharing
+  static volatile jint _pending_yields;
+  static volatile jint _pending_decrements; // decrements to _pending_yields
+  static char _pad_2[64 - sizeof(jint)];    // prevent cache-line sharing
+
+  // Tracing messages, enabled by CMSTraceThreadState.
+  static inline void trace_state(const char* desc);
+
+  static volatile bool _icms_enabled;	// iCMS enabled?
+  static volatile bool _should_run;	// iCMS may run
+  static volatile bool _should_stop;	// iCMS should stop
+
+  // debugging
+  void verify_ok_to_terminate() const PRODUCT_RETURN;
+
+ public:
+  // Constructor
+  ConcurrentMarkSweepThread(CMSCollector* collector);
+
+  static void makeSurrogateLockerThread(TRAPS);
+  static SurrogateLockerThread* slt() { return _slt; }
+
+  // Tester
+  bool is_ConcurrentGC_thread() const { return true;       }
+
+  static void threads_do(ThreadClosure* tc);
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const 				      { print_on(tty); }
+  static void print_all_on(outputStream* st);
+  static void print_all() 			      { print_all_on(tty); }
+
+  // Returns the CMS Thread
+  static ConcurrentMarkSweepThread* cmst()    { return _cmst; }
+  static CMSCollector*         collector()    { return _collector;  }
+
+  // Create and start the CMS Thread, or stop it on shutdown
+  static ConcurrentMarkSweepThread* start(CMSCollector* collector);
+  static void stop();
+  static bool should_terminate() { return _should_terminate; }
+
+  // Synchronization using CMS token
+  static void synchronize(bool is_cms_thread);
+  static void desynchronize(bool is_cms_thread);
+  static bool vm_thread_has_cms_token() {
+    return CMS_flag_is_set(CMS_vm_has_token);
+  }
+  static bool cms_thread_has_cms_token() {
+    return CMS_flag_is_set(CMS_cms_has_token);
+  }
+  static bool vm_thread_wants_cms_token() {
+    return CMS_flag_is_set(CMS_vm_wants_token);
+  }
+  static bool cms_thread_wants_cms_token() {
+    return CMS_flag_is_set(CMS_cms_wants_token);
+  }
+
+  // Wait on CMS lock until the next synchronous GC 
+  // or given timeout, whichever is earlier.
+  void    wait_on_cms_lock(long t); // milliseconds
+
+  // The CMS thread will yield during the work portion of it's cycle
+  // only when requested to.  Both synchronous and asychronous requests
+  // are provided.  A synchronous request is used for young gen
+  // collections and direct allocations.  The requesting thread increments
+  // pending_yields at the beginning of an operation, and decrements it when
+  // the operation is completed.  The CMS thread yields when pending_yields
+  // is positive.  An asynchronous request is used by iCMS in the stop_icms()
+  // operation. A single yield satisfies the outstanding asynch yield requests.
+  // The requesting thread increments both pending_yields and pending_decrements.
+  // After yielding, the CMS thread decrements both by the amount in
+  // pending_decrements.
+  // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
+  // we cannot easily test that invariant, since the counters are manipulated via
+  // atomic instructions without explicit locking and we cannot read
+  // the two counters atomically together: one suggestion is to
+  // use (for example) 16-bit counters so as to be able to read the
+  // two counters atomically even on 32-bit platforms. Notice that
+  // the second assert in acknowledge_yield_request() does indeed
+  // check a form of the above invariant, albeit indirectly.
+
+  static void increment_pending_yields()   {
+    Atomic::inc(&_pending_yields);
+    assert(_pending_yields >= 0, "can't be negative");
+  }
+  static void decrement_pending_yields()   {
+    Atomic::dec(&_pending_yields);
+    assert(_pending_yields >= 0, "can't be negative");
+  }
+  static void asynchronous_yield_request() {
+    increment_pending_yields();
+    Atomic::inc(&_pending_decrements);
+    assert(_pending_decrements >= 0, "can't be negative");
+  }
+  static void acknowledge_yield_request() {
+    jint decrement = _pending_decrements;
+    if (decrement > 0) {
+      // Order important to preserve: _pending_yields >= _pending_decrements
+      Atomic::add(-decrement, &_pending_decrements);
+      Atomic::add(-decrement, &_pending_yields);
+      assert(_pending_decrements >= 0, "can't be negative");
+      assert(_pending_yields >= 0, "can't be negative");
+    }
+  }
+  static bool should_yield()   { return _pending_yields > 0; }
+
+  // CMS incremental mode.
+  static void start_icms(); // notify thread to start a quantum of work
+  static void stop_icms();  // request thread to stop working
+  void icms_wait();	    // if asked to stop, wait until notified to start
+
+  // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
+  // must also be enabled/disabled dynamically to allow foreground collections.
+  static inline void enable_icms()              { _icms_enabled = true; }
+  static inline void disable_icms()             { _icms_enabled = false; }
+  static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
+  static inline bool icms_enabled()             { return _icms_enabled; } 
+};
+
+inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
+  if (CMSTraceThreadState) {
+    char buf[128];
+    TimeStamp& ts = gclog_or_tty->time_stamp();
+    if (!ts.is_updated()) {
+      ts.update();
+    }
+    jio_snprintf(buf, sizeof(buf), " [%.3f:  CMSThread %s] ",
+		 ts.seconds(), desc);
+    buf[sizeof(buf) - 1] = '\0';
+    gclog_or_tty->print(buf);
+  }
+}
+
+// For scoped increment/decrement of yield requests
+class CMSSynchronousYieldRequest: public StackObj {
+ public:
+  CMSSynchronousYieldRequest() {
+    ConcurrentMarkSweepThread::increment_pending_yields();
+  }
+  ~CMSSynchronousYieldRequest() {
+    ConcurrentMarkSweepThread::decrement_pending_yields();
+  }
+};
+
+// Used to emit a warning in case of unexpectedly excessive
+// looping (in "apparently endless loops") in CMS code.
+class CMSLoopCountWarn: public StackObj {
+ private:
+  const char* _src;
+  const char* _msg;
+  const intx  _threshold;
+  intx        _ticks;
+
+ public:
+  inline CMSLoopCountWarn(const char* src, const char* msg,
+                          const intx threshold) :
+    _src(src), _msg(msg), _threshold(threshold), _ticks(0) { }
+
+  inline void tick() {
+    _ticks++;
+    if (CMSLoopWarn && _ticks % _threshold == 0) {
+      warning("%s has looped %d times %s", _src, _ticks, _msg);
+    }
+  }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,51 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)freeBlockDictionary.cpp	1.12 07/05/05 17:05:47 JVM"
+#endif
+/*
+ * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_freeBlockDictionary.cpp.incl"
+
+#ifndef PRODUCT
+Mutex* FreeBlockDictionary::par_lock() const {
+  return _lock;
+}
+
+void FreeBlockDictionary::set_par_lock(Mutex* lock) {
+  _lock = lock;
+}
+
+void FreeBlockDictionary::verify_par_locked() const {
+#ifdef ASSERT
+  if (ParallelGCThreads > 0) {
+    Thread* myThread = Thread::current();
+    if (myThread->is_GC_task_thread()) {
+      assert(par_lock() != NULL, "Should be using locking?");
+      assert_lock_strong(par_lock());
+    }
+  }
+#endif // ASSERT
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,174 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)freeBlockDictionary.hpp	1.32 07/05/05 17:05:47 JVM"
+#endif
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+//
+// Free block maintenance for Concurrent Mark Sweep Generation
+//
+// The main data structure for free blocks are
+// . an indexed array of small free blocks, and
+// . a dictionary of large free blocks
+//
+
+// No virtuals in FreeChunk (don't want any vtables).
+
+// A FreeChunk is merely a chunk that can be in a doubly linked list
+// and has a size field. NOTE: FreeChunks are distinguished from allocated
+// objects in two ways (by the sweeper). The second word (prev) has the
+// LSB set to indicate a free chunk; allocated objects' klass() pointers
+// don't have their LSB set. The corresponding bit in the CMSBitMap is
+// set when the chunk is allocated. There are also blocks that "look free"
+// but are not part of the free list and should not be coalesced into larger
+// free blocks. These free blocks have their two LSB's set.
+
+class FreeChunk VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+  FreeChunk* _next;
+  FreeChunk* _prev;
+  size_t     _size;
+
+ public:
+  NOT_PRODUCT(static const size_t header_size();)
+  // Returns "true" if the "wrd", which is required to be the second word
+  // of a block, indicates that the block represents a free chunk.
+  static bool secondWordIndicatesFreeChunk(intptr_t wrd) {
+    return (wrd & 0x1) == 0x1;
+  }
+  bool isFree()       const {
+    return secondWordIndicatesFreeChunk((intptr_t)_prev);
+  }
+  bool cantCoalesce() const { return (((intptr_t)_prev) & 0x3) == 0x3; }
+  FreeChunk* next()   const { return _next; }
+  FreeChunk* prev()   const { return (FreeChunk*)(((intptr_t)_prev) & ~(0x3)); }
+  debug_only(void* prev_addr() const { return (void*)&_prev; })
+
+  void linkAfter(FreeChunk* ptr) {
+    linkNext(ptr);
+    if (ptr != NULL) ptr->linkPrev(this);
+  }
+  void linkAfterNonNull(FreeChunk* ptr) {
+    assert(ptr != NULL, "precondition violation");
+    linkNext(ptr);
+    ptr->linkPrev(this);
+  }
+  void linkNext(FreeChunk* ptr) { _next = ptr; }
+  void linkPrev(FreeChunk* ptr) { _prev = (FreeChunk*)((intptr_t)ptr | 0x1); }
+  void clearPrev()              { _prev = NULL; }
+  void clearNext()              { _next = NULL; }
+  void dontCoalesce()      {
+    // the block should be free
+    assert(isFree(), "Should look like a free block");
+    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
+  }
+  void markFree()    { _prev = (FreeChunk*)((intptr_t)_prev | 0x1);    }
+  void markNotFree() { _prev = NULL; }
+
+  size_t size()           const { return _size; }
+  void setSize(size_t size)     { _size = size; }
+
+  // For volatile reads:
+  size_t* size_addr()           { return &_size; }
+
+  // Return the address past the end of this chunk
+  HeapWord* end() const { return ((HeapWord*) this) + _size; }
+
+  // debugging
+  void verify()             const PRODUCT_RETURN;
+  void verifyList()         const PRODUCT_RETURN;
+  void mangleAllocated(size_t size) PRODUCT_RETURN;
+  void mangleFreed(size_t size)     PRODUCT_RETURN; 
+};
+
+// Alignment helpers etc.
+#define numQuanta(x,y) ((x+y-1)/y)
+enum AlignmentConstants {
+  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
+};
+
+// A FreeBlockDictionary is an abstract superclass that will allow
+// a number of alternative implementations in the future.
+class FreeBlockDictionary: public CHeapObj {
+ public:
+  enum Dither {
+    atLeast,
+    exactly,
+    roughly
+  };
+  enum DictionaryChoice {
+    dictionaryBinaryTree = 0,
+    dictionarySplayTree  = 1,
+    dictionarySkipList   = 2
+  };
+
+ private:
+  NOT_PRODUCT(Mutex* _lock;)
+
+ public:
+  virtual void       removeChunk(FreeChunk* fc) = 0;
+  virtual FreeChunk* getChunk(size_t size, Dither dither = atLeast) = 0;
+  virtual void       returnChunk(FreeChunk* chunk) = 0;
+  virtual size_t     totalChunkSize(debug_only(const Mutex* lock)) const = 0;
+  virtual size_t     maxChunkSize()   const = 0;
+  virtual size_t     minSize()        const = 0;
+  // Reset the dictionary to the initial conditions for a single
+  // block.
+  virtual void	     reset(HeapWord* addr, size_t size) = 0;
+  virtual void	     reset() = 0;
+
+  virtual void       dictCensusUpdate(size_t size, bool split, bool birth) = 0;
+  virtual bool       coalDictOverPopulated(size_t size) = 0;
+  virtual void       beginSweepDictCensus(double coalSurplusPercent,
+                       float sweep_current, float sweep_ewstimate) = 0;
+  virtual void       endSweepDictCensus(double splitSurplusPercent) = 0;
+  virtual FreeChunk* findLargestDict() const = 0;
+  // verify that the given chunk is in the dictionary.
+  virtual bool verifyChunkInFreeLists(FreeChunk* tc) const = 0;
+
+  // Sigma_{all_free_blocks} (block_size^2)
+  virtual double sum_of_squared_block_sizes() const = 0;
+
+  virtual FreeChunk* find_chunk_ends_at(HeapWord* target) const = 0;
+  virtual void inc_totalSize(size_t v) = 0;
+  virtual void dec_totalSize(size_t v) = 0;
+
+  NOT_PRODUCT (
+    virtual size_t   sumDictReturnedBytes() = 0;
+    virtual void     initializeDictReturnedBytes() = 0;
+    virtual size_t   totalCount() = 0;
+  )
+
+  virtual void       reportStatistics() const {
+    gclog_or_tty->print("No statistics available");
+  }
+
+  virtual void 	     printDictCensus() const = 0;
+
+  virtual void       verify()         const = 0;
+
+  Mutex* par_lock()                const PRODUCT_RETURN0;
+  void   set_par_lock(Mutex* lock)       PRODUCT_RETURN;
+  void   verify_par_locked()       const PRODUCT_RETURN;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,72 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)freeChunk.cpp	1.16 07/05/05 17:05:47 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_freeChunk.cpp.incl"
+
+#ifndef PRODUCT
+
+#define baadbabeHeapWord badHeapWordVal
+#define deadbeefHeapWord 0xdeadbeef
+
+size_t const FreeChunk::header_size() {
+  return sizeof(FreeChunk)/HeapWordSize;
+}
+
+void FreeChunk::mangleAllocated(size_t size) {
+  // mangle all but the header of a just-allocated block
+  // of storage
+  assert(size >= MinChunkSize, "smallest size of object");
+  // we can't assert that _size == size because this may be an
+  // allocation out of a linear allocation block
+  assert(sizeof(FreeChunk) % HeapWordSize == 0,
+         "shouldn't write beyond chunk");
+  HeapWord* addr = (HeapWord*)this;
+  size_t hdr = header_size();
+  Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord);
+}
+
+void FreeChunk::mangleFreed(size_t size) {
+  assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns");
+  // mangle all but the header of a just-freed block of storage
+  // just prior to passing it to the storage dictionary
+  assert(size >= MinChunkSize, "smallest size of object");
+  assert(size == _size, "just checking");
+  HeapWord* addr = (HeapWord*)this;
+  size_t hdr = header_size();
+  Copy::fill_to_words(addr + hdr, size - hdr, deadbeefHeapWord);
+}
+
+void FreeChunk::verifyList() const {
+  FreeChunk* nextFC = next();
+  if (nextFC != NULL) {
+    assert(this == nextFC->prev(), "broken chain");
+    assert(size() == nextFC->size(), "wrong size");
+    nextFC->verifyList();
+  }
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,307 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)freeList.cpp	1.31 07/05/05 17:05:48 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_freeList.cpp.incl"
+
+// Free list.  A FreeList is used to access a linked list of chunks
+// of space in the heap.  The head and tail are maintained so that
+// items can be (as in the current implementation) added at the 
+// at the tail of the list and removed from the head of the list to
+// maintain a FIFO queue.
+
+FreeList::FreeList() :
+  _head(NULL), _tail(NULL)
+#ifdef ASSERT
+  , _protecting_lock(NULL)
+#endif
+{
+  _size		= 0;
+  _count	= 0;
+  _hint		= 0;
+  init_statistics();
+}
+
+FreeList::FreeList(FreeChunk* fc) :
+  _head(fc), _tail(fc)
+#ifdef ASSERT
+  , _protecting_lock(NULL)
+#endif
+{
+  _size		= fc->size();
+  _count	= 1;
+  _hint		= 0;
+  init_statistics();
+#ifndef PRODUCT
+  _allocation_stats.set_returnedBytes(size() * HeapWordSize);
+#endif
+}
+
+FreeList::FreeList(HeapWord* addr, size_t size) :
+  _head((FreeChunk*) addr), _tail((FreeChunk*) addr)
+#ifdef ASSERT
+  , _protecting_lock(NULL)
+#endif
+{
+  assert(size > sizeof(FreeChunk), "size is too small");
+  head()->setSize(size);
+  _size		= size;
+  _count	= 1;
+  init_statistics();
+#ifndef PRODUCT
+  _allocation_stats.set_returnedBytes(_size * HeapWordSize);
+#endif
+}
+
+void FreeList::reset(size_t hint) {
+  set_count(0);
+  set_head(NULL);
+  set_tail(NULL);
+  set_hint(hint);
+}
+
+void FreeList::init_statistics() {
+  _allocation_stats.initialize();
+}
+
+FreeChunk* FreeList::getChunkAtHead() {
+  assert_proper_lock_protection();
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  FreeChunk* fc = head();
+  if (fc != NULL) {
+    FreeChunk* nextFC = fc->next();
+    if (nextFC != NULL) {
+      // The chunk fc being removed has a "next".  Set the "next" to the
+      // "prev" of fc.
+      nextFC->linkPrev(NULL);
+    } else { // removed tail of list
+      link_tail(NULL);
+    }
+    link_head(nextFC);
+    decrement_count();
+  }
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  return fc;
+}
+
+
+void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
+  assert_proper_lock_protection();
+  assert(fl->count() == 0, "Precondition");
+  if (count() > 0) {
+    int k = 1;
+    fl->set_head(head()); n--;
+    FreeChunk* tl = head();
+    while (tl->next() != NULL && n > 0) {
+      tl = tl->next(); n--; k++;
+    }
+    assert(tl != NULL, "Loop Inv.");
+    
+    // First, fix up the list we took from.
+    FreeChunk* new_head = tl->next();
+    set_head(new_head);
+    set_count(count() - k);
+    if (new_head == NULL) {
+      set_tail(NULL);
+    } else {
+      new_head->linkPrev(NULL);
+    }
+    // Now we can fix up the tail.
+    tl->linkNext(NULL);
+    // And return the result.
+    fl->set_tail(tl);
+    fl->set_count(k);
+  }
+}
+
+// Remove this chunk from the list
+void FreeList::removeChunk(FreeChunk*fc) {
+   assert_proper_lock_protection();
+   assert(head() != NULL, "Remove from empty list");
+   assert(fc != NULL, "Remove a NULL chunk");
+   assert(size() == fc->size(), "Wrong list");
+   assert(head() == NULL || head()->prev() == NULL, "list invariant");
+   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+
+   FreeChunk* prevFC = fc->prev();
+   FreeChunk* nextFC = fc->next();
+   if (nextFC != NULL) {
+     // The chunk fc being removed has a "next".  Set the "next" to the
+     // "prev" of fc.
+     nextFC->linkPrev(prevFC);
+   } else { // removed tail of list
+     link_tail(prevFC);
+   }
+   if (prevFC == NULL) { // removed head of list
+     link_head(nextFC);
+     assert(nextFC == NULL || nextFC->prev() == NULL, 
+       "Prev of head should be NULL");
+   } else {
+     prevFC->linkNext(nextFC);
+     assert(tail() != prevFC || prevFC->next() == NULL,
+       "Next of tail should be NULL");
+   }
+   decrement_count();
+#define TRAP_CODE 1
+#if TRAP_CODE
+   if (head() == NULL) {
+     guarantee(tail() == NULL, "INVARIANT");
+     guarantee(count() == 0, "INVARIANT");
+   }
+#endif
+   // clear next and prev fields of fc, debug only
+   NOT_PRODUCT(
+     fc->linkPrev(NULL);
+     fc->linkNext(NULL);
+   )
+   assert(fc->isFree(), "Should still be a free chunk");
+   assert(head() == NULL || head()->prev() == NULL, "list invariant");
+   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+   assert(head() == NULL || head()->size() == size(), "wrong item on list");
+   assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
+}
+
+// Add this chunk at the head of the list.
+void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
+  assert_proper_lock_protection();
+  assert(chunk != NULL, "insert a NULL chunk");
+  assert(size() == chunk->size(), "Wrong size");
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  
+  FreeChunk* oldHead = head();
+  assert(chunk != oldHead, "double insertion");
+  chunk->linkAfter(oldHead);
+  link_head(chunk);
+  if (oldHead == NULL) { // only chunk in list
+    assert(tail() == NULL, "inconsistent FreeList");
+    link_tail(chunk);
+  }
+  increment_count(); // of # of chunks in list
+  DEBUG_ONLY(
+    if (record_return) {
+      increment_returnedBytes_by(size()*HeapWordSize);
+    }
+  )
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  assert(head() == NULL || head()->size() == size(), "wrong item on list");
+  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
+}
+
+void FreeList::returnChunkAtHead(FreeChunk* chunk) {
+  assert_proper_lock_protection();
+  returnChunkAtHead(chunk, true);
+}
+
+// Add this chunk at the tail of the list.
+void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
+  assert_proper_lock_protection();
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  assert(chunk != NULL, "insert a NULL chunk");
+  assert(size() == chunk->size(), "wrong size");
+
+  FreeChunk* oldTail = tail();
+  assert(chunk != oldTail, "double insertion");
+  if (oldTail != NULL) {
+    oldTail->linkAfter(chunk);
+  } else { // only chunk in list
+    assert(head() == NULL, "inconsistent FreeList");
+    link_head(chunk);
+  }
+  link_tail(chunk);
+  increment_count();  // of # of chunks in list
+  DEBUG_ONLY(
+    if (record_return) {
+      increment_returnedBytes_by(size()*HeapWordSize);
+    }
+  )
+  assert(head() == NULL || head()->prev() == NULL, "list invariant");
+  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
+  assert(head() == NULL || head()->size() == size(), "wrong item on list");
+  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
+}
+
+void FreeList::returnChunkAtTail(FreeChunk* chunk) {
+  returnChunkAtTail(chunk, true);
+}
+
+void FreeList::prepend(FreeList* fl) {
+  assert_proper_lock_protection();
+  if (fl->count() > 0) {
+    if (count() == 0) {
+      set_head(fl->head());
+      set_tail(fl->tail());
+      set_count(fl->count());
+    } else {
+      // Both are non-empty.
+      FreeChunk* fl_tail = fl->tail();
+      FreeChunk* this_head = head();
+      assert(fl_tail->next() == NULL, "Well-formedness of fl");
+      fl_tail->linkNext(this_head);
+      this_head->linkPrev(fl_tail);
+      set_head(fl->head());
+      set_count(count() + fl->count());
+    }
+    fl->set_head(NULL);
+    fl->set_tail(NULL);
+    fl->set_count(0);
+  }
+}
+
+// verifyChunkInFreeLists() is used to verify that an item is in this free list.
+// It is used as a debugging aid.
+bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
+  // This is an internal consistency check, not part of the check that the
+  // chunk is in the free lists.
+  guarantee(fc->size() == size(), "Wrong list is being searched");
+  FreeChunk* curFC = head();
+  while (curFC) {
+    // This is an internal consistency check.
+    guarantee(size() == curFC->size(), "Chunk is in wrong list.");
+    if (fc == curFC) {
+      return true;
+    }
+    curFC = curFC->next();
+  }
+  return false;
+}
+
+#ifndef PRODUCT
+void FreeList::assert_proper_lock_protection_work() const {
+#ifdef ASSERT
+  if (_protecting_lock != NULL &&
+      SharedHeap::heap()->n_par_threads() > 0) {
+    // Should become an assert.
+    guarantee(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+  }
+#endif
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,304 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)freeList.hpp	1.31 07/05/05 17:05:48 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class CompactibleFreeListSpace;
+
+// A class for maintaining a free list of FreeChunk's.  The FreeList
+// maintains the structure of the list (head, tail, etc.) plus
+// statistics for allocations from the list.  The links between items
+// are not part of FreeList.  The statistics are
+// used to make decisions about coalescing FreeChunk's when they
+// are swept during collection.
+//
+// See the corresponding .cpp file for a description of the specifics
+// for that implementation.
+
+class Mutex;
+
+class FreeList VALUE_OBJ_CLASS_SPEC {
+  friend class CompactibleFreeListSpace;
+  FreeChunk*	_head;		// List of free chunks
+  FreeChunk*	_tail;		// Tail of list of free chunks
+  size_t	_size;		// Size in Heap words of each chunk
+  ssize_t	_count;		// Number of entries in list
+  size_t        _hint;		// next larger size list with a positive surplus
+
+  AllocationStats _allocation_stats;		// statistics for smart allocation
+
+#ifdef ASSERT
+  Mutex*	_protecting_lock;
+#endif
+
+  // Asserts false if the protecting lock (if any) is not held.
+  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
+  void assert_proper_lock_protection() const {
+#ifdef ASSERT
+    if (_protecting_lock != NULL)
+      assert_proper_lock_protection_work();
+#endif
+  }
+
+  // Initialize the allocation statistics.
+ protected:
+  void init_statistics();
+  void set_count(ssize_t v) { _count = v;}
+  void increment_count() { _count++; }
+  void decrement_count() {
+    _count--;
+    assert(_count >= 0, "Count should not be negative"); }
+
+ public:
+  // Constructor
+  // Construct a list without any entries.
+  FreeList();
+  // Construct a list with "fc" as the first (and lone) entry in the list.
+  FreeList(FreeChunk* fc);
+  // Construct a list which will have a FreeChunk at address "addr" and
+  // of size "size" as the first (and lone) entry in the list.
+  FreeList(HeapWord* addr, size_t size);
+
+  // Reset the head, tail, hint, and count of a free list.
+  void reset(size_t hint);
+
+  // Declare the current free list to be protected by the given lock.
+#ifdef ASSERT
+  void set_protecting_lock(Mutex* protecting_lock) {
+    _protecting_lock = protecting_lock;
+  }
+#endif
+
+  // Accessors.
+  FreeChunk* head() const {
+    assert_proper_lock_protection();
+    return _head;
+  }
+  void set_head(FreeChunk* v) { 
+    assert_proper_lock_protection();
+    _head = v; 
+    assert(!_head || _head->size() == _size, "bad chunk size"); 
+  }
+  // Set the head of the list and set the prev field of non-null
+  // values to NULL.
+  void link_head(FreeChunk* v) {
+    assert_proper_lock_protection();
+    set_head(v); 
+    // If this method is not used (just set the head instead),
+    // this check can be avoided.
+    if (v != NULL) {
+      v->linkPrev(NULL);
+    }
+  }
+
+  FreeChunk* tail() const {
+    assert_proper_lock_protection();
+    return _tail;
+  }
+  void set_tail(FreeChunk* v) { 
+    assert_proper_lock_protection();
+    _tail = v; 
+    assert(!_tail || _tail->size() == _size, "bad chunk size");
+  }
+  // Set the tail of the list and set the next field of non-null
+  // values to NULL.
+  void link_tail(FreeChunk* v) {
+    assert_proper_lock_protection();
+    set_tail(v); 
+    if (v != NULL) {
+      v->clearNext();
+    }
+  }
+
+  // No locking checks in read-accessors: lock-free reads (only) are benign.
+  // Readers are expected to have the lock if they are doing work that
+  // requires atomicity guarantees in sections of code.
+  size_t size() const {
+    return _size;
+  }
+  void set_size(size_t v) {
+    assert_proper_lock_protection();
+    _size = v;
+  }
+  ssize_t count() const {
+    return _count;
+  }
+  size_t hint() const {
+    return _hint;
+  }
+  void set_hint(size_t v) {
+    assert_proper_lock_protection();
+    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
+  }
+
+  // Accessors for statistics
+  AllocationStats* allocation_stats() {
+    assert_proper_lock_protection();
+    return &_allocation_stats;
+  }
+
+  ssize_t desired() const {
+    return _allocation_stats.desired();
+  }
+  void compute_desired(float inter_sweep_current,
+                       float inter_sweep_estimate) {
+    assert_proper_lock_protection();
+    _allocation_stats.compute_desired(_count,
+                                      inter_sweep_current,
+                                      inter_sweep_estimate);
+  }
+  ssize_t coalDesired() const {
+    return _allocation_stats.coalDesired();
+  }
+  void set_coalDesired(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coalDesired(v);
+  }
+
+  ssize_t surplus() const {
+    return _allocation_stats.surplus();
+  }
+  void set_surplus(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_surplus(v);
+  }
+  void increment_surplus() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_surplus();
+  }
+  void decrement_surplus() {
+    assert_proper_lock_protection();
+    _allocation_stats.decrement_surplus();
+  }
+
+  ssize_t bfrSurp() const {
+    return _allocation_stats.bfrSurp();
+  }
+  void set_bfrSurp(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_bfrSurp(v);
+  }
+  ssize_t prevSweep() const {
+    return _allocation_stats.prevSweep();
+  }
+  void set_prevSweep(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_prevSweep(v);
+  }
+  ssize_t beforeSweep() const {
+    return _allocation_stats.beforeSweep();
+  }
+  void set_beforeSweep(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_beforeSweep(v);
+  }
+
+  ssize_t coalBirths() const {
+    return _allocation_stats.coalBirths();
+  }
+  void set_coalBirths(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coalBirths(v);
+  }
+  void increment_coalBirths() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_coalBirths();
+  }
+
+  ssize_t coalDeaths() const {
+    return _allocation_stats.coalDeaths();
+  }
+  void set_coalDeaths(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_coalDeaths(v);
+  }
+  void increment_coalDeaths() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_coalDeaths();
+  }
+
+  ssize_t splitBirths() const {
+    return _allocation_stats.splitBirths();
+  }
+  void set_splitBirths(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_splitBirths(v);
+  }
+  void increment_splitBirths() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_splitBirths();
+  }
+
+  ssize_t splitDeaths() const {
+    return _allocation_stats.splitDeaths();
+  }
+  void set_splitDeaths(ssize_t v) {
+    assert_proper_lock_protection();
+    _allocation_stats.set_splitDeaths(v);
+  }
+  void increment_splitDeaths() {
+    assert_proper_lock_protection();
+    _allocation_stats.increment_splitDeaths();
+  }
+
+  NOT_PRODUCT(
+    // For debugging.  The "_returnedBytes" in all the lists are summed
+    // and compared with the total number of bytes swept during a 
+    // collection.
+    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
+    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
+    void increment_returnedBytes_by(size_t v) { 
+      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v); 
+    }
+  )
+
+  // Unlink head of list and return it.  Returns NULL if
+  // the list is empty.
+  FreeChunk* getChunkAtHead();
+
+  // Remove the first "n" or "count", whichever is smaller, chunks from the 
+  // list, setting "fl", which is required to be empty, to point to them.
+  void getFirstNChunksFromList(size_t n, FreeList* fl);
+
+  // Unlink this chunk from its free list
+  void removeChunk(FreeChunk* fc);
+
+  // Add this chunk to this free list.
+  void returnChunkAtHead(FreeChunk* fc);
+  void returnChunkAtTail(FreeChunk* fc);
+
+  // Similar to returnChunk* but also records some diagnostic
+  // information.
+  void returnChunkAtHead(FreeChunk* fc, bool record_return);
+  void returnChunkAtTail(FreeChunk* fc, bool record_return);
+
+  // Prepend "fl" (whose size is required to be the same as that of "this")
+  // to the front of "this" list.
+  void prepend(FreeList* fl);
+
+  // Verify that the chunk "fc" is in this free list.
+  // Returns true if found, false otherwise.
+  bool verifyChunkInFreeLists(FreeChunk* fc) const;
+};
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Fri May 25 00:49:14 2007 +0000
@@ -24,15 +24,13 @@
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
 
-asParNewGeneration.hpp			adaptiveSizePolicy.hpp
-asParNewGeneration.hpp			parNewGeneration.hpp
+binaryTreeDictionary.cpp                allocationStats.hpp
+binaryTreeDictionary.cpp                binaryTreeDictionary.hpp
+binaryTreeDictionary.cpp                globals.hpp
+binaryTreeDictionary.cpp                ostream.hpp
 
-asParNewGeneration.cpp			asParNewGeneration.hpp
-asParNewGeneration.cpp			cmsAdaptiveSizePolicy.hpp
-asParNewGeneration.cpp                  cmsGCAdaptivePolicyCounters.hpp
-asParNewGeneration.cpp			defNewGeneration.inline.hpp
-asParNewGeneration.cpp			parNewGeneration.hpp
-asParNewGeneration.cpp                  referencePolicy.hpp
+binaryTreeDictionary.hpp                freeBlockDictionary.hpp
+binaryTreeDictionary.hpp                freeList.hpp
 
 cmsAdaptiveSizePolicy.cpp		cmsAdaptiveSizePolicy.hpp
 cmsAdaptiveSizePolicy.cpp		defNewGeneration.hpp
@@ -51,6 +49,132 @@
 cmsGCAdaptivePolicyCounters.hpp		gcStats.hpp
 cmsGCAdaptivePolicyCounters.hpp		perfData.hpp
 
+cmsLockVerifier.cpp                     cmsLockVerifier.hpp
+cmsLockVerifier.cpp                     concurrentMarkSweepThread.hpp
+cmsLockVerifier.cpp                     vmThread.hpp
+
+cmsLockVerifier.hpp                     mutex.hpp
+
+compactibleFreeListSpace.cpp            allocation.inline.hpp
+compactibleFreeListSpace.cpp            blockOffsetTable.inline.hpp
+compactibleFreeListSpace.cpp            cmsLockVerifier.hpp
+compactibleFreeListSpace.cpp            collectedHeap.hpp
+compactibleFreeListSpace.cpp            compactibleFreeListSpace.hpp
+compactibleFreeListSpace.cpp            concurrentMarkSweepGeneration.inline.hpp
+compactibleFreeListSpace.cpp            concurrentMarkSweepThread.hpp
+compactibleFreeListSpace.cpp            copy.hpp
+compactibleFreeListSpace.cpp            globals.hpp
+compactibleFreeListSpace.cpp            handles.inline.hpp
+compactibleFreeListSpace.cpp            init.hpp
+compactibleFreeListSpace.cpp            java.hpp
+compactibleFreeListSpace.cpp            liveRange.hpp
+compactibleFreeListSpace.cpp            oop.inline.hpp
+compactibleFreeListSpace.cpp            resourceArea.hpp
+compactibleFreeListSpace.cpp            universe.inline.hpp
+compactibleFreeListSpace.cpp            vmThread.hpp
+
+compactibleFreeListSpace.hpp            binaryTreeDictionary.hpp
+compactibleFreeListSpace.hpp            freeList.hpp
+compactibleFreeListSpace.hpp            space.hpp
+
+compactingPermGenGen.cpp                concurrentMarkSweepGeneration.inline.hpp
+
+concurrentGCThread.cpp                  concurrentGCThread.hpp
+concurrentGCThread.cpp                  init.hpp
+concurrentGCThread.cpp                  instanceRefKlass.hpp
+concurrentGCThread.cpp                  interfaceSupport.hpp
+concurrentGCThread.cpp                  java.hpp
+concurrentGCThread.cpp                  javaCalls.hpp
+concurrentGCThread.cpp                  oop.inline.hpp
+concurrentGCThread.cpp                  systemDictionary.hpp
+
+concurrentGCThread.hpp                  thread.hpp
+
+concurrentMarkSweepGeneration.cpp       cardTableRS.hpp
+concurrentMarkSweepGeneration.cpp       cmsAdaptiveSizePolicy.hpp
+concurrentMarkSweepGeneration.cpp       cmsGCAdaptivePolicyCounters.hpp
+concurrentMarkSweepGeneration.cpp       codeCache.hpp
+concurrentMarkSweepGeneration.cpp       collectedHeap.inline.hpp
+concurrentMarkSweepGeneration.cpp       collectorCounters.hpp
+concurrentMarkSweepGeneration.cpp       collectorPolicy.hpp
+concurrentMarkSweepGeneration.cpp       compactibleFreeListSpace.hpp
+concurrentMarkSweepGeneration.cpp       concurrentMarkSweepGeneration.inline.hpp
+concurrentMarkSweepGeneration.cpp       concurrentMarkSweepThread.hpp
+concurrentMarkSweepGeneration.cpp       gcLocker.inline.hpp
+concurrentMarkSweepGeneration.cpp       genCollectedHeap.hpp
+concurrentMarkSweepGeneration.cpp       genMarkSweep.hpp
+concurrentMarkSweepGeneration.cpp       genOopClosures.inline.hpp
+concurrentMarkSweepGeneration.cpp       globals_extension.hpp
+concurrentMarkSweepGeneration.cpp       handles.inline.hpp
+concurrentMarkSweepGeneration.cpp       isGCActiveMark.hpp
+concurrentMarkSweepGeneration.cpp       java.hpp
+concurrentMarkSweepGeneration.cpp       jvmtiExport.hpp
+concurrentMarkSweepGeneration.cpp       oop.inline.hpp
+concurrentMarkSweepGeneration.cpp       referencePolicy.hpp
+concurrentMarkSweepGeneration.cpp       resourceArea.hpp
+concurrentMarkSweepGeneration.cpp       runtimeService.hpp
+concurrentMarkSweepGeneration.cpp       symbolTable.hpp
+concurrentMarkSweepGeneration.cpp       systemDictionary.hpp
+concurrentMarkSweepGeneration.cpp       vmCMSOperations.hpp
+concurrentMarkSweepGeneration.cpp       vmThread.hpp
+
+concurrentMarkSweepGeneration.hpp       bitMap.hpp
+concurrentMarkSweepGeneration.hpp       freeBlockDictionary.hpp
+concurrentMarkSweepGeneration.hpp       gSpaceCounters.hpp
+concurrentMarkSweepGeneration.hpp       gcStats.hpp
+concurrentMarkSweepGeneration.hpp       generation.hpp
+concurrentMarkSweepGeneration.hpp       generationCounters.hpp
+concurrentMarkSweepGeneration.hpp       mutexLocker.hpp
+concurrentMarkSweepGeneration.hpp       taskqueue.hpp
+concurrentMarkSweepGeneration.hpp       virtualspace.hpp
+concurrentMarkSweepGeneration.hpp       yieldingWorkgroup.hpp
+
+concurrentMarkSweepGeneration.inline.hpp cmsLockVerifier.hpp
+concurrentMarkSweepGeneration.inline.hpp compactibleFreeListSpace.hpp
+concurrentMarkSweepGeneration.inline.hpp concurrentMarkSweepGeneration.hpp
+concurrentMarkSweepGeneration.inline.hpp concurrentMarkSweepThread.hpp
+concurrentMarkSweepGeneration.inline.hpp defNewGeneration.hpp
+concurrentMarkSweepGeneration.inline.hpp gcUtil.hpp
+
+concurrentMarkSweepThread.cpp           concurrentMarkSweepGeneration.inline.hpp
+concurrentMarkSweepThread.cpp           concurrentMarkSweepThread.hpp
+concurrentMarkSweepThread.cpp           genCollectedHeap.hpp
+concurrentMarkSweepThread.cpp           init.hpp
+concurrentMarkSweepThread.cpp           instanceRefKlass.hpp
+concurrentMarkSweepThread.cpp           interfaceSupport.hpp
+concurrentMarkSweepThread.cpp           java.hpp
+concurrentMarkSweepThread.cpp           javaCalls.hpp
+concurrentMarkSweepThread.cpp           mutexLocker.hpp
+concurrentMarkSweepThread.cpp           oop.inline.hpp
+concurrentMarkSweepThread.cpp           os.hpp
+concurrentMarkSweepThread.cpp           systemDictionary.hpp
+concurrentMarkSweepThread.cpp           vmThread.hpp
+
+concurrentMarkSweepThread.hpp           concurrentGCThread.hpp
+concurrentMarkSweepThread.hpp           concurrentMarkSweepGeneration.hpp
+concurrentMarkSweepThread.hpp           thread_<os_family>.inline.hpp
+
+freeBlockDictionary.cpp                 freeBlockDictionary.hpp
+freeBlockDictionary.cpp                 thread_<os_family>.inline.hpp
+
+freeBlockDictionary.hpp                 allocation.hpp
+freeBlockDictionary.hpp                 debug.hpp
+freeBlockDictionary.hpp                 globalDefinitions.hpp
+freeBlockDictionary.hpp                 memRegion.hpp
+freeBlockDictionary.hpp                 mutex.hpp
+freeBlockDictionary.hpp                 ostream.hpp
+
+freeChunk.cpp                           copy.hpp
+freeChunk.cpp                           freeBlockDictionary.hpp
+
+freeList.cpp                            freeBlockDictionary.hpp
+freeList.cpp                            freeList.hpp
+freeList.cpp                            globals.hpp
+freeList.cpp                            mutex.hpp
+freeList.cpp                            sharedHeap.hpp
+
+freeList.hpp                            allocationStats.hpp
+
 vmCMSOperations.cpp			concurrentMarkSweepGeneration.hpp
 vmCMSOperations.cpp			concurrentMarkSweepThread.hpp
 vmCMSOperations.cpp			dtrace.hpp
@@ -59,3 +183,7 @@
 
 vmCMSOperations.hpp			gcCause.hpp
 vmCMSOperations.hpp			vmGCOperations.hpp
+
+yieldingWorkgroup.cpp                   yieldingWorkgroup.hpp
+
+yieldingWorkgroup.hpp                   workgroup.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,52 @@
+// @(#)includeDB_gc_parNew	1.2 07/04/24 19:40:42
+//
+// Copyright 1993-2002 Sun Microsystems, Inc.  All rights reserved.
+// SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
+//
+
+// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
+
+asParNewGeneration.hpp			adaptiveSizePolicy.hpp
+asParNewGeneration.hpp			parNewGeneration.hpp
+
+asParNewGeneration.cpp			asParNewGeneration.hpp
+asParNewGeneration.cpp			cmsAdaptiveSizePolicy.hpp
+asParNewGeneration.cpp                  cmsGCAdaptivePolicyCounters.hpp
+asParNewGeneration.cpp			defNewGeneration.inline.hpp
+asParNewGeneration.cpp			parNewGeneration.hpp
+asParNewGeneration.cpp                  referencePolicy.hpp
+
+parGCAllocBuffer.cpp                    arrayOop.hpp
+parGCAllocBuffer.cpp                    oop.inline.hpp
+parGCAllocBuffer.cpp                    parGCAllocBuffer.hpp
+parGCAllocBuffer.cpp                    sharedHeap.hpp
+
+parGCAllocBuffer.hpp                    allocation.hpp
+parGCAllocBuffer.hpp                    globalDefinitions.hpp
+parGCAllocBuffer.hpp                    threadLocalAllocBuffer.hpp
+
+parNewGeneration.cpp                    adaptiveSizePolicy.hpp
+parNewGeneration.cpp                    ageTable.hpp
+parNewGeneration.cpp                    copy.hpp
+parNewGeneration.cpp                    defNewGeneration.inline.hpp
+parNewGeneration.cpp                    genCollectedHeap.hpp
+parNewGeneration.cpp                    genOopClosures.inline.hpp
+parNewGeneration.cpp                    generation.hpp
+parNewGeneration.cpp                    generation.inline.hpp
+parNewGeneration.cpp                    globalDefinitions.hpp
+parNewGeneration.cpp                    handles.hpp
+parNewGeneration.cpp                    handles.inline.hpp
+parNewGeneration.cpp                    java.hpp
+parNewGeneration.cpp                    objArrayOop.hpp
+parNewGeneration.cpp                    oop.inline.hpp
+parNewGeneration.cpp                    parGCAllocBuffer.hpp
+parNewGeneration.cpp                    parNewGeneration.hpp
+parNewGeneration.cpp                    referencePolicy.hpp
+parNewGeneration.cpp                    resourceArea.hpp
+parNewGeneration.cpp                    sharedHeap.hpp
+parNewGeneration.cpp                    space.hpp
+parNewGeneration.cpp                    workgroup.hpp
+
+parNewGeneration.hpp                    defNewGeneration.hpp
+parNewGeneration.hpp                    parGCAllocBuffer.hpp
+parNewGeneration.hpp                    taskqueue.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Fri May 25 00:49:14 2007 +0000
@@ -212,10 +212,6 @@
 psMarkSweep.cpp                         gcCause.hpp
 psMarkSweep.cpp                         gcLocker.inline.hpp
 psMarkSweep.cpp                         isGCActiveMark.hpp
-// #ifdef JVMPI_SUPPORT
-// psMarkSweep.cpp                         jvmpi.hpp
-// psMarkSweep.cpp                         jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 psMarkSweep.cpp                         oop.inline.hpp
 psMarkSweep.cpp                         memoryService.hpp
 psMarkSweep.cpp                         management.hpp
@@ -255,10 +251,6 @@
 psParallelCompact.cpp			gcLocker.inline.hpp
 psParallelCompact.cpp                   gcTaskManager.hpp
 psParallelCompact.cpp			isGCActiveMark.hpp
-// #ifdef JVMPI_SUPPORT
-// psParallelCompact.cpp			jvmpi.hpp
-// psParallelCompact.cpp			jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 psParallelCompact.cpp			oop.inline.hpp
 psParallelCompact.cpp			oop.pcgc.inline.hpp
 psParallelCompact.cpp			memoryService.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Fri May 25 00:49:14 2007 +0000
@@ -111,9 +111,6 @@
 markSweep.cpp                           oop.inline.hpp
 
 markSweep.hpp                           growableArray.hpp
-// #ifdef JVMPI_SUPPORT
-// markSweep.hpp                           jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 markSweep.hpp                           markOop.hpp
 markSweep.hpp                           oop.hpp
 markSweep.hpp                           timer.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,633 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)asParNewGeneration.cpp	1.11 07/05/05 17:05:25 JVM"
+#endif
+/*
+ * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_asParNewGeneration.cpp.incl"
+
+ASParNewGeneration::ASParNewGeneration(ReservedSpace rs, 
+				       size_t initial_byte_size, 
+				       size_t min_byte_size,
+				       int level) :
+  ParNewGeneration(rs, initial_byte_size, level), 
+  _min_gen_size(min_byte_size) {}
+
+const char* ASParNewGeneration::name() const {
+  return "adaptive size par new generation";
+}
+
+void ASParNewGeneration::adjust_desired_tenuring_threshold() {
+  assert(UseAdaptiveSizePolicy, 
+    "Should only be used with UseAdaptiveSizePolicy");
+}
+
+void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
+  // Resize the generation if needed. If the generation resize
+  // reports false, do not attempt to resize the spaces.
+  if (resize_generation(eden_size, survivor_size)) {
+    // Then we lay out the spaces inside the generation
+    resize_spaces(eden_size, survivor_size);
+
+    space_invariants();
+
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr("Young generation size: "
+        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
+        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
+        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
+        eden_size, survivor_size, used(), capacity(),
+        max_gen_size(), min_gen_size());
+    }
+  }
+}
+
+size_t ASParNewGeneration::available_to_min_gen() {
+  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
+  return virtual_space()->committed_size() - min_gen_size();
+}
+
+// This method assumes that from-space has live data and that
+// any shrinkage of the young gen is limited by location of
+// from-space.
+size_t ASParNewGeneration::available_to_live() const {
+#undef SHRINKS_AT_END_OF_EDEN
+#ifdef SHRINKS_AT_END_OF_EDEN
+  size_t delta_in_survivor = 0;
+  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  const size_t space_alignment = heap->intra_generation_alignment();
+  const size_t gen_alignment = heap->generation_alignment();
+
+  MutableSpace* space_shrinking = NULL;
+  if (from_space()->end() > to_space()->end()) {
+    space_shrinking = from_space();
+  } else {
+    space_shrinking = to_space();
+  }
+
+  // Include any space that is committed but not included in
+  // the survivor spaces.
+  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
+    "Survivor space beyond high end");
+  size_t unused_committed = pointer_delta(virtual_space()->high(),
+    space_shrinking->end(), sizeof(char));   
+
+  if (space_shrinking->is_empty()) {
+    // Don't let the space shrink to 0
+    assert(space_shrinking->capacity_in_bytes() >= space_alignment, 
+      "Space is too small");
+    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
+  } else {
+    delta_in_survivor = pointer_delta(space_shrinking->end(), 
+				      space_shrinking->top(),
+				      sizeof(char));
+  }
+
+  size_t delta_in_bytes = unused_committed + delta_in_survivor;
+  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
+  return delta_in_bytes;
+#else
+  // The only space available for shrinking is in to-space if it
+  // is above from-space.
+  if (to()->bottom() > from()->bottom()) {
+    const size_t alignment = os::vm_page_size();
+    if (to()->capacity() < alignment) {
+      return 0;
+    } else {
+      return to()->capacity() - alignment;
+    }
+  } else {
+    return 0;
+  }
+#endif
+}
+
+// Return the number of bytes available for resizing down the young
+// generation.  This is the minimum of
+// 	input "bytes"
+//	bytes to the minimum young gen size
+//	bytes to the size currently being used + some small extra
+size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
+  // Allow shrinkage into the current eden but keep eden large enough
+  // to maintain the minimum young gen size
+  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
+  return align_size_down(bytes, os::vm_page_size());
+}
+
+// Note that the alignment used is the OS page size as
+// opposed to an alignment associated with the virtual space
+// (as is done in the ASPSYoungGen/ASPSOldGen)
+bool ASParNewGeneration::resize_generation(size_t eden_size, 
+					   size_t survivor_size) {
+  const size_t alignment = os::vm_page_size();
+  size_t orig_size = virtual_space()->committed_size();
+  bool size_changed = false;
+
+  // There used to be this guarantee there.
+  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
+  // Code below forces this requirement.  In addition the desired eden
+  // size and desired survivor sizes are desired goals and may
+  // exceed the total generation size.
+
+  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(), 
+    "just checking");
+
+  // Adjust new generation size
+  const size_t eden_plus_survivors =
+	  align_size_up(eden_size + 2 * survivor_size, alignment);
+  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()), 
+			     min_gen_size());
+  assert(desired_size <= max_gen_size(), "just checking");
+
+  if (desired_size > orig_size) {
+    // Grow the generation
+    size_t change = desired_size - orig_size;
+    assert(change % alignment == 0, "just checking");
+    if (!virtual_space()->expand_by(change)) {
+      return false; // Error if we fail to resize!
+    }
+
+    size_changed = true;
+  } else if (desired_size < orig_size) {
+    size_t desired_change = orig_size - desired_size;
+    assert(desired_change % alignment == 0, "just checking");
+
+    desired_change = limit_gen_shrink(desired_change);
+
+    if (desired_change > 0) {
+      virtual_space()->shrink_by(desired_change);
+      reset_survivors_after_shrink();
+
+      size_changed = true;
+    }
+  } else {
+    if (Verbose && PrintGC) {
+      if (orig_size == max_gen_size()) {
+        gclog_or_tty->print_cr("ASParNew generation size at maximum: "
+          SIZE_FORMAT "K", orig_size/K);
+      } else if (orig_size == min_gen_size()) {
+        gclog_or_tty->print_cr("ASParNew generation size at minimum: "
+          SIZE_FORMAT "K", orig_size/K);
+      }
+    }
+  }
+
+  if (size_changed) {
+    MemRegion cmr((HeapWord*)virtual_space()->low(),
+                  (HeapWord*)virtual_space()->high());
+    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);
+
+    if (Verbose && PrintGC) {
+      size_t current_size  = virtual_space()->committed_size();
+      gclog_or_tty->print_cr("ASParNew generation size changed: "
+			     SIZE_FORMAT "K->" SIZE_FORMAT "K",
+			     orig_size/K, current_size/K);
+    }
+  }
+
+  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
+	    virtual_space()->committed_size() == max_gen_size(), "Sanity");
+
+  return true;
+}
+
+void ASParNewGeneration::reset_survivors_after_shrink() {
+
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  HeapWord* new_end = (HeapWord*)virtual_space()->high();
+  
+  if (from()->end() > to()->end()) {
+    assert(new_end >= from()->end(), "Shrinking past from-space");
+  } else {
+    assert(new_end >= to()->bottom(), "Shrink was too large");
+    // Was there a shrink of the survivor space?
+    if (new_end < to()->end()) {
+      MemRegion mr(to()->bottom(), new_end);
+      to()->initialize(mr, false /* clear */);
+    }
+  }
+}
+void ASParNewGeneration::resize_spaces(size_t requested_eden_size, 
+			               size_t requested_survivor_size) {
+  assert(UseAdaptiveSizePolicy, "sanity check");
+  assert(requested_eden_size > 0  && requested_survivor_size > 0, 
+	 "just checking");
+  CollectedHeap* heap = Universe::heap();
+  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
+      
+
+  // We require eden and to space to be empty
+  if ((!eden()->is_empty()) || (!to()->is_empty())) {
+    return;
+  }
+
+  size_t cur_eden_size = eden()->capacity();
+
+  if (PrintAdaptiveSizePolicy && Verbose) {
+    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: " 
+                  SIZE_FORMAT 
+                  ", requested_survivor_size: " SIZE_FORMAT ")",
+                  requested_eden_size, requested_survivor_size);
+    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+                  SIZE_FORMAT, 
+                  eden()->bottom(), 
+                  eden()->end(), 
+                  pointer_delta(eden()->end(),
+                                eden()->bottom(),
+                                sizeof(char)));
+    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+		  SIZE_FORMAT, 
+                  from()->bottom(), 
+                  from()->end(), 
+                  pointer_delta(from()->end(),
+                                from()->bottom(),
+                                sizeof(char)));
+    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " 
+		  SIZE_FORMAT, 
+                  to()->bottom(),   
+                  to()->end(), 
+                  pointer_delta(  to()->end(),
+                                  to()->bottom(),
+                                  sizeof(char)));
+  }
+
+  // There's nothing to do if the new sizes are the same as the current
+  if (requested_survivor_size == to()->capacity() && 
+      requested_survivor_size == from()->capacity() &&
+      requested_eden_size == eden()->capacity()) {
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
+    }
+    return;
+  }
+  
+  char* eden_start = (char*)eden()->bottom();
+  char* eden_end   = (char*)eden()->end();   
+  char* from_start = (char*)from()->bottom();
+  char* from_end   = (char*)from()->end();
+  char* to_start   = (char*)to()->bottom();
+  char* to_end     = (char*)to()->end();
+
+  const size_t alignment = os::vm_page_size();
+  const bool maintain_minimum = 
+    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
+
+  // Check whether from space is below to space
+  if (from_start < to_start) {
+    // Eden, from, to
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr("  Eden, from, to:");
+    }
+
+    // Set eden
+    // "requested_eden_size" is a goal for the size of eden
+    // and may not be attainable.  "eden_size" below is
+    // calculated based on the location of from-space and
+    // the goal for the size of eden.  from-space is
+    // fixed in place because it contains live data.
+    // The calculation is done this way to avoid 32bit
+    // overflow (i.e., eden_start + requested_eden_size
+    // may be too large for representation in 32bits).
+    size_t eden_size;
+    if (maintain_minimum) {
+      // Only make eden larger than the requested size if
+      // the minimum size of the generation has to be maintained.
+      // This could be done in general but policy at a higher
+      // level is determining a requested size for eden and that
+      // should be honored unless there is a fundamental reason.
+      eden_size = pointer_delta(from_start, 
+				eden_start, 
+				sizeof(char));
+    } else {
+      eden_size = MIN2(requested_eden_size,
+                       pointer_delta(from_start, eden_start, sizeof(char)));
+    }
+
+// tty->print_cr("eden_size before: " SIZE_FORMAT, eden_size);
+    eden_size = align_size_down(eden_size, alignment);
+// tty->print_cr("eden_size after: " SIZE_FORMAT, eden_size);
+    eden_end = eden_start + eden_size;
+    assert(eden_end >= eden_start, "addition overflowed");
+
+    // To may resize into from space as long as it is clear of live data.
+    // From space must remain page aligned, though, so we need to do some
+    // extra calculations.
+
+    // First calculate an optimal to-space
+    to_end   = (char*)virtual_space()->high();
+    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, 
+				    sizeof(char));
+
+    // Does the optimal to-space overlap from-space?
+    if (to_start < (char*)from()->end()) {
+      // Calculate the minimum offset possible for from_end
+      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));
+
+      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
+      if (from_size == 0) {
+        from_size = alignment;
+      } else {
+        from_size = align_size_up(from_size, alignment);
+      }
+
+      from_end = from_start + from_size;
+      assert(from_end > from_start, "addition overflow or from_size problem");
+
+      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");
+
+      // Now update to_start with the new from_end
+      to_start = MAX2(from_end, to_start);
+    } else {
+      // If shrinking, move to-space down to abut the end of from-space
+      // so that shrinking will move to-space down.  If not shrinking
+      // to-space is moving up to allow for growth on the next expansion.
+      if (requested_eden_size <= cur_eden_size) {
+        to_start = from_end;
+        if (to_start + requested_survivor_size > to_start) {
+	  to_end = to_start + requested_survivor_size;
+        }
+      }
+      // else leave to_end pointing to the high end of the virtual space.
+    }
+
+    guarantee(to_start != to_end, "to space is zero sized");
+      
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+                    eden_start, 
+                    eden_end, 
+                    pointer_delta(eden_end, eden_start, sizeof(char)));
+      gclog_or_tty->print_cr("    [from_start .. from_end): "
+                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+                    from_start, 
+                    from_end, 
+                    pointer_delta(from_end, from_start, sizeof(char)));
+      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
+                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+                    to_start,   
+                    to_end, 
+                    pointer_delta(  to_end,   to_start, sizeof(char)));
+    }
+  } else {
+    // Eden, to, from
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr("  Eden, to, from:");
+    }
+
+    // Calculate the to-space boundaries based on
+    // the start of from-space.
+    to_end = from_start;
+    to_start = (char*)pointer_delta(from_start, 
+                                    (char*)requested_survivor_size, 
+				    sizeof(char));
+    // Calculate the ideal eden boundaries.
+    // eden_end is already at the bottom of the generation
+    assert(eden_start == virtual_space()->low(), 
+      "Eden is not starting at the low end of the virtual space");
+    if (eden_start + requested_eden_size >= eden_start) {
+      eden_end = eden_start + requested_eden_size;
+    } else {
+      eden_end = to_start;
+    }
+
+    // Does eden intrude into to-space?  to-space
+    // gets priority but eden is not allowed to shrink
+    // to 0.
+    if (eden_end > to_start) {
+      eden_end = to_start;
+    }
+
+    // Don't let eden shrink down to 0 or less.
+    eden_end = MAX2(eden_end, eden_start + alignment);
+    assert(eden_start + alignment >= eden_start, "Overflow");
+
+    size_t eden_size;
+    if (maintain_minimum) {
+      // Use all the space available.
+      eden_end = MAX2(eden_end, to_start);
+      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
+      eden_size = MIN2(eden_size, cur_eden_size);
+    } else {
+      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
+    }
+    eden_size = align_size_down(eden_size, alignment);
+    assert(maintain_minimum || eden_size <= requested_eden_size, 
+      "Eden size is too large");
+    assert(eden_size >= alignment, "Eden size is too small");
+    eden_end = eden_start + eden_size;
+
+    // Move to-space down to eden.
+    if (requested_eden_size < cur_eden_size) {
+      to_start = eden_end;
+      if (to_start + requested_survivor_size > to_start) {
+        to_end = MIN2(from_start, to_start + requested_survivor_size);
+      } else {
+        to_end = from_start;
+      }
+    }
+
+    // eden_end may have moved so again make sure
+    // the to-space and eden don't overlap.
+    to_start = MAX2(eden_end, to_start);
+
+    // from-space
+    size_t from_used = from()->used();
+    if (requested_survivor_size > from_used) {
+      if (from_start + requested_survivor_size >= from_start) {
+        from_end = from_start + requested_survivor_size;
+      }
+      if (from_end > virtual_space()->high()) {
+	from_end = virtual_space()->high();
+      }
+    }
+
+    assert(to_start >= eden_end, "to-space should be above eden");
+    if (PrintAdaptiveSizePolicy && Verbose) {
+      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
+                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+                    eden_start, 
+                    eden_end, 
+                    pointer_delta(eden_end, eden_start, sizeof(char)));
+      gclog_or_tty->print_cr("    [  to_start ..   to_end): " 
+                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+                    to_start,   
+                    to_end, 
+                    pointer_delta(  to_end,   to_start, sizeof(char)));
+      gclog_or_tty->print_cr("    [from_start .. from_end): " 
+                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, 
+                    from_start, 
+                    from_end, 
+                    pointer_delta(from_end, from_start, sizeof(char)));
+    }
+  }
+  
+
+  guarantee((HeapWord*)from_start <= from()->bottom(), 
+            "from start moved to the right");
+  guarantee((HeapWord*)from_end >= from()->top(),
+            "from end moved into live data");
+  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
+  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
+  assert(is_object_aligned((intptr_t)to_start), "checking alignment");
+
+  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
+  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
+  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
+
+  // Let's make sure the call to initialize doesn't reset "top"!
+  HeapWord* old_from_top = from()->top();
+
+  // For PrintAdaptiveSizePolicy block  below
+  size_t old_from = from()->capacity();
+  size_t old_to   = to()->capacity();
+
+  // The call to initialize NULL's the next compaction space
+  eden()->initialize(edenMR, true);
+  eden()->set_next_compaction_space(from());
+    to()->initialize(toMR  , true);
+  from()->initialize(fromMR, false);     // Note, not cleared!
+
+  assert(from()->top() == old_from_top, "from top changed!");
+
+  if (PrintAdaptiveSizePolicy) {
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
+
+    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
+                  "collection: %d "
+                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
+                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
+                  gch->total_collections(),
+                  old_from, old_to,
+                  from()->capacity(),
+                  to()->capacity());
+    gclog_or_tty->cr();
+  }
+}
+
+void ASParNewGeneration::compute_new_size() {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
+    "not a CMS generational heap");
+
+
+  CMSAdaptiveSizePolicy* size_policy = 
+    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
+  assert(size_policy->is_gc_cms_adaptive_size_policy(),
+    "Wrong type of size policy");
+
+  size_t survived = from()->used();
+  if (!survivor_overflow()) {
+    // Keep running averages on how much survived
+    size_policy->avg_survived()->sample(survived);
+  } else {
+    size_t promoted = 
+      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
+    assert(promoted < gch->capacity(), "Conversion problem?");
+    size_t survived_guess = survived + promoted;
+    size_policy->avg_survived()->sample(survived_guess);
+  }
+
+  size_t survivor_limit = max_survivor_size();
+  _tenuring_threshold =
+    size_policy->compute_survivor_space_size_and_threshold(
+                                                     _survivor_overflow,
+                                                     _tenuring_threshold,
+                                                     survivor_limit);
+  size_policy->avg_young_live()->sample(used());
+  size_policy->avg_eden_live()->sample(eden()->used());
+
+  size_policy->compute_young_generation_free_space(eden()->capacity(),
+                                                   max_gen_size());
+
+  resize(size_policy->calculated_eden_size_in_bytes(), 
+	 size_policy->calculated_survivor_size_in_bytes());
+
+  if (UsePerfData) {
+    CMSGCAdaptivePolicyCounters* counters = 
+      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
+    assert(counters->kind() == 
+	   GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
+      "Wrong kind of counters");
+    counters->update_tenuring_threshold(_tenuring_threshold);
+    counters->update_survivor_overflowed(_survivor_overflow);
+    counters->update_young_capacity(capacity());
+  }
+}
+
+
+#ifndef PRODUCT
+// Changes from PSYoungGen version
+//	value of "alignment"
+void ASParNewGeneration::space_invariants() {
+  const size_t alignment = os::vm_page_size();
+
+  // Currently, our eden size cannot shrink to zero
+  guarantee(eden()->capacity() >= alignment, "eden too small");
+  guarantee(from()->capacity() >= alignment, "from too small");
+  guarantee(to()->capacity() >= alignment, "to too small");
+
+  // Relationship of spaces to each other
+  char* eden_start = (char*)eden()->bottom();
+  char* eden_end   = (char*)eden()->end();   
+  char* from_start = (char*)from()->bottom();
+  char* from_end   = (char*)from()->end();
+  char* to_start   = (char*)to()->bottom();
+  char* to_end     = (char*)to()->end();
+
+  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
+  guarantee(eden_start < eden_end, "eden space consistency");
+  guarantee(from_start < from_end, "from space consistency");
+  guarantee(to_start < to_end, "to space consistency");
+
+  // Check whether from space is below to space
+  if (from_start < to_start) {
+    // Eden, from, to
+    guarantee(eden_end <= from_start, "eden/from boundary");
+    guarantee(from_end <= to_start,   "from/to boundary");
+    guarantee(to_end <= virtual_space()->high(), "to end");
+  } else {
+    // Eden, to, from
+    guarantee(eden_end <= to_start, "eden/to boundary");
+    guarantee(to_end <= from_start, "to/from boundary");
+    guarantee(from_end <= virtual_space()->high(), "from end");
+  }
+
+  // More checks that the virtual space is consistent with the spaces
+  assert(virtual_space()->committed_size() >=
+    (eden()->capacity() +
+     to()->capacity() +
+     from()->capacity()), "Committed size is inconsistent");
+  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
+    "Space invariant");
+  char* eden_top = (char*)eden()->top();
+  char* from_top = (char*)from()->top();
+  char* to_top = (char*)to()->top();
+  assert(eden_top <= virtual_space()->high(), "eden top");
+  assert(from_top <= virtual_space()->high(), "from top");
+  assert(to_top <= virtual_space()->high(), "to top");
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,93 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)asParNewGeneration.hpp	1.8 07/05/05 17:05:25 JVM"
+#endif
+/*
+ * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// A Generation that does parallel young-gen collection extended
+// for adaptive size policy.
+
+// Division of generation into spaces
+// done by DefNewGeneration::compute_space_boundaries()
+//	+---------------+
+//	| uncommitted	|
+//	|---------------|
+//	| ss0		|
+//	|---------------|
+//	| ss1		|
+//	|---------------|
+//	|		|
+//	| eden		|
+//	|		|
+//	+---------------+	<-- low end of VirtualSpace
+//
+class ASParNewGeneration: public ParNewGeneration {
+
+  size_t _min_gen_size;
+
+  // Resize the generation based on the desired sizes of 
+  // the constituent spaces.
+  bool resize_generation(size_t eden_size, size_t survivor_size);
+  // Resize the spaces based on their desired sizes but
+  // respecting the maximum size of the generation.
+  void resize_spaces(size_t eden_size, size_t survivor_size);
+  // Return the byte size remaining to the minimum generation size.
+  size_t available_to_min_gen();
+  // Return the byte size remaining to the live data in the generation.
+  size_t available_to_live() const;
+  // Return the byte size that the generation is allowed to shrink.
+  size_t limit_gen_shrink(size_t bytes);
+  // Reset the size of the spaces after a shrink of the generation.
+  void reset_survivors_after_shrink();
+
+  // Accessor
+  VirtualSpace* virtual_space() { return &_virtual_space; }
+
+  virtual void adjust_desired_tenuring_threshold();
+
+ public:
+
+  ASParNewGeneration(ReservedSpace rs, 
+		     size_t initial_byte_size, 
+		     size_t min_byte_size,
+		     int level);
+
+  virtual const char* short_name() const { return "ASParNew"; }
+  virtual const char* name() const;
+  virtual Generation::Name kind() { return ASParNew; }
+
+  // Change the sizes of eden and the survivor spaces in
+  // the generation.  The parameters are desired sizes
+  // and are not guaranteed to be met.  For example, the requests
+  // cannot be met if their total is larger than the generation.
+  void resize(size_t eden_size, size_t survivor_size);
+
+  virtual void compute_new_size();
+
+  size_t max_gen_size()                 { return _reserved.byte_size(); }
+  size_t min_gen_size() const		{ return _min_gen_size; }
+
+  // Space boundary invariant checker
+  void space_invariants() PRODUCT_RETURN;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,130 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)parGCAllocBuffer.cpp	1.27 07/05/05 17:05:53 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_parGCAllocBuffer.cpp.incl"
+
+ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
+  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
+  _end(NULL), _hard_end(NULL),
+  _retained(false), _retained_filler(),
+  _allocated(0), _wasted(0)
+{
+  assert (min_size() > AlignmentReserve, "Inconsistency!");
+}
+
+const size_t ParGCAllocBuffer::FillerHeaderSize =
+             align_object_size(arrayOopDesc::header_size(T_INT));
+
+// If the minimum object size is greater than MinObjAlignment, we can
+// end up with a shard at the end of the buffer that's smaller than
+// the smallest object.  We can't allow that because the buffer must
+// look like it's full of objects when we retire it, so we make
+// sure we have enough space for a filler int array object.
+const size_t ParGCAllocBuffer::AlignmentReserve =
+             oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
+
+void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
+  assert(!retain || end_of_gc, "Can only retain at GC end.");
+  if (_retained) {
+    // If the buffer had been retained shorten the previous filler object.
+    assert(_retained_filler.end() <= _top, "INVARIANT");
+    SharedHeap::fill_region_with_object(_retained_filler);
+    // Wasted space book-keeping, otherwise (normally) done in invalidate()
+    _wasted += _retained_filler.word_size();
+    _retained = false;
+  }
+  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
+  if (_top < _hard_end) {
+    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
+    if (!retain) {
+      invalidate();
+    } else {
+      // Is there wasted space we'd like to retain for the next GC?
+      if (pointer_delta(_end, _top) > FillerHeaderSize) {
+	_retained = true;
+	_retained_filler = MemRegion(_top, FillerHeaderSize);
+	_top = _top + FillerHeaderSize;
+      } else {
+        invalidate();
+      }
+    }
+  }
+}
+
+void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
+  assert(ResizePLAB, "Wasted work");
+  stats->add_allocated(_allocated);
+  stats->add_wasted(_wasted);
+  stats->add_unused(pointer_delta(_end, _top));
+}
+
+// Compute desired plab size and latch result for later
+// use. This should be called once at the end of parallel
+// scavenge; it clears the sensor accumulators.
+void PLABStats::adjust_desired_plab_sz() {
+  assert(ResizePLAB, "Not set");
+  if (_allocated == 0) {
+    assert(_unused == 0, "Inconsistency in PLAB stats");
+    _allocated = 1;
+  }
+  double wasted_frac    = (double)_unused/(double)_allocated;
+  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
+                                   TargetPLABWastePct);
+  if (target_refills == 0) {
+    target_refills = 1;
+  }
+  _used = _allocated - _wasted - _unused;
+  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
+  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
+  // Take historical weighted average
+  _filter.sample(plab_sz);
+  // Clip from above and below, and align to object boundary
+  plab_sz = MAX2(min_size(), (size_t)_filter.average());
+  plab_sz = MIN2(max_size(), plab_sz);
+  plab_sz = align_object_size(plab_sz);
+  // Latch the result
+  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
+  if (ResizePLAB) {
+    _desired_plab_sz = plab_sz;
+  }
+  // Now clear the accumulators for next round:
+  // note this needs to be fixed in the case where we
+  // are retaining across scavenges. FIX ME !!! XXX
+  _allocated = 0;
+  _wasted    = 0;
+  _unused    = 0;
+}
+
+#ifndef PRODUCT
+void ParGCAllocBuffer::print() {
+  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p"
+             "_retained: %c _retained_filler: [%p,%p)\n",
+             _bottom, _top, _end, _hard_end,
+             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
+}
+#endif // !PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,205 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)parGCAllocBuffer.hpp	1.29 07/05/05 17:05:53 JVM"
+#endif
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+// Forward decl.
+
+class PLABStats;
+
+// A per-thread allocation buffer used during GC.
+class ParGCAllocBuffer: public CHeapObj {
+protected:
+  char head[32];
+  size_t _word_sz;          // in HeapWord units
+  HeapWord* _bottom;
+  HeapWord* _top;
+  HeapWord* _end;       // last allocatable address + 1
+  HeapWord* _hard_end;  // _end + AlignmentReserve
+  bool      _retained;  // whether we hold a _retained_filler
+  MemRegion _retained_filler;
+  // In support of ergonomic sizing of PLAB's
+  size_t    _allocated;     // in HeapWord units
+  size_t    _wasted;        // in HeapWord units
+  char tail[32];
+  static const size_t FillerHeaderSize;
+  static const size_t AlignmentReserve;
+
+public:
+  // Initializes the buffer to be empty, but with the given "word_sz".
+  // Must get initialized with "set_buf" for an allocation to succeed.
+  ParGCAllocBuffer(size_t word_sz);
+
+  static const size_t min_size() {
+    return ThreadLocalAllocBuffer::min_size();
+  }
+
+  static const size_t max_size() {
+    return ThreadLocalAllocBuffer::max_size();
+  }
+
+  // If an allocation of the given "word_sz" can be satisfied within the
+  // buffer, do the allocation, returning a pointer to the start of the
+  // allocated block.  If the allocation request cannot be satisfied,
+  // return NULL.
+  HeapWord* allocate(size_t word_sz) {
+    HeapWord* res = _top;
+    HeapWord* new_top = _top + word_sz;
+    if (new_top <= _end) {
+      _top = new_top;
+      return res;
+    } else {
+      return NULL;
+    }
+  }
+
+  // Undo the last allocation in the buffer, which is required to be of the 
+  // "obj" of the given "word_sz".
+  void undo_allocation(HeapWord* obj, size_t word_sz) {
+    assert(_top - word_sz >= _bottom
+	   && _top - word_sz == obj,
+	   "Bad undo_allocation");
+    _top = _top - word_sz;
+  }
+
+  // The total (word) size of the buffer, including both allocated and
+  // unallocated space.
+  size_t word_sz() { return _word_sz; }
+
+  // Should only be done if we are about to reset with a new buffer of the
+  // given size.
+  void set_word_size(size_t new_word_sz) {
+    assert(new_word_sz > AlignmentReserve, "Too small");
+    _word_sz = new_word_sz;
+  }
+
+  // The number of words of unallocated space remaining in the buffer.
+  size_t words_remaining() {
+    assert(_end >= _top, "Negative buffer");
+    return pointer_delta(_end, _top, HeapWordSize);
+  }
+
+  bool contains(void* addr) {
+    return (void*)_bottom <= addr && addr < (void*)_hard_end;
+  }
+
+  // Sets the space of the buffer to be [buf, space+word_sz()).
+  void set_buf(HeapWord* buf) {
+    _bottom   = buf;
+    _top      = _bottom;
+    _hard_end = _bottom + word_sz();
+    _end      = _hard_end - AlignmentReserve;
+    assert(_end >= _top, "Negative buffer");
+    // In support of ergonomic sizing
+    _allocated += word_sz();
+  }
+
+  // Flush the stats supporting ergonomic sizing of PLAB's
+  void flush_stats(PLABStats* stats);
+  void flush_stats_and_retire(PLABStats* stats, bool retain) {
+    // We flush the stats first in order to get a reading of
+    // unused space in the last buffer.
+    if (ResizePLAB) {
+      flush_stats(stats);
+    }
+    // Retire the last allocation buffer.
+    retire(true, retain);
+  }
+
+  // Force future allocations to fail and queries for contains()
+  // to return false
+  void invalidate() {
+    assert(!_retained, "Shouldn't retain an invalidated buffer.");
+    _end    = _hard_end;
+    _wasted += pointer_delta(_end, _top);  // unused  space
+    _top    = _end;      // force future allocations to fail
+    _bottom = _end;      // force future contains() queries to return false
+  }
+
+  // Fills in the unallocated portion of the buffer with a garbage object.
+  // If "end_of_gc" is TRUE, this is after the last use in the GC.  If "retain"
+  // is true, attempt to re-use the unused portion in the next GC.
+  void retire(bool end_of_gc, bool retain);
+
+  void print() PRODUCT_RETURN;
+};
+
+// PLAB stats book-keeping
+class PLABStats VALUE_OBJ_CLASS_SPEC {
+  size_t _allocated;      // total allocated
+  size_t _wasted;         // of which wasted (internal fragmentation)
+  size_t _unused;         // Unused in last buffer
+  size_t _used;           // derived = allocated - wasted - unused
+  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
+  AdaptiveWeightedAverage
+         _filter;         // integrator with decay
+  
+ public:
+  PLABStats(size_t desired_plab_sz_, unsigned wt) :
+    _allocated(0),
+    _wasted(0),
+    _unused(0),
+    _used(0),
+    _desired_plab_sz(desired_plab_sz_),
+    _filter(wt)
+  {
+    size_t min_sz = min_size();
+    size_t max_sz = max_size();
+    size_t aligned_min_sz = align_object_size(min_sz);
+    size_t aligned_max_sz = align_object_size(max_sz);
+    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
+           min_sz <= max_sz,
+           "PLAB clipping computation in adjust_desired_plab_sz()"
+           " may be incorrect");
+  }
+
+  static const size_t min_size() {
+    return ParGCAllocBuffer::min_size();
+  }
+
+  static const size_t max_size() {
+    return ParGCAllocBuffer::max_size();
+  }
+
+  size_t desired_plab_sz() {
+    return _desired_plab_sz;
+  }
+
+  void adjust_desired_plab_sz(); // filter computation, latches output to
+                                 // _desired_plab_sz, clears sensor accumulators
+
+  void add_allocated(size_t v) {
+    Atomic::add_ptr(v, &_allocated);
+  }
+
+  void add_unused(size_t v) {
+    Atomic::add_ptr(v, &_unused);
+  }
+
+  void add_wasted(size_t v) {
+    Atomic::add_ptr(v, &_wasted);
+  }
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,1203 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)parNewGeneration.cpp	1.99 07/05/17 15:52:40 JVM"
+#endif
+/*
+ * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_parNewGeneration.cpp.incl"
+
+#ifdef _MSC_VER
+#pragma warning( push )
+#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
+#endif
+ParScanThreadState::ParScanThreadState(Space* to_space_,
+                                       ParNewGeneration* gen_,
+				       Generation* old_gen_,
+				       int thread_num_,
+				       ObjToScanQueueSet* work_queue_set_,
+                                       size_t desired_plab_sz_,
+                                       ParallelTaskTerminator& term_) :
+  _to_space(to_space_), _old_gen(old_gen_), _thread_num(thread_num_),
+  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
+  _ageTable(false), // false ==> not the global age table, no perf data.
+  _to_space_alloc_buffer(desired_plab_sz_),
+  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
+  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
+  _older_gen_closure(gen_, this),
+  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
+                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
+                      work_queue_set_, &term_),
+  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
+  _keep_alive_closure(&_scan_weak_ref_closure),
+  _pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
+  _strong_roots_time(0.0), _term_time(0.0)
+{
+  _survivor_chunk_array =
+    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
+  _hash_seed = 17;  // Might want to take time-based random value.
+  _start = os::elapsedTime();
+  _old_gen_closure.set_generation(old_gen_);
+  _old_gen_root_closure.set_generation(old_gen_);
+}
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
+void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
+                                              size_t plab_word_size) {
+  ChunkArray* sca = survivor_chunk_array();
+  if (sca != NULL) {
+    // A non-null SCA implies that we want the PLAB data recorded.
+    sca->record_sample(plab_start, plab_word_size);
+  }
+}
+
+bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
+  return new_obj->is_objArray() &&
+         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
+         new_obj != old_obj;
+}
+
+void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
+  assert(old->is_objArray(), "must be obj array");
+  assert(old->is_forwarded(), "must be forwarded");
+  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
+  assert(!_old_gen->is_in(old), "must be in young generation.");
+
+  objArrayOop obj = objArrayOop(old->forwardee());
+  // Process ParGCArrayScanChunk elements now
+  // and push the remainder back onto queue
+  int start     = arrayOop(old)->length();
+  int end       = obj->length();
+  int remainder = end - start;
+  assert(start <= end, "just checking");
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    // Test above combines last partial chunk with a full chunk
+    end = start + ParGCArrayScanChunk;
+    arrayOop(old)->set_length(end);
+    // Push remainder.
+    bool ok = work_queue()->push(old);
+    assert(ok, "just popped, push must be okay");
+    note_push();
+  } else {
+    // Restore length so that it can be used if there
+    // is a promotion failure and forwarding pointers
+    // must be removed.
+    arrayOop(old)->set_length(end);
+  }
+  // process our set of indices (include header in first chunk)
+  oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start);
+  oop* end_addr   = obj->base() + end; // obj_at_addr(end) asserts end < length
+  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
+  if ((HeapWord *)obj < young_old_boundary()) {
+    // object is in to_space
+    obj->oop_iterate(&_to_space_closure, mr);
+  } else {
+    // object is in old generation
+    obj->oop_iterate(&_old_gen_closure, mr);
+  }
+}
+
+
+void ParScanThreadState::trim_queues(int max_size) {
+  ObjToScanQueue* queue = work_queue();
+  while (queue->size() > (juint)max_size) { 
+    oop obj_to_scan;
+    if (queue->pop_local(obj_to_scan)) {
+      note_pop();
+
+      if ((HeapWord *)obj_to_scan < young_old_boundary()) {
+        if (obj_to_scan->is_objArray() &&
+            obj_to_scan->is_forwarded() &&
+            obj_to_scan->forwardee() != obj_to_scan) {
+          scan_partial_array_and_push_remainder(obj_to_scan);
+        } else {
+          // object is in to_space
+          obj_to_scan->oop_iterate(&_to_space_closure);
+        }
+      } else {
+        // object is in old generation
+        obj_to_scan->oop_iterate(&_old_gen_closure);
+      }
+    }
+  }
+}
+
+HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
+
+  // Otherwise, if the object is small enough, try to reallocate the
+  // buffer.
+  HeapWord* obj = NULL;
+  if (!_to_space_full) {
+    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
+    Space*            const sp   = to_space();
+    if (word_sz * 100 <
+	ParallelGCBufferWastePct * plab->word_sz()) {
+      // Is small enough; abandon this buffer and start a new one.
+      plab->retire(false, false);
+      size_t buf_size = plab->word_sz();
+      HeapWord* buf_space = sp->par_allocate(buf_size);
+      if (buf_space == NULL) {
+        const size_t min_bytes =
+          ParGCAllocBuffer::min_size() << LogHeapWordSize;
+        size_t free_bytes = sp->free();
+        while(buf_space == NULL && free_bytes >= min_bytes) {
+          buf_size = free_bytes >> LogHeapWordSize;
+          assert(buf_size == (size_t)align_object_size(buf_size),
+                 "Invariant");
+	  buf_space  = sp->par_allocate(buf_size);
+          free_bytes = sp->free();
+        }
+      }
+      if (buf_space != NULL) {
+	plab->set_word_size(buf_size);
+	plab->set_buf(buf_space);
+        record_survivor_plab(buf_space, buf_size);
+	obj = plab->allocate(word_sz);
+        // Note that we cannot compare buf_size < word_sz below
+        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
+	assert(obj != NULL || plab->words_remaining() < word_sz,
+               "Else should have been able to allocate");
+        // It's conceivable that we may be able to use the
+        // buffer we just grabbed for subsequent small requests
+        // even if not for this one.
+      } else {
+	// We're used up.
+	_to_space_full = true;
+      }
+
+    } else {
+      // Too large; allocate the object individually.
+      obj = sp->par_allocate(word_sz);
+    }
+  }
+  return obj;
+}
+
+
+void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
+						size_t word_sz) {
+  // Is the alloc in the current alloc buffer?
+  if (to_space_alloc_buffer()->contains(obj)) {
+    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
+	   "Should contain whole object.");
+    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
+  } else {
+    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+  }
+}
+
+class ParScanThreadStateSet: private ResourceArray {
+public:
+  // Initializes states for the specified number of threads;
+  ParScanThreadStateSet(int                     num_threads, 
+                        Space&                  to_space, 
+                        ParNewGeneration&       gen,
+                        Generation&             old_gen, 
+                        ObjToScanQueueSet&      queue_set, 
+                        size_t                  desired_plab_sz,
+                        ParallelTaskTerminator& term);
+  inline ParScanThreadState& thread_sate(int i);
+  int pushes() { return _pushes; }
+  int pops()   { return _pops; }
+  int steals() { return _steals; }
+  void reset();
+  void flush();
+private:
+  ParallelTaskTerminator& _term;
+  ParNewGeneration&       _gen;
+  Generation&             _next_gen;
+  // staticstics
+  int _pushes;
+  int _pops;
+  int _steals;
+};
+
+
+ParScanThreadStateSet::ParScanThreadStateSet(
+  int num_threads, Space& to_space, ParNewGeneration& gen,
+  Generation& old_gen, ObjToScanQueueSet& queue_set, 
+  size_t desired_plab_sz, ParallelTaskTerminator& term)
+  : ResourceArray(sizeof(ParScanThreadState), num_threads),
+    _gen(gen), _next_gen(old_gen), _term(term),
+    _pushes(0), _pops(0), _steals(0)
+{
+  assert(num_threads > 0, "sanity check!");
+  // Initialize states.
+  for (int i = 0; i < num_threads; ++i) {
+    new ((ParScanThreadState*)_data + i) 
+        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
+                           desired_plab_sz, term);
+  }
+}
+
+inline ParScanThreadState& ParScanThreadStateSet::thread_sate(int i)
+{
+  assert(i >= 0 && i < length(), "sanity check!");
+  return ((ParScanThreadState*)_data)[i];
+}
+
+
+void ParScanThreadStateSet::reset()
+{
+  _term.reset_for_reuse();
+}
+
+void ParScanThreadStateSet::flush()
+{
+  for (int i = 0; i < length(); ++i) {
+    ParScanThreadState& par_scan_state = thread_sate(i);
+  
+    // Flush stats related to To-space PLAB activity and
+    // retire the last buffer.
+    par_scan_state.to_space_alloc_buffer()->
+      flush_stats_and_retire(_gen.plab_stats(),
+                             false /* !retain */);
+
+    // Every thread has its own age table.  We need to merge
+    // them all into one.
+    ageTable *local_table = par_scan_state.age_table();
+    _gen.age_table()->merge(local_table);
+
+    // Inform old gen that we're done.
+    _next_gen.par_promote_alloc_done(i);
+    _next_gen.par_oop_since_save_marks_iterate_done(i);
+
+    // Flush stats related to work queue activity (push/pop/steal)
+    // This could conceivably become a bottleneck; if so, we'll put the
+    // stat's gathering under the flag.
+    if (PAR_STATS_ENABLED) {
+      _pushes += par_scan_state.pushes();
+      _pops   += par_scan_state.pops();
+      _steals += par_scan_state.steals();
+      if (ParallelGCVerbose) {
+        gclog_or_tty->print("Thread %d complete:\n"
+                            "  Pushes: %7d    Pops: %7d    Steals %7d (in %d attempts)\n",
+                            i, par_scan_state.pushes(), par_scan_state.pops(),
+                            par_scan_state.steals(), par_scan_state.steal_attempts());
+        if (par_scan_state.overflow_pushes() > 0 ||
+            par_scan_state.overflow_refills() > 0) {
+          gclog_or_tty->print("  Overflow pushes: %7d    "
+                              "Overflow refills: %7d for %d objs.\n",
+                              par_scan_state.overflow_pushes(),
+                              par_scan_state.overflow_refills(),
+                              par_scan_state.overflow_refill_objs());
+        }
+
+        double elapsed = par_scan_state.elapsed();
+        double strong_roots = par_scan_state.strong_roots_time();
+        double term = par_scan_state.term_time();
+        gclog_or_tty->print(
+                            "  Elapsed: %7.2f ms.\n"
+                            "    Strong roots: %7.2f ms (%6.2f%%)\n"
+                            "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
+                           elapsed * 1000.0,
+                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
+                           term * 1000.0, (term*100.0/elapsed),
+                           par_scan_state.term_attempts());
+      }
+    }
+  }
+}
+
+
+ParScanClosure::ParScanClosure(ParNewGeneration* g,
+			       ParScanThreadState* par_scan_state) :
+  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
+{
+  assert(_g->level() == 0, "Optimized for youngest generation");
+  _boundary = _g->reserved().end();
+}
+
+ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
+                                             ParScanThreadState* par_scan_state)
+  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
+{
+}
+
+#ifdef WIN32
+#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
+#endif
+
+ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
+    ParScanThreadState* par_scan_state_,
+    ParScanWithoutBarrierClosure* to_space_closure_,
+    ParScanWithBarrierClosure* old_gen_closure_,
+    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
+    ParNewGeneration* par_gen_,
+    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
+    ObjToScanQueueSet* task_queues_,
+    ParallelTaskTerminator* terminator_) :
+
+    _par_scan_state(par_scan_state_),
+    _to_space_closure(to_space_closure_),
+    _old_gen_closure(old_gen_closure_),
+    _to_space_root_closure(to_space_root_closure_),
+    _old_gen_root_closure(old_gen_root_closure_),
+    _par_gen(par_gen_),
+    _task_queues(task_queues_),
+    _terminator(terminator_)
+{}
+
+void ParEvacuateFollowersClosure::do_void() {
+  ObjToScanQueue* work_q = par_scan_state()->work_queue();
+
+  while (true) {
+
+    // Scan to-space and old-gen objs until we run out of both.
+    oop obj_to_scan;
+    par_scan_state()->trim_queues(0);
+
+    // We have no local work, attempt to steal from other threads.
+
+    // attempt to steal work from promoted.
+    par_scan_state()->note_steal_attempt();
+    if (task_queues()->steal(par_scan_state()->thread_num(),
+                             par_scan_state()->hash_seed(),
+                             obj_to_scan)) {
+      par_scan_state()->note_steal();
+      bool res = work_q->push(obj_to_scan);
+      assert(res, "Empty queue should have room for a push.");
+
+      par_scan_state()->note_push();
+      //   if successful, goto Start.
+      continue;
+
+      // try global overflow list.
+    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
+      continue;
+    }
+
+    // Otherwise, offer termination.
+    par_scan_state()->start_term_time();
+    if (terminator()->offer_termination()) break;
+    par_scan_state()->end_term_time();
+  }
+  // Finish the last termination pause.
+  par_scan_state()->end_term_time();
+}
+
+ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
+		HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
+    AbstractGangTask("ParNewGeneration collection"),
+    _gen(gen), _next_gen(next_gen),
+    _young_old_boundary(young_old_boundary),
+    _state_set(state_set)
+  {}
+
+void ParNewGenTask::work(int i) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  // Since this is being done in a separate thread, need new resource
+  // and handle marks.
+  ResourceMark rm;
+  HandleMark hm;
+  // We would need multiple old-gen queues otherwise.
+  guarantee(gch->n_gens() == 2,
+     "Par young collection currently only works with one older gen.");
+
+  Generation* old_gen = gch->next_gen(_gen);
+
+  ParScanThreadState& par_scan_state = _state_set->thread_sate(i);
+  par_scan_state.set_young_old_boundary(_young_old_boundary);
+  
+  par_scan_state.start_strong_roots();
+  gch->gen_process_strong_roots(_gen->level(),
+                                true, // Process younger gens, if any,
+                                      // as strong roots.
+                                false,// not collecting perm generation.
+                                SharedHeap::SO_AllClasses,
+                                &par_scan_state.older_gen_closure(),
+                                &par_scan_state.to_space_root_closure());
+  par_scan_state.end_strong_roots();
+
+  // "evacuate followers".
+  par_scan_state.evacuate_followers_closure().do_void();
+}
+
+#ifdef _MSC_VER
+#pragma warning( push )
+#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
+#endif
+ParNewGeneration::
+ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
+  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
+  _overflow_list(NULL),
+  _is_alive_closure(this),
+  _plab_stats(YoungPLABSize, PLABWeight)
+{
+  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
+  guarantee(_task_queues != NULL, "task_queues allocation failure.");
+
+  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
+    ObjToScanQueuePadded *q_padded = new ObjToScanQueuePadded();
+    guarantee(q_padded != NULL, "work_queue Allocation failure.");
+
+    _task_queues->register_queue(i1, &q_padded->work_queue);
+  }
+
+  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
+    _task_queues->queue(i2)->initialize();
+
+  if (UsePerfData) {
+    EXCEPTION_MARK;
+    ResourceMark rm;
+
+    const char* cname =
+         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
+    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
+                                     ParallelGCThreads, CHECK);
+  }
+}
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
+// ParNewGeneration::
+ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
+  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
+
+void
+// ParNewGeneration::
+ParKeepAliveClosure::do_oop(oop* p) {
+  // We never expect to see a null reference being processed
+  // as a weak reference.
+  assert (*p != NULL, "expected non-null ref");
+  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+
+  _par_cl->do_oop_nv(p);
+
+  if (Universe::heap()->is_in_reserved(p)) {
+    _rs->write_ref_field_gc_par(p, *p);
+  }
+}
+
+// ParNewGeneration::
+KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
+  DefNewGeneration::KeepAliveClosure(cl) {}
+
+void
+// ParNewGeneration::
+KeepAliveClosure::do_oop(oop* p) {
+  // We never expect to see a null reference being processed
+  // as a weak reference.
+  assert (*p != NULL, "expected non-null ref");
+  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+
+  _cl->do_oop_nv(p);
+
+  if (Universe::heap()->is_in_reserved(p)) {
+    _rs->write_ref_field_gc_par(p, *p);
+  }
+}
+
+void ScanClosureWithParBarrier::do_oop(oop* p) {
+  oop obj = *p;
+  // Should we copy the obj?
+  if (obj != NULL) {
+    if ((HeapWord*)obj < _boundary) {
+      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
+      if (obj->is_forwarded()) {
+        *p = obj->forwardee();
+      } else {        
+        *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
+      }
+    }
+    if (_gc_barrier) {
+      // If p points to a younger generation, mark the card.
+      if ((HeapWord*)obj < _gen_boundary) {
+	_rs->write_ref_field_gc_par(p, obj);
+      }
+    }
+  }
+}
+
+class ParNewRefProcTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+public:
+  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,		
+                         Generation& next_gen,
+                         HeapWord* young_old_boundary,
+                         ParScanThreadStateSet& state_set);
+
+private:
+  virtual void work(int i);
+  
+private:
+  ParNewGeneration&      _gen;
+  ProcessTask&           _task;
+  Generation&            _next_gen;
+  HeapWord*              _young_old_boundary;
+  ParScanThreadStateSet& _state_set;
+};
+
+ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
+    ProcessTask& task, ParNewGeneration& gen,		
+    Generation& next_gen, 
+    HeapWord* young_old_boundary,
+    ParScanThreadStateSet& state_set)
+  : AbstractGangTask("ParNewGeneration parallel reference processing"),
+    _gen(gen),
+    _task(task),
+    _next_gen(next_gen), 
+    _young_old_boundary(young_old_boundary),
+    _state_set(state_set)
+{
+}
+
+void ParNewRefProcTaskProxy::work(int i)
+{
+  ResourceMark rm;
+  HandleMark hm;
+  ParScanThreadState& par_scan_state = _state_set.thread_sate(i);
+  par_scan_state.set_young_old_boundary(_young_old_boundary);
+  _task.work(i, par_scan_state.is_alive_closure(), 
+             par_scan_state.keep_alive_closure(), 
+             par_scan_state.evacuate_followers_closure());
+}
+
+class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+  EnqueueTask& _task;
+
+public:
+  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
+    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
+      _task(task)
+  { }
+
+  virtual void work(int i)
+  {
+    _task.work(i);
+  }
+};
+
+
+void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
+{
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
+         "not a generational heap");
+  WorkGang* workers = gch->workers();
+  assert(workers != NULL, "Need parallel worker threads.");
+  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
+                                 _generation.reserved().end(), _state_set);
+  workers->run_task(&rp_task);
+  _state_set.reset();
+}
+
+void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
+{
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  WorkGang* workers = gch->workers();
+  assert(workers != NULL, "Need parallel worker threads.");
+  ParNewRefEnqueueTaskProxy enq_task(task);
+  workers->run_task(&enq_task);
+}
+
+void ParNewRefProcTaskExecutor::set_single_threaded_mode() 
+{ 
+  _state_set.flush(); 
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  gch->set_par_threads(0);  // 0 ==> non-parallel.
+  gch->save_marks();
+}
+
+ScanClosureWithParBarrier::
+ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
+  ScanClosure(g, gc_barrier) {}
+
+EvacuateFollowersClosureGeneral::
+EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+				OopsInGenClosure* cur,
+				OopsInGenClosure* older) :
+  _gch(gch), _level(level),
+  _scan_cur_or_nonheap(cur), _scan_older(older)
+{}
+
+void EvacuateFollowersClosureGeneral::do_void() {
+  do {
+    // Beware: this call will lead to closure applications via virtual
+    // calls.
+    _gch->oop_since_save_marks_iterate(_level,
+				       _scan_cur_or_nonheap,
+				       _scan_older);
+  } while (!_gch->no_allocs_since_save_marks(_level));
+}
+
+
+bool ParNewGeneration::_avoid_promotion_undo = false;
+
+void ParNewGeneration::adjust_desired_tenuring_threshold() {
+  // Set the desired survivor size to half the real survivor space
+  _tenuring_threshold =
+    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
+}
+
+// A Generation that does parallel young-gen collection.
+
+void ParNewGeneration::collect(bool   full,
+                               bool   clear_all_soft_refs,
+			       size_t size,
+                               bool   is_tlab) {
+  assert(full || size > 0, "otherwise we don't want to collect");
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
+    "not a CMS generational heap");
+  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+  WorkGang* workers = gch->workers();
+  _next_gen = gch->next_gen(this);
+  assert(_next_gen != NULL, 
+    "This must be the youngest gen, and not the only gen");
+  assert(gch->n_gens() == 2,
+	 "Par collection currently only works with single older gen.");
+  // Do we have to avoid promotion_undo?
+  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
+    set_avoid_promotion_undo(true);
+  }
+
+  // If the next generation is too full to accomodate worst-case promotion
+  // from this generation, pass on collection; let the next generation
+  // do it.
+  if (!collection_attempt_is_safe()) {
+    gch->set_incremental_collection_will_fail();
+    return;
+  }
+  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
+
+  init_assuming_no_promotion_failure();
+
+  if (UseAdaptiveSizePolicy) {
+    set_survivor_overflow(false);
+    size_policy->minor_collection_begin();
+  }
+
+  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
+  // Capture heap used before collection (for printing).
+  size_t gch_prev_used = gch->used();
+
+  SpecializationStats::clear();
+
+  age_table()->clear();
+  to()->clear();
+
+  gch->save_marks();
+  assert(workers != NULL, "Need parallel worker threads.");
+  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
+  ParScanThreadStateSet thread_state_set(workers->total_workers(),
+                                         *to(), *this, *_next_gen, *task_queues(), 
+                                         desired_plab_sz(), _term);
+
+  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
+  int n_workers = workers->total_workers();
+  gch->set_par_threads(n_workers);
+  gch->change_strong_roots_parity();
+  gch->rem_set()->prepare_for_younger_refs_iterate(true);
+  // It turns out that even when we're using 1 thread, doing the work in a
+  // separate thread causes wide variance in run times.  We can't help this 
+  // in the multi-threaded case, but we special-case n=1 here to get
+  // repeatable measurements of the 1-thread overhead of the parallel code.
+  if (n_workers > 1) {
+    workers->run_task(&tsk);
+  } else {
+    tsk.work(0);
+  }
+  thread_state_set.reset();
+
+  if (PAR_STATS_ENABLED && ParallelGCVerbose) {
+    gclog_or_tty->print("Thread totals:\n"
+	       "  Pushes: %7d    Pops: %7d    Steals %7d (sum = %7d).\n",
+	       thread_state_set.pushes(), thread_state_set.pops(), 
+               thread_state_set.steals(),
+	       thread_state_set.pops()+thread_state_set.steals());
+  }
+  assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
+	 "Or else the queues are leaky.");
+
+  // For now, process discovered weak refs sequentially.
+#ifdef COMPILER2
+  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
+#else
+  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
+#endif // COMPILER2
+ 
+  // Process (weak) reference objects found during scavenge.
+  IsAliveClosure is_alive(this);
+  ScanWeakRefClosure scan_weak_ref(this);
+  KeepAliveClosure keep_alive(&scan_weak_ref);
+  ScanClosure               scan_without_gc_barrier(this, false);
+  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
+  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
+  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, 
+    &scan_without_gc_barrier, &scan_with_gc_barrier);
+  if (ref_processor()->processing_is_mt()) {
+    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+    ref_processor()->process_discovered_references(
+        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, 
+        &task_executor);
+  } else {
+    thread_state_set.flush();
+    gch->set_par_threads(0);  // 0 ==> non-parallel.
+    gch->save_marks();
+    ref_processor()->process_discovered_references(
+      soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
+      NULL);
+  }
+  if (!promotion_failed()) {
+    // Swap the survivor spaces.
+    eden()->clear();
+    from()->clear();
+    swap_spaces();
+  
+    assert(to()->is_empty(), "to space should be empty now");
+  } else {
+    assert(HandlePromotionFailure, 
+      "Should only be here if promotion failure handling is on");
+    if (_promo_failure_scan_stack != NULL) {
+      // Can be non-null because of reference processing.
+      // Free stack with its elements.
+      delete _promo_failure_scan_stack;
+      _promo_failure_scan_stack = NULL;
+    }
+    remove_forwarding_pointers();
+    if (PrintGCDetails) {
+      gclog_or_tty->print(" (promotion failed)");
+    }
+    // All the spaces are in play for mark-sweep.
+    from()->set_next_compaction_space(to());
+    gch->set_incremental_collection_will_fail();
+  }
+  // set new iteration safe limit for the survivor spaces
+  from()->set_concurrent_iteration_safe_limit(from()->top());
+  to()->set_concurrent_iteration_safe_limit(to()->top());
+
+  adjust_desired_tenuring_threshold();
+  if (ResizePLAB) {
+    plab_stats()->adjust_desired_plab_sz();
+  }
+
+  if (PrintGC && !PrintGCDetails) {
+    gch->print_heap_change(gch_prev_used);
+  }
+
+  if (UseAdaptiveSizePolicy) {
+    size_policy->minor_collection_end(gch->gc_cause());
+    size_policy->avg_survived()->sample(from()->used());
+  }
+
+  update_time_of_last_gc(os::javaTimeMillis());
+
+  SpecializationStats::print();
+  
+  ref_processor()->set_enqueuing_is_done(true);
+  if (ref_processor()->processing_is_mt()) {
+    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
+    ref_processor()->enqueue_discovered_references(&task_executor);
+  } else {
+    ref_processor()->enqueue_discovered_references(NULL);
+  }
+  ref_processor()->verify_no_references_recorded();
+}
+
+static int sum;
+void ParNewGeneration::waste_some_time() {
+  for (int i = 0; i < 100; i++) {
+    sum += i;
+  }
+}
+
+static const oop ClaimedForwardPtr = oop(0x4);
+
+// Because of concurrency, there are times where an object for which
+// "is_forwarded()" is true contains an "interim" forwarding pointer
+// value.  Such a value will soon be overwritten with a real value.
+// This method requires "obj" to have a forwarding pointer, and waits, if
+// necessary for a real one to be inserted, and returns it.
+
+oop ParNewGeneration::real_forwardee(oop obj) {
+  oop forward_ptr = obj->forwardee();
+  if (forward_ptr != ClaimedForwardPtr) {
+    return forward_ptr;
+  } else {
+    return real_forwardee_slow(obj);
+  }
+}
+
+oop ParNewGeneration::real_forwardee_slow(oop obj) {
+  // Spin-read if it is claimed but not yet written by another thread.
+  oop forward_ptr = obj->forwardee();
+  while (forward_ptr == ClaimedForwardPtr) {
+    waste_some_time();
+    assert(obj->is_forwarded(), "precondition");
+    forward_ptr = obj->forwardee();
+  }
+  return forward_ptr;
+}
+
+#ifdef ASSERT
+bool ParNewGeneration::is_legal_forward_ptr(oop p) {
+  return
+    (_avoid_promotion_undo && p == ClaimedForwardPtr)
+    || Universe::heap()->is_in_reserved(p);
+}
+#endif
+
+void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
+  if ((m != markOopDesc::prototype()) &&
+      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
+    MutexLocker ml(ParGCRareEvent_lock);
+    DefNewGeneration::preserve_mark_if_necessary(obj, m);
+  }
+}
+
+// Multiple GC threads may try to promote an object.  If the object
+// is successfully promoted, a forwarding pointer will be installed in
+// the object in the young generation.  This method claims the right
+// to install the forwarding pointer before it copies the object,
+// thus avoiding the need to undo the copy as in
+// copy_to_survivor_space_avoiding_with_undo.
+ 
+oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
+	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+  // In the sequential version, this assert also says that the object is
+  // not forwarded.  That might not be the case here.  It is the case that
+  // the caller observed it to be not forwarded at some time in the past.
+  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
+
+  // The sequential code read "old->age()" below.  That doesn't work here,
+  // since the age is in the mark word, and that might be overwritten with
+  // a forwarding pointer by a parallel thread.  So we must save the mark
+  // word in a local and then analyze it.
+  oopDesc dummyOld;
+  dummyOld.set_mark(m);
+  assert(!dummyOld.is_forwarded(),
+	 "should not be called with forwarding pointer mark word.");
+  
+  oop new_obj = NULL;
+  oop forward_ptr;
+
+  // Try allocating obj in to-space (unless too old)
+  if (dummyOld.age() < tenuring_threshold()) {
+    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
+    if (new_obj == NULL) {
+      set_survivor_overflow(true);
+    }
+  }
+
+  if (new_obj == NULL) {
+    // Either to-space is full or we decided to promote
+    // try allocating obj tenured
+
+    // Attempt to install a null forwarding pointer (atomically),
+    // to claim the right to install the real forwarding pointer.
+    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
+    if (forward_ptr != NULL) {
+      // someone else beat us to it.
+	return real_forwardee(old);
+    }
+
+    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+				       old, m, sz);
+
+    if (new_obj == NULL) {
+      if (!HandlePromotionFailure) {
+        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
+        // is incorrectly set. In any case, it's seriously wrong to be here!
+        vm_exit_out_of_memory(sz*wordSize, "promotion");
+      }
+      // promotion failed, forward to self
+      _promotion_failed = true;
+      new_obj = old;
+
+      preserve_mark_if_necessary(old, m);
+    }
+
+    old->forward_to(new_obj);
+    forward_ptr = NULL;
+  } else {
+    // Is in to-space; do copying ourselves.
+    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
+    forward_ptr = old->forward_to_atomic(new_obj);
+    // Restore the mark word copied above.
+    new_obj->set_mark(m);
+    // Increment age if obj still in new generation
+    new_obj->incr_age();
+    par_scan_state->age_table()->add(new_obj, sz);
+  }
+  assert(new_obj != NULL, "just checking");
+
+  if (forward_ptr == NULL) {
+    oop obj_to_push = new_obj;
+    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
+      // Length field used as index of next element to be scanned.
+      // Real length can be obtained from real_forwardee()
+      arrayOop(old)->set_length(0);
+      obj_to_push = old;
+      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
+             "push forwarded object");
+    }
+    // Push it on one of the queues of to-be-scanned objects.
+    if (!par_scan_state->work_queue()->push(obj_to_push)) {
+      // Add stats for overflow pushes.
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("queue overflow!\n");
+      }
+      push_on_overflow_list(old);
+      par_scan_state->note_overflow_push();
+    }
+    par_scan_state->note_push();
+
+    return new_obj;
+  } 
+
+  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
+  // allocate it?
+  if (is_in_reserved(new_obj)) {
+    // Must be in to_space.
+    assert(to()->is_in_reserved(new_obj), "Checking");
+    if (forward_ptr == ClaimedForwardPtr) {
+      // Wait to get the real forwarding pointer value.
+      forward_ptr = real_forwardee(old);
+    }
+    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
+  }
+
+  return forward_ptr;
+}
+
+
+// Multiple GC threads may try to promote the same object.  If two
+// or more GC threads copy the object, only one wins the race to install
+// the forwarding pointer.  The other threads have to undo their copy.
+
+oop ParNewGeneration::copy_to_survivor_space_with_undo(
+	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+
+  // In the sequential version, this assert also says that the object is
+  // not forwarded.  That might not be the case here.  It is the case that
+  // the caller observed it to be not forwarded at some time in the past.
+  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
+
+  // The sequential code read "old->age()" below.  That doesn't work here,
+  // since the age is in the mark word, and that might be overwritten with
+  // a forwarding pointer by a parallel thread.  So we must save the mark
+  // word here, install it in a local oopDesc, and then analyze it.
+  oopDesc dummyOld;
+  dummyOld.set_mark(m);
+  assert(!dummyOld.is_forwarded(),
+	 "should not be called with forwarding pointer mark word.");
+  
+  bool failed_to_promote = false;
+  oop new_obj = NULL;
+  oop forward_ptr;
+
+  // Try allocating obj in to-space (unless too old)
+  if (dummyOld.age() < tenuring_threshold()) {
+    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
+    if (new_obj == NULL) {
+      set_survivor_overflow(true);
+    }
+  }
+
+  if (new_obj == NULL) {
+    // Either to-space is full or we decided to promote
+    // try allocating obj tenured
+    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+				       old, m, sz);
+
+    if (new_obj == NULL) {
+      if (!HandlePromotionFailure) {
+        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
+        // flag is incorrectly set. In any case, it's seriously wrong to be
+        // here!
+        vm_exit_out_of_memory(sz*wordSize, "promotion");
+      }
+      // promotion failed, forward to self
+      forward_ptr = old->forward_to_atomic(old);
+      new_obj = old;
+
+      if (forward_ptr != NULL) {
+        return forward_ptr;   // someone else succeeded
+      }
+
+      _promotion_failed = true;
+      failed_to_promote = true;
+
+      preserve_mark_if_necessary(old, m);
+    }
+  } else {
+    // Is in to-space; do copying ourselves.
+    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
+    // Restore the mark word copied above.
+    new_obj->set_mark(m);
+    // Increment age if new_obj still in new generation
+    new_obj->incr_age();
+    par_scan_state->age_table()->add(new_obj, sz);
+  }
+  assert(new_obj != NULL, "just checking");
+
+  // Now attempt to install the forwarding pointer (atomically).
+  // We have to copy the mark word before overwriting with forwarding
+  // ptr, so we can restore it below in the copy.
+  if (!failed_to_promote) {
+    forward_ptr = old->forward_to_atomic(new_obj);
+  }
+
+  if (forward_ptr == NULL) {
+    oop obj_to_push = new_obj;
+    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
+      // Length field used as index of next element to be scanned.
+      // Real length can be obtained from real_forwardee()
+      arrayOop(old)->set_length(0);
+      obj_to_push = old;
+      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
+             "push forwarded object");
+    }
+    // Push it on one of the queues of to-be-scanned objects.
+    if (!par_scan_state->work_queue()->push(obj_to_push)) {
+      // Add stats for overflow pushes.
+      push_on_overflow_list(old);
+      par_scan_state->note_overflow_push();
+    }
+    par_scan_state->note_push();
+
+    return new_obj;
+  } 
+
+  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
+  // allocate it?
+  if (is_in_reserved(new_obj)) {
+    // Must be in to_space.
+    assert(to()->is_in_reserved(new_obj), "Checking");
+    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
+  } else {
+    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
+    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
+                                      (HeapWord*)new_obj, sz);
+  }
+
+  return forward_ptr;
+}
+
+void ParNewGeneration::push_on_overflow_list(oop from_space_obj) {
+  oop cur_overflow_list = _overflow_list;
+  // if the object has been forwarded to itself, then we cannot
+  // use the klass pointer for the linked list.  Instead we have
+  // to allocate an oopDesc in the C-Heap and use that for the linked list.
+  if (from_space_obj->forwardee() == from_space_obj) {
+    oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
+    listhead->forward_to(from_space_obj);
+    from_space_obj = listhead;
+  }
+  while (true) {
+    from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
+    oop observed_overflow_list =
+      (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
+    if (observed_overflow_list == cur_overflow_list) break;
+    // Otherwise...
+    cur_overflow_list = observed_overflow_list;
+  }
+}
+
+bool
+ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
+  ObjToScanQueue* work_q = par_scan_state->work_queue();
+  // How many to take?
+  int objsFromOverflow = MIN2(work_q->max_elems()/4,
+			      (juint)ParGCDesiredObjsFromOverflowList);
+
+  if (_overflow_list == NULL) return false;
+
+  // Otherwise, there was something there; try claiming the list.
+  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
+
+  if (prefix == NULL) {
+    return false;
+  }
+  // Trim off a prefix of at most objsFromOverflow items
+  int i = 1;
+  oop cur = prefix;
+  while (i < objsFromOverflow && cur->klass() != NULL) {
+    i++; cur = oop(cur->klass());
+  }
+
+  // Reattach remaining (suffix) to overflow list
+  if (cur->klass() != NULL) {
+    oop suffix = oop(cur->klass());
+    cur->set_klass_to_list_ptr(NULL);
+
+    // Find last item of suffix list
+    oop last = suffix;
+    while (last->klass() != NULL) {
+      last = oop(last->klass());
+    }
+    // Atomically prepend suffix to current overflow list
+    oop cur_overflow_list = _overflow_list;
+    while (true) {
+      last->set_klass_to_list_ptr(cur_overflow_list);
+      oop observed_overflow_list =
+        (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+      if (observed_overflow_list == cur_overflow_list) break;
+      // Otherwise...
+      cur_overflow_list = observed_overflow_list;
+    }
+  }
+
+  // Push objects on prefix list onto this thread's work queue
+  assert(cur != NULL, "program logic");
+  cur = prefix;
+  int n = 0;
+  while (cur != NULL) {
+    oop obj_to_push = cur->forwardee();
+    oop next        = oop(cur->klass());
+    cur->set_klass(obj_to_push->klass());
+    if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
+      obj_to_push = cur;
+      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
+    }
+    work_q->push(obj_to_push);
+    cur = next;
+    n++;
+  }
+  par_scan_state->note_overflow_refill(n);
+  return true;
+}
+
+void ParNewGeneration::ref_processor_init()
+{
+  if (_ref_processor == NULL) {
+    // Allocate and initialize a reference processor
+    _ref_processor = ReferenceProcessor::create_ref_processor(
+        _reserved,                  // span
+        refs_discovery_is_atomic(), // atomic_discovery
+        refs_discovery_is_mt(),     // mt_discovery
+        &_is_alive_closure,
+        ParallelGCThreads,
+        ParallelRefProcEnabled);
+  }
+}
+
+const char* ParNewGeneration::name() const {
+  return "par new generation";
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,391 @@
+#ifdef USE_PRAGMA_IDENT_HDR
+#pragma ident "@(#)parNewGeneration.hpp	1.48 07/05/17 15:52:44 JVM"
+#endif
+/*
+ * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *  
+ */
+
+class ChunkArray;
+class ParScanWithoutBarrierClosure;
+class ParScanWithBarrierClosure;
+class ParRootScanWithoutBarrierClosure;
+class ParRootScanWithBarrierTwoGensClosure;
+class ParEvacuateFollowersClosure;
+
+// It would be better if these types could be kept local to the .cpp file,
+// but they must be here to allow ParScanClosure::do_oop_work to be defined 
+// in genOopClosures.inline.hpp.
+
+
+typedef OopTaskQueue    ObjToScanQueue;
+typedef OopTaskQueueSet ObjToScanQueueSet;
+
+// Enable this to get push/pop/steal stats.
+const int PAR_STATS_ENABLED = 0;
+
+class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+  ParScanWeakRefClosure* _par_cl;
+ public:
+  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
+  void do_oop(oop* p);
+};
+
+// The state needed by thread performing parallel young-gen collection.
+class ParScanThreadState {
+  friend class ParScanThreadStateSet;
+  ObjToScanQueue *_work_queue;
+
+  ParGCAllocBuffer _to_space_alloc_buffer;
+
+  ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
+  ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
+  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
+  // One of these two will be passed to process_strong_roots, which will
+  // set its generation.  The first is for two-gen configs where the
+  // old gen collects the perm gen; the second is for arbitrary configs.
+  // The second isn't used right now (it used to be used for the train, an
+  // incremental collector) but the declaration has been left as a reminder.
+  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
+  // This closure will always be bound to the old gen; it will be used
+  // in evacuate_followers.
+  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
+  ParEvacuateFollowersClosure          _evacuate_followers;
+  DefNewGeneration::IsAliveClosure     _is_alive_closure;
+  ParScanWeakRefClosure                _scan_weak_ref_closure;
+  ParKeepAliveClosure                  _keep_alive_closure;
+  
+
+  Space* _to_space;
+  Space* to_space() { return _to_space; }
+
+  Generation* _old_gen;
+  Generation* old_gen() { return _old_gen; }
+
+  HeapWord *_young_old_boundary;
+
+  int _hash_seed;
+  int _thread_num;
+  ageTable _ageTable;
+
+  bool _to_space_full;
+
+  int _pushes, _pops, _steals, _steal_attempts, _term_attempts;
+  int _overflow_pushes, _overflow_refills, _overflow_refill_objs;
+
+  // Timing numbers.
+  double _start;
+  double _start_strong_roots;
+  double _strong_roots_time;
+  double _start_term;
+  double _term_time;
+
+  // Helper for trim_queues. Scans subset of an array and makes
+  // remainder available for work stealing.
+  void scan_partial_array_and_push_remainder(oop obj);
+
+  // In support of CMS' parallel rescan of survivor space.
+  ChunkArray* _survivor_chunk_array;
+  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
+
+  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
+
+  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, 
+                     Generation* old_gen_, int thread_num_,
+                     ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
+                     ParallelTaskTerminator& term_);
+
+public:
+  ageTable* age_table() {return &_ageTable;}
+  
+  ObjToScanQueue* work_queue() { return _work_queue; }
+
+  ParGCAllocBuffer* to_space_alloc_buffer() {
+    return &_to_space_alloc_buffer;
+  }
+  
+  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
+  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
+  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
+  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
+  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
+  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };
+
+  // Decrease queue size below "max_size".
+  void trim_queues(int max_size);
+
+  // Is new_obj a candidate for scan_partial_array_and_push_remainder method.
+  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
+
+  int* hash_seed()  { return &_hash_seed; }
+  int  thread_num() { return _thread_num; }
+
+  // Allocate a to-space block of size "sz", or else return NULL.
+  HeapWord* alloc_in_to_space_slow(size_t word_sz);
+
+  HeapWord* alloc_in_to_space(size_t word_sz) {
+    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
+    if (obj != NULL) return obj;
+    else return alloc_in_to_space_slow(word_sz);
+  }
+
+  HeapWord* young_old_boundary() { return _young_old_boundary; }
+
+  void set_young_old_boundary(HeapWord *boundary) {
+    _young_old_boundary = boundary;
+  }
+
+  // Undo the most recent allocation ("obj", of "word_sz").
+  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
+
+  int pushes() { return _pushes; }
+  int pops()   { return _pops; }
+  int steals() { return _steals; }
+  int steal_attempts() { return _steal_attempts; }
+  int term_attempts()  { return _term_attempts; }
+  int overflow_pushes() { return _overflow_pushes; }
+  int overflow_refills() { return _overflow_refills; }
+  int overflow_refill_objs() { return _overflow_refill_objs; }
+
+  void note_push()  { if (PAR_STATS_ENABLED) _pushes++; }
+  void note_pop()   { if (PAR_STATS_ENABLED) _pops++; }
+  void note_steal() { if (PAR_STATS_ENABLED) _steals++; }
+  void note_steal_attempt() { if (PAR_STATS_ENABLED) _steal_attempts++; }
+  void note_term_attempt()  { if (PAR_STATS_ENABLED) _term_attempts++; }
+  void note_overflow_push() { if (PAR_STATS_ENABLED) _overflow_pushes++; }
+  void note_overflow_refill(int objs) {
+    if (PAR_STATS_ENABLED) {
+      _overflow_refills++;
+      _overflow_refill_objs += objs;
+    }
+  }
+
+  void start_strong_roots() {
+    _start_strong_roots = os::elapsedTime();
+  }
+  void end_strong_roots() {
+    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
+  }
+  double strong_roots_time() { return _strong_roots_time; }
+  void start_term_time() {
+    note_term_attempt();
+    _start_term = os::elapsedTime();
+  }
+  void end_term_time() {
+    _term_time += (os::elapsedTime() - _start_term);
+  }
+  double term_time() { return _term_time; }
+
+  double elapsed() {
+    return os::elapsedTime() - _start;
+  }
+
+};
+
+class ParNewGenTask: public AbstractGangTask {
+  ParNewGeneration* _gen;
+  Generation* _next_gen;
+  HeapWord* _young_old_boundary;
+  class ParScanThreadStateSet* _state_set;
+
+public:
+  ParNewGenTask(ParNewGeneration*      gen, 
+                Generation*            next_gen,
+		HeapWord*              young_old_boundary, 
+                ParScanThreadStateSet* state_set);
+
+  HeapWord* young_old_boundary() { return _young_old_boundary; }
+
+  void work(int i);
+};
+
+class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ public:
+  KeepAliveClosure(ScanWeakRefClosure* cl);
+  void do_oop(oop* p);
+};
+
+class EvacuateFollowersClosureGeneral: public VoidClosure {
+    GenCollectedHeap* _gch;
+    int _level;
+    OopsInGenClosure* _scan_cur_or_nonheap;
+    OopsInGenClosure* _scan_older;
+  public:
+    EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+                                    OopsInGenClosure* cur,
+                                    OopsInGenClosure* older);
+    void do_void();
+};
+
+// Closure for scanning ParNewGeneration.
+// Same as ScanClosure, except does parallel GC barrier.
+class ScanClosureWithParBarrier: public ScanClosure {
+public:
+  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
+  void do_oop(oop* p);
+};
+
+// Implements AbstractRefProcTaskExecutor for ParNew.
+class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+public:
+
+  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
+                            ParScanThreadStateSet& state_set)
+    : _generation(generation), _state_set(state_set)
+  { }
+  
+  // Executes a task using worker threads.  
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+  // Switch to single threaded mode.
+  virtual void set_single_threaded_mode();
+private:
+  ParNewGeneration&      _generation;
+  ParScanThreadStateSet& _state_set;
+};
+
+
+// A Generation that does parallel young-gen collection.
+
+class ParNewGeneration: public DefNewGeneration {
+  friend class ParNewGenTask;
+  friend class ParNewRefProcTask;
+  friend class ParNewRefProcTaskExecutor;
+  friend class ParScanThreadStateSet;
+
+  // XXX use a global constant instead of 64!
+  struct ObjToScanQueuePadded {
+        ObjToScanQueue work_queue;
+        char pad[64 - sizeof(ObjToScanQueue)];  // prevent false sharing
+  };
+
+  // The per-thread work queues, available here for stealing.
+  ObjToScanQueueSet* _task_queues;
+
+  // Desired size of survivor space plab's
+  PLABStats _plab_stats;
+
+  // A list of from-space images of to-be-scanned objects, threaded through 
+  // klass-pointers (klass information already copied to the forwarded
+  // image.)  Manipulated with CAS.
+  oop _overflow_list;
+
+  // If true, older generation does not support promotion undo, so avoid.
+  static bool _avoid_promotion_undo;
+  
+  // This closure is used by the reference processor to filter out
+  // references to live referent.
+  DefNewGeneration::IsAliveClosure _is_alive_closure;
+
+  static oop real_forwardee_slow(oop obj);
+  static void waste_some_time();
+
+  // Preserve the mark of "obj", if necessary, in preparation for its mark 
+  // word being overwritten with a self-forwarding-pointer. 
+  void preserve_mark_if_necessary(oop obj, markOop m);
+
+ protected:
+
+  bool _survivor_overflow;
+
+  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
+  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
+
+  bool survivor_overflow() { return _survivor_overflow; }
+  void set_survivor_overflow(bool v) { _survivor_overflow = v; }
+
+  // Adjust the tenuring threshold.  See the implementation for
+  // the details of the policy.
+  virtual void adjust_desired_tenuring_threshold();
+
+public:
+  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
+
+  ~ParNewGeneration() {
+    for (uint i = 0; i < ParallelGCThreads; i++)
+        delete _task_queues->queue(i);
+
+    delete _task_queues;
+  }
+
+  virtual void ref_processor_init();
+  virtual Generation::Name kind()        { return Generation::ParNew; }
+  virtual const char* name() const;
+  virtual const char* short_name() const { return "ParNew"; }
+
+  // override
+  virtual bool refs_discovery_is_mt()     const {
+    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
+    return ParallelGCThreads > 1;
+  }
+
+  // Make the collection virtual.
+  virtual void collect(bool   full,
+                       bool   clear_all_soft_refs,
+                       size_t size, 
+                       bool   is_tlab);
+
+  // This needs to be visible to the closure function.
+  // "obj" is the object to be copied, "m" is a recent value of its mark
+  // that must not contain a forwarding pointer (though one might be
+  // inserted in "obj"s mark word by a parallel thread).
+  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
+			     oop obj, size_t obj_sz, markOop m) {
+    if (_avoid_promotion_undo) {
+       return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
+                                         		     obj, obj_sz, m);
+    }
+
+    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
+  }
+
+  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
+			     oop obj, size_t obj_sz, markOop m);
+
+  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
+			     oop obj, size_t obj_sz, markOop m);
+
+  // Push the given (from-space) object on the global overflow list.
+  void push_on_overflow_list(oop from_space_obj);
+
+  // If the global overflow list is non-empty, move some tasks from it
+  // onto "work_q" (which must be empty).  No more than 1/4 of the
+  // max_elems of "work_q" are moved.
+  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
+
+  // The task queues to be used by parallel GC threads.
+  ObjToScanQueueSet* task_queues() {
+    return _task_queues;
+  }
+
+  PLABStats* plab_stats() {
+    return &_plab_stats;
+  }
+
+  size_t desired_plab_sz() {
+    return _plab_stats.desired_plab_sz();
+  }
+
+  static oop real_forwardee(oop obj);
+
+  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
+};
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)cardTableExtension.cpp	1.33 07/05/05 17:05:26 JVM"
+#pragma ident "@(#)cardTableExtension.cpp	1.34 07/05/17 15:52:46 JVM"
 #endif
 /*
  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -517,12 +517,14 @@
 
 void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
   CardTableModRefBS::resize_covered_region(new_region);
+  debug_only(verify_guard();)
 }
 
 void CardTableExtension::resize_covered_region_by_end(int changed_region,
   						      MemRegion new_region) {
   assert(SafepointSynchronize::is_at_safepoint(), 
     "Only expect an expansion at the low end at a GC");
+  debug_only(verify_guard();)
 #ifdef ASSERT
   for (int k = 0; k < _cur_covered_regions; k++) {
     if (_covered[k].end() == new_region.end()) {
@@ -568,12 +570,15 @@
                   addr_for((jbyte*) _committed[ind].start()),
                   addr_for((jbyte*) _committed[ind].last()));
   }
+  debug_only(verify_guard();)
 }
 
 void CardTableExtension::resize_commit_uncommit(int changed_region,
 						MemRegion new_region) {
   // Commit new or uncommit old pages, if necessary.
   MemRegion cur_committed = _committed[changed_region];
+  assert(_covered[changed_region].end() == new_region.end(), 
+    "The ends of the regions are expected to match");
   // Extend the start of this _committed region to
   // to cover the start of any previous _committed region.
   // This forms overlapping regions, but never interior regions.
@@ -594,26 +599,55 @@
     "Starts should have proper alignment");
 #endif
 
-  jbyte* new_committed_start = byte_for(new_region.start());
+  jbyte* new_start = byte_for(new_region.start());
   // Round down because this is for the start address
-  HeapWord* new_committed_start_aligned =
-    (HeapWord*)align_size_down((uintptr_t)new_committed_start, 
-                             os::vm_page_size());
-  if (new_committed_start_aligned < cur_committed.start()) {
+  HeapWord* new_start_aligned =
+    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
+  // The guard page is always committed and should not be committed over.
+  // This method is used in cases where the generation is growing toward
+  // lower addresses but the guard region is still at the end of the
+  // card table.  That still makes sense when looking for writes
+  // off the end of the card table.
+  if (new_start_aligned < cur_committed.start()) {
     // Expand the committed region
+    //
+    // Case A
+    //						|+ guard +|
+    //				|+ cur committed +++++++++|
+    //			|+ new committed +++++++++++++++++|
+    //
+    // Case B
+    //						|+ guard +|
+    //			      |+ cur committed +|
+    //			|+ new committed +++++++|
+    //
+    // These are not expected because the calculation of the
+    // cur committed region and the new committed region
+    // share the same end for the covered region.
+    // Case C
+    //						|+ guard +|
+    //			      |+ cur committed +|
+    //			|+ new committed +++++++++++++++++|
+    // Case D
+    //						|+ guard +|
+    //			      |+ cur committed +++++++++++|
+    //			|+ new committed +++++++|
+
+    HeapWord* new_end_for_commit = 
+      MIN2(cur_committed.end(), _guard_region.start());
     MemRegion new_committed = 
-      MemRegion(new_committed_start_aligned, cur_committed.start());
-    assert(!new_committed.is_empty(), 
-	"The committed region is not expanding");
-    if (!os::commit_memory((char*)new_committed.start(),
-                           new_committed.byte_size())) {
-      vm_exit_out_of_memory(new_committed.byte_size(),
-                            "card table expansion");
+      MemRegion(new_start_aligned, new_end_for_commit);
+    if(!new_committed.is_empty()) {
+      if (!os::commit_memory((char*)new_committed.start(),
+                             new_committed.byte_size())) {
+        vm_exit_out_of_memory(new_committed.byte_size(),
+                              "card table expansion");
+      }
     }
-  } else if (new_committed_start_aligned > cur_committed.start()) {
+  } else if (new_start_aligned > cur_committed.start()) {
     // Shrink the committed region
     MemRegion uncommit_region = committed_unique_to_self(changed_region, 
-      MemRegion(cur_committed.start(), new_committed_start_aligned));
+      MemRegion(cur_committed.start(), new_start_aligned));
     if (!uncommit_region.is_empty()) {
       if (!os::uncommit_memory((char*)uncommit_region.start(),
                                uncommit_region.byte_size())) {
@@ -629,19 +663,20 @@
 void CardTableExtension::resize_update_committed_table(int changed_region,
 						       MemRegion new_region) {
 
-  jbyte* new_committed_start = byte_for(new_region.start());
+  jbyte* new_start = byte_for(new_region.start());
   // Set the new start of the committed region
-  HeapWord* new_committed_start_aligned =
-    (HeapWord*)align_size_down((uintptr_t)new_committed_start, 
+  HeapWord* new_start_aligned =
+    (HeapWord*)align_size_down((uintptr_t)new_start, 
                              os::vm_page_size());
-  MemRegion new_committed = MemRegion(new_committed_start_aligned, 
+  MemRegion new_committed = MemRegion(new_start_aligned, 
     _committed[changed_region].end());
   _committed[changed_region] = new_committed;
-  _committed[changed_region].set_start(new_committed_start_aligned);
+  _committed[changed_region].set_start(new_start_aligned);
 }
 
 void CardTableExtension::resize_update_card_table_entries(int changed_region,
 						          MemRegion new_region) {
+  debug_only(verify_guard();)
   MemRegion original_covered = _covered[changed_region];
   // Initialize the card entries.  Only consider the
   // region covered by the card table (_whole_heap)
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)parallelScavengeHeap.cpp	1.93 07/05/05 17:05:28 JVM"
+#pragma ident "@(#)parallelScavengeHeap.cpp	1.94 07/05/17 15:52:49 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -850,13 +850,6 @@
   }
 }
 
-#ifdef JVMPI_SUPPORT
-int ParallelScavengeHeap::addr_to_arena_id(void* addr) {
-  fatal("JVMPI is not supported by this collector");
-  return 0;
-}
-#endif // JVMPI_SUPPORT
-
 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
   assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)parallelScavengeHeap.hpp	1.60 07/05/05 17:05:28 JVM"
+#pragma ident "@(#)parallelScavengeHeap.hpp	1.61 07/05/17 15:52:51 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -210,10 +210,6 @@
 
   void print_heap_change(size_t prev_used);
 
-#ifdef JVMPI_SUPPORT
-  int addr_to_arena_id(void* addr);
-#endif // JVMPI_SUPPORT
-
   // Resize the young generation.  The reserved space for the
   // generation may be expanded in preparation for the resize.
   void resize_young_gen(size_t eden_size, size_t survivor_size);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)psMarkSweepDecorator.cpp	1.25 07/05/05 17:05:29 JVM"
+#pragma ident "@(#)psMarkSweepDecorator.cpp	1.26 07/05/17 15:52:53 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -409,9 +409,6 @@
     }
   }
 
-#ifdef JVMPI_SUPPORT
-  guarantee(!Universe::jvmpi_move_event_enabled(), "This collector does not work with JVMPI");
-#endif // JVMPI_SUPPORT
   const intx scan_interval = PrefetchScanIntervalInBytes;
   const intx copy_interval = PrefetchCopyIntervalInBytes;
 
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)markSweep.hpp	1.66 07/05/05 17:05:33 JVM"
+#pragma ident "@(#)markSweep.hpp	1.67 07/05/17 15:52:55 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -246,13 +246,3 @@
     _obj->set_mark(_mark);
   }
 };
-
-#ifdef JVMPI_SUPPORT
-class JVMPI_Object_Free : public ObjectClosure {
-  void do_object(oop obj) {
-    if (!obj->mark()->is_marked()) {
-      jvmpi::post_object_free_event(obj);
-    }
-  }
-};
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)collectedHeap.hpp	1.54 07/05/05 17:05:40 JVM"
+#pragma ident "@(#)collectedHeap.hpp	1.55 07/05/17 15:52:57 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -489,13 +489,6 @@
   inline void reset_promotion_should_fail();
 #endif	// #ifndef PRODUCT
 
-#ifdef JVMPI_SUPPORT
-  // If "addr" is a pointer into the (reserved?) heap, returns a positive
-  // number indicating the "arena" within the heap in which "addr" falls.
-  // Or else returns 0.
-  virtual int addr_to_arena_id(void* addr) = 0;
-#endif // JVMPI_SUPPORT
-
 #ifdef ASSERT
   static int fired_fake_oom() {
     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)collectedHeap.inline.hpp	1.45 07/05/05 17:05:40 JVM"
+#pragma ident "@(#)collectedHeap.inline.hpp	1.46 07/05/17 15:52:59 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -64,12 +64,6 @@
   obj->set_klass(klass());
   assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
          "missing blueprint");
-
-#ifdef JVMPI_SUPPORT
-  if (Universe::jvmpi_alloc_event_enabled()) {
-    Universe::jvmpi_object_alloc((oop)obj, size * wordSize /* no. of bytes */);
-  }
-#endif // JVMPI_SUPPORT
  
   // support for JVMTI VMObjectAlloc event (no-op if not enabled) 
   JvmtiExport::vm_object_alloc_event_collector(obj);
@@ -94,11 +88,7 @@
                                                 HeapWord* obj,
                                                 size_t size,
                                                 int length) {
-#ifdef JVMPI_SUPPORT
-  // Set array length before posting jvmpi/jvmti object alloc event 
-#else // !JVMPI_SUPPORT
   // Set array length before posting jvmti object alloc event 
-#endif // JVMPI_SUPPORT
   // in post_allocation_setup_common()
   assert(length >= 0, "length should be non-negative");
   ((arrayOop)obj)->set_length(length);
@@ -119,11 +109,7 @@
 
   // We may want to update this, is_noref objects might not be allocated in TLABs.
   HeapWord* result = NULL;
-#ifdef JVMPI_SUPPORT
-  if (UseTLAB && !Universe::jvmpi_slow_allocation()) {
-#else // !JVMPI_SUPPORT
   if (UseTLAB) {
-#endif // JVMPI_SUPPORT
     result = CollectedHeap::allocate_from_tlab(THREAD, size);
     if (result != NULL) {
       assert(!HAS_PENDING_EXCEPTION,
--- a/hotspot/src/share/vm/includeDB_compiler1	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/includeDB_compiler1	Fri May 25 00:49:14 2007 +0000
@@ -69,10 +69,6 @@
 c1_Compilation.cpp                      c1_ValueStack.hpp
 c1_Compilation.cpp                      ciEnv.hpp
 c1_Compilation.cpp                      debugInfoRec.hpp
-// #ifdef JVMPI_SUPPORT
-// jvmpi.cpp                               vframeArray.hpp
-// #endif // JVMPI_SUPPORT
-
 c1_Compilation.hpp                      exceptionHandlerTable.hpp
 c1_Compilation.hpp                      resourceArea.hpp
 
@@ -230,10 +226,6 @@
 c1_LIRAssembler_<arch>.cpp              ciArrayKlass.hpp
 c1_LIRAssembler_<arch>.cpp              ciInstance.hpp
 c1_LIRAssembler_<arch>.cpp              collectedHeap.hpp
-// #ifdef JVMPI_SUPPORT
-// c1_LIRAssembler_<arch>.cpp              jvmpi.hpp
-// c1_LIRAssembler_<arch>.cpp              jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 c1_LIRAssembler_<arch>.cpp              nativeInst_<arch>.hpp
 c1_LIRAssembler_<arch>.cpp              objArrayKlass.hpp
 c1_LIRAssembler_<arch>.cpp              sharedRuntime.hpp
--- a/hotspot/src/share/vm/includeDB_compiler2	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/includeDB_compiler2	Fri May 25 00:49:14 2007 +0000
@@ -64,6 +64,7 @@
 
 ad_<arch>_gen.cpp                       ad_<arch>.hpp
 ad_<arch>_gen.cpp                       cfgnode.hpp
+ad_<arch>_gen.cpp                       locknode.hpp
 
 ad_<arch>_misc.cpp                      ad_<arch>.hpp
 
@@ -175,6 +176,7 @@
 callnode.cpp                            runtime.hpp
 
 callnode.hpp                            connode.hpp
+callnode.hpp                            escape.hpp
 callnode.hpp                            mulnode.hpp
 callnode.hpp                            multnode.hpp
 callnode.hpp                            opcodes.hpp
@@ -312,6 +314,7 @@
 compile.cpp                             node.hpp
 compile.cpp                             oopMap.hpp
 compile.cpp                             opcodes.hpp
+compile.cpp                             output.hpp
 compile.cpp                             parse.hpp
 compile.cpp                             phaseX.hpp
 compile.cpp                             rootnode.hpp
@@ -330,9 +333,6 @@
 compile.hpp                             deoptimization.hpp
 compile.hpp                             dict.hpp
 compile.hpp                             exceptionHandlerTable.hpp
-// #ifdef JVMPI_SUPPORT
-// compile.hpp                             jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 compile.hpp                             phase.hpp
 compile.hpp                             port.hpp
 compile.hpp                             regmask.hpp
@@ -408,6 +408,7 @@
 domgraph.cpp                            rootnode.hpp
 domgraph.cpp                            vectset.hpp
 
+escape.cpp                              allocation.hpp
 escape.cpp                              bcEscapeAnalyzer.hpp
 escape.cpp                              callnode.hpp
 escape.cpp                              cfgnode.hpp
@@ -526,11 +527,7 @@
 java.cpp                                compile.hpp
 java.cpp                                compiledIC.hpp
 java.cpp                                indexSet.hpp
-java.cpp                                loopnode.hpp
 java.cpp                                methodLiveness.hpp
-java.cpp                                output.hpp
-java.cpp                                parse.hpp
-java.cpp                                regalloc.hpp
 java.cpp                                runtime.hpp
 
 lcm.cpp                                 ad_<arch>.hpp
@@ -949,6 +946,7 @@
 runtime.cpp                             vmSymbols.hpp
 runtime.cpp                             vtableStubs.hpp
 
+runtime.hpp                             biasedLocking.hpp
 runtime.hpp                             codeBlob.hpp
 runtime.hpp                             deoptimization.hpp
 runtime.hpp                             machnode.hpp
--- a/hotspot/src/share/vm/includeDB_core	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/includeDB_core	Fri May 25 00:49:14 2007 +0000
@@ -339,14 +339,6 @@
 biasedLocking.hpp                       growableArray.hpp
 biasedLocking.hpp                       handles.hpp
 
-binaryTreeDictionary.cpp                allocationStats.hpp
-binaryTreeDictionary.cpp                binaryTreeDictionary.hpp
-binaryTreeDictionary.cpp                globals.hpp
-binaryTreeDictionary.cpp                ostream.hpp
-
-binaryTreeDictionary.hpp                freeBlockDictionary.hpp
-binaryTreeDictionary.hpp                freeList.hpp
-
 bitMap.cpp                              bitMap.hpp
 bitMap.cpp                              bitMap.inline.hpp
 bitMap.cpp                              copy.hpp
@@ -449,10 +441,6 @@
 cInterpreter.cpp                        interfaceSupport.hpp
 cInterpreter.cpp                        interpreterRuntime.hpp
 cInterpreter.cpp                        interpreter_<arch>.hpp
-// #ifdef JVMPI_SUPPORT
-// cInterpreter.cpp                        jvmpi.hpp
-// cInterpreter.cpp                        jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 cInterpreter.cpp                        jvmtiExport.hpp
 cInterpreter.cpp                        objArrayKlass.hpp
 cInterpreter.cpp                        oop.inline.hpp
@@ -571,10 +559,6 @@
 ciEnv.cpp                               compilerOracle.hpp
 ciEnv.cpp                               dtrace.hpp
 ciEnv.cpp                               init.hpp
-// #ifdef JVMPI_SUPPORT
-// ciEnv.cpp                               jvmpi.hpp
-// ciEnv.cpp                               jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 ciEnv.cpp                               jvmtiExport.hpp
 ciEnv.cpp                               linkResolver.hpp
 ciEnv.cpp                               methodDataOop.hpp
@@ -595,9 +579,6 @@
 ciEnv.hpp                               debugInfoRec.hpp
 ciEnv.hpp                               dependencies.hpp
 ciEnv.hpp                               exceptionHandlerTable.hpp
-// #ifdef JVMPI_SUPPORT
-// ciEnv.hpp                               jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 ciEnv.hpp                               oopMap.hpp
 ciEnv.hpp                               thread.hpp
 
@@ -880,10 +861,6 @@
 classFileParser.cpp                     instanceKlass.hpp
 classFileParser.cpp                     javaCalls.hpp
 classFileParser.cpp                     javaClasses.hpp
-// #ifdef JVMPI_SUPPORT
-// classFileParser.cpp                     jvmpi.hpp
-// classFileParser.cpp                     jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 classFileParser.cpp                     jvmtiExport.hpp
 classFileParser.cpp                     klass.inline.hpp
 classFileParser.cpp                     klassOop.hpp
@@ -905,9 +882,6 @@
 classFileParser.hpp                     accessFlags.hpp
 classFileParser.hpp                     classFileStream.hpp
 classFileParser.hpp                     handles.inline.hpp
-// #ifdef JVMPI_SUPPORT
-// classFileParser.hpp                     jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 classFileParser.hpp                     oop.inline.hpp
 classFileParser.hpp                     resourceArea.hpp
 classFileParser.hpp                     typeArrayOop.hpp
@@ -978,12 +952,6 @@
 classify.hpp                            oop.hpp
 classify.hpp                            oop.inline.hpp
 
-cmsLockVerifier.cpp                     cmsLockVerifier.hpp
-cmsLockVerifier.cpp                     concurrentMarkSweepThread.hpp
-cmsLockVerifier.cpp                     vmThread.hpp
-
-cmsLockVerifier.hpp                     mutex.hpp
-
 codeBlob.cpp                            allocation.inline.hpp
 codeBlob.cpp                            bytecode.hpp
 codeBlob.cpp                            codeBlob.hpp
@@ -1064,28 +1032,6 @@
 compactPermGen.hpp                      generation.hpp
 compactPermGen.hpp                      permGen.hpp
 
-compactibleFreeListSpace.cpp            allocation.inline.hpp
-compactibleFreeListSpace.cpp            blockOffsetTable.inline.hpp
-compactibleFreeListSpace.cpp            cmsLockVerifier.hpp
-compactibleFreeListSpace.cpp            collectedHeap.hpp
-compactibleFreeListSpace.cpp            compactibleFreeListSpace.hpp
-compactibleFreeListSpace.cpp            concurrentMarkSweepGeneration.inline.hpp
-compactibleFreeListSpace.cpp            concurrentMarkSweepThread.hpp
-compactibleFreeListSpace.cpp            copy.hpp
-compactibleFreeListSpace.cpp            globals.hpp
-compactibleFreeListSpace.cpp            handles.inline.hpp
-compactibleFreeListSpace.cpp            init.hpp
-compactibleFreeListSpace.cpp            java.hpp
-compactibleFreeListSpace.cpp            liveRange.hpp
-compactibleFreeListSpace.cpp            oop.inline.hpp
-compactibleFreeListSpace.cpp            resourceArea.hpp
-compactibleFreeListSpace.cpp            universe.inline.hpp
-compactibleFreeListSpace.cpp            vmThread.hpp
-
-compactibleFreeListSpace.hpp            binaryTreeDictionary.hpp
-compactibleFreeListSpace.hpp            freeList.hpp
-compactibleFreeListSpace.hpp            space.hpp
-
 compactingPermGenGen.cpp                compactingPermGenGen.hpp
 compactingPermGenGen.cpp                concurrentMarkSweepGeneration.inline.hpp
 compactingPermGenGen.cpp                filemap.hpp
@@ -1125,10 +1071,6 @@
 compilationPolicy.hpp                   nmethod.hpp
 compilationPolicy.hpp                   vm_operations.hpp
 
-// #ifdef JVMPI_SUPPORT
-// compile.hpp				jvmpi.h
-// #endif // JVMPI_SUPPORT
-
 compileBroker.cpp                       allocation.inline.hpp
 compileBroker.cpp                       arguments.hpp
 compileBroker.cpp                       codeCache.hpp
@@ -1140,10 +1082,6 @@
 compileBroker.cpp                       init.hpp
 compileBroker.cpp                       interfaceSupport.hpp
 compileBroker.cpp                       javaCalls.hpp
-// #ifdef JVMPI_SUPPORT
-// compileBroker.cpp                       jvmpi.hpp
-// compileBroker.cpp                       jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 compileBroker.cpp                       linkResolver.hpp
 compileBroker.cpp                       methodDataOop.hpp
 compileBroker.cpp                       methodOop.hpp
@@ -1258,81 +1196,6 @@
 
 compressedStream.hpp                    allocation.hpp
 
-concurrentGCThread.cpp                  concurrentGCThread.hpp
-concurrentGCThread.cpp                  init.hpp
-concurrentGCThread.cpp                  instanceRefKlass.hpp
-concurrentGCThread.cpp                  interfaceSupport.hpp
-concurrentGCThread.cpp                  java.hpp
-concurrentGCThread.cpp                  javaCalls.hpp
-concurrentGCThread.cpp                  oop.inline.hpp
-concurrentGCThread.cpp                  systemDictionary.hpp
-
-concurrentGCThread.hpp                  thread.hpp
-
-concurrentMarkSweepGeneration.cpp       cardTableRS.hpp
-concurrentMarkSweepGeneration.cpp       cmsAdaptiveSizePolicy.hpp
-concurrentMarkSweepGeneration.cpp       cmsGCAdaptivePolicyCounters.hpp
-concurrentMarkSweepGeneration.cpp       codeCache.hpp
-concurrentMarkSweepGeneration.cpp       collectedHeap.inline.hpp
-concurrentMarkSweepGeneration.cpp       collectorCounters.hpp
-concurrentMarkSweepGeneration.cpp       collectorPolicy.hpp
-concurrentMarkSweepGeneration.cpp       compactibleFreeListSpace.hpp
-concurrentMarkSweepGeneration.cpp       concurrentMarkSweepGeneration.inline.hpp
-concurrentMarkSweepGeneration.cpp       concurrentMarkSweepThread.hpp
-concurrentMarkSweepGeneration.cpp       gcLocker.inline.hpp
-concurrentMarkSweepGeneration.cpp       genCollectedHeap.hpp
-concurrentMarkSweepGeneration.cpp       genMarkSweep.hpp
-concurrentMarkSweepGeneration.cpp       genOopClosures.inline.hpp
-concurrentMarkSweepGeneration.cpp       globals_extension.hpp
-concurrentMarkSweepGeneration.cpp       handles.inline.hpp
-concurrentMarkSweepGeneration.cpp       isGCActiveMark.hpp
-concurrentMarkSweepGeneration.cpp       java.hpp
-concurrentMarkSweepGeneration.cpp       jvmtiExport.hpp
-concurrentMarkSweepGeneration.cpp       oop.inline.hpp
-concurrentMarkSweepGeneration.cpp       referencePolicy.hpp
-concurrentMarkSweepGeneration.cpp       resourceArea.hpp
-concurrentMarkSweepGeneration.cpp       runtimeService.hpp
-concurrentMarkSweepGeneration.cpp       symbolTable.hpp
-concurrentMarkSweepGeneration.cpp       systemDictionary.hpp
-concurrentMarkSweepGeneration.cpp       vmCMSOperations.hpp
-concurrentMarkSweepGeneration.cpp       vmThread.hpp
-
-concurrentMarkSweepGeneration.hpp       bitMap.hpp
-concurrentMarkSweepGeneration.hpp       freeBlockDictionary.hpp
-concurrentMarkSweepGeneration.hpp       gSpaceCounters.hpp
-concurrentMarkSweepGeneration.hpp       gcStats.hpp
-concurrentMarkSweepGeneration.hpp       generation.hpp
-concurrentMarkSweepGeneration.hpp       generationCounters.hpp
-concurrentMarkSweepGeneration.hpp       mutexLocker.hpp
-concurrentMarkSweepGeneration.hpp       taskqueue.hpp
-concurrentMarkSweepGeneration.hpp       virtualspace.hpp
-concurrentMarkSweepGeneration.hpp       yieldingWorkgroup.hpp
-
-concurrentMarkSweepGeneration.inline.hpp cmsLockVerifier.hpp
-concurrentMarkSweepGeneration.inline.hpp compactibleFreeListSpace.hpp
-concurrentMarkSweepGeneration.inline.hpp concurrentMarkSweepGeneration.hpp
-concurrentMarkSweepGeneration.inline.hpp concurrentMarkSweepThread.hpp
-concurrentMarkSweepGeneration.inline.hpp defNewGeneration.hpp
-concurrentMarkSweepGeneration.inline.hpp gcUtil.hpp
-
-concurrentMarkSweepThread.cpp           concurrentMarkSweepGeneration.inline.hpp
-concurrentMarkSweepThread.cpp           concurrentMarkSweepThread.hpp
-concurrentMarkSweepThread.cpp           genCollectedHeap.hpp
-concurrentMarkSweepThread.cpp           init.hpp
-concurrentMarkSweepThread.cpp           instanceRefKlass.hpp
-concurrentMarkSweepThread.cpp           interfaceSupport.hpp
-concurrentMarkSweepThread.cpp           java.hpp
-concurrentMarkSweepThread.cpp           javaCalls.hpp
-concurrentMarkSweepThread.cpp           mutexLocker.hpp
-concurrentMarkSweepThread.cpp           oop.inline.hpp
-concurrentMarkSweepThread.cpp           os.hpp
-concurrentMarkSweepThread.cpp           systemDictionary.hpp
-concurrentMarkSweepThread.cpp           vmThread.hpp
-
-concurrentMarkSweepThread.hpp           concurrentGCThread.hpp
-concurrentMarkSweepThread.hpp           concurrentMarkSweepGeneration.hpp
-concurrentMarkSweepThread.hpp           thread_<os_family>.inline.hpp
-
 constMethodKlass.cpp                    constMethodKlass.hpp
 constMethodKlass.cpp                    constMethodOop.hpp
 constMethodKlass.cpp                    gcLocker.hpp
@@ -1393,6 +1256,9 @@
 constantTag.hpp                         jvm.h
 constantTag.hpp                         top.hpp
 
+copy.cpp                                copy.hpp
+copy.cpp                                sharedRuntime.hpp
+
 copy.hpp                                stubRoutines.hpp
 
 copy_<arch>.hpp                         generate_platform_dependent_include
@@ -1506,10 +1372,6 @@
 defNewGeneration.cpp                    instanceRefKlass.hpp
 defNewGeneration.cpp                    iterator.hpp
 defNewGeneration.cpp                    java.hpp
-// #ifdef JVMPI_SUPPORT
-// defNewGeneration.cpp                    jvmpi.hpp
-// defNewGeneration.cpp                    jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 defNewGeneration.cpp                    oop.inline.hpp
 defNewGeneration.cpp                    referencePolicy.hpp
 defNewGeneration.cpp                    space.hpp
@@ -1578,9 +1440,6 @@
 dictionary.cpp                          classLoadingService.hpp
 dictionary.cpp                          dictionary.hpp
 dictionary.cpp                          hashtable.inline.hpp
-// #ifdef JVMPI_SUPPORT
-// dictionary.cpp                          jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 dictionary.cpp                          jvmtiRedefineClasses.hpp
 dictionary.cpp                          oop.inline.hpp
 dictionary.cpp                          systemDictionary.hpp
@@ -1710,9 +1569,6 @@
 forte.cpp                               collectedHeap.inline.hpp
 forte.cpp                               debugInfoRec.hpp
 forte.cpp                               forte.hpp
-// #ifdef JVMPI_SUPPORT
-// forte.cpp                               jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 forte.cpp                               oop.inline.hpp
 forte.cpp                               oop.inline2.hpp
 forte.cpp                               pcDesc.hpp
@@ -1799,27 +1655,6 @@
 
 frame_<arch>.inline.hpp                 generate_platform_dependent_include
 
-freeBlockDictionary.cpp                 freeBlockDictionary.hpp
-freeBlockDictionary.cpp                 thread_<os_family>.inline.hpp
-
-freeBlockDictionary.hpp                 allocation.hpp
-freeBlockDictionary.hpp                 debug.hpp
-freeBlockDictionary.hpp                 globalDefinitions.hpp
-freeBlockDictionary.hpp                 memRegion.hpp
-freeBlockDictionary.hpp                 mutex.hpp
-freeBlockDictionary.hpp                 ostream.hpp
-
-freeChunk.cpp                           copy.hpp
-freeChunk.cpp                           freeBlockDictionary.hpp
-
-freeList.cpp                            freeBlockDictionary.hpp
-freeList.cpp                            freeList.hpp
-freeList.cpp                            globals.hpp
-freeList.cpp                            mutex.hpp
-freeList.cpp                            sharedHeap.hpp
-
-freeList.hpp                            allocationStats.hpp
-
 gcLocker.cpp                            gcLocker.inline.hpp
 gcLocker.cpp                            sharedHeap.hpp
 
@@ -1850,10 +1685,6 @@
 genCollectedHeap.cpp                    handles.inline.hpp
 genCollectedHeap.cpp                    icBuffer.hpp
 genCollectedHeap.cpp                    java.hpp
-// #ifdef JVMPI_SUPPORT
-// genCollectedHeap.cpp                    jvmpi.hpp
-// genCollectedHeap.cpp                    jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 genCollectedHeap.cpp                    memoryService.hpp
 genCollectedHeap.cpp                    oop.inline.hpp
 genCollectedHeap.cpp                    oop.inline2.hpp
@@ -1887,10 +1718,6 @@
 genMarkSweep.cpp                        icBuffer.hpp
 genMarkSweep.cpp                        instanceRefKlass.hpp
 genMarkSweep.cpp                        javaClasses.hpp
-// #ifdef JVMPI_SUPPORT
-// genMarkSweep.cpp                        jvmpi.hpp
-// genMarkSweep.cpp                        jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 genMarkSweep.cpp                        jvmtiExport.hpp
 genMarkSweep.cpp                        modRefBarrierSet.hpp
 genMarkSweep.cpp                        oop.inline.hpp
@@ -1909,7 +1736,7 @@
 genOopClosures.hpp                      oop.hpp
 
 genOopClosures.inline.hpp               cardTableRS.hpp
-genOopClosures.inline.hpp               concurrentMarkSweepGeneration.hpp
+genOopClosures.inline.hpp               concurrentMarkSweepGeneration.inline.hpp
 genOopClosures.inline.hpp               concurrentMarkSweepThread.hpp
 genOopClosures.inline.hpp               defNewGeneration.hpp
 genOopClosures.inline.hpp               genCollectedHeap.hpp
@@ -2179,10 +2006,6 @@
 instanceKlass.cpp                       instanceOop.hpp
 instanceKlass.cpp                       javaCalls.hpp
 instanceKlass.cpp                       javaClasses.hpp
-// #ifdef JVMPI_SUPPORT
-// instanceKlass.cpp                       jvmpi.hpp
-// instanceKlass.cpp                       jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 instanceKlass.cpp                       jvmti.h
 instanceKlass.cpp                       jvmtiExport.hpp
 instanceKlass.cpp                       jvmtiRedefineClasses.hpp
@@ -2289,10 +2112,6 @@
 interp_masm_<arch>.cpp                  interp_masm_<arch>.hpp
 interp_masm_<arch>.cpp                  interpreterRuntime.hpp
 interp_masm_<arch>.cpp                  interpreter_<arch>.hpp
-// #ifdef JVMPI_SUPPORT
-// interp_masm_<arch>.cpp                  jvmpi.hpp
-// interp_masm_<arch>.cpp                  jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 interp_masm_<arch>.cpp                  jvmtiExport.hpp
 interp_masm_<arch>.cpp                  jvmtiThreadState.hpp
 interp_masm_<arch>.cpp                  markOop.hpp
@@ -2315,10 +2134,6 @@
 interpreter.cpp                         interpreter.hpp
 interpreter.cpp                         interpreterRuntime.hpp
 interpreter.cpp                         interpreter_<arch>.hpp
-// #ifdef JVMPI_SUPPORT
-// interpreter.cpp                         jvmpi.hpp
-// interpreter.cpp                         jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 interpreter.cpp                         jvmtiExport.hpp
 interpreter.cpp                         methodDataOop.hpp
 interpreter.cpp                         methodOop.hpp
@@ -2405,10 +2220,6 @@
 interpreter_<arch>.cpp                  frame.inline.hpp
 interpreter_<arch>.cpp                  interpreterRuntime.hpp
 interpreter_<arch>.cpp                  interpreter_<arch>.hpp
-// #ifdef JVMPI_SUPPORT
-// interpreter_<arch>.cpp                  jvmpi.hpp
-// interpreter_<arch>.cpp                  jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 interpreter_<arch>.cpp                  jvmtiExport.hpp
 interpreter_<arch>.cpp                  jvmtiThreadState.hpp
 interpreter_<arch>.cpp                  methodDataOop.hpp
@@ -2463,10 +2274,6 @@
 java.cpp                                instanceOop.hpp
 java.cpp                                interfaceSupport.hpp
 java.cpp                                java.hpp
-// #ifdef JVMPI_SUPPORT
-// java.cpp                                jvmpi.hpp
-// java.cpp                                jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 java.cpp                                jvmtiExport.hpp
 java.cpp                                memprofiler.hpp
 java.cpp                                methodOop.hpp
@@ -2592,10 +2399,6 @@
 jni.cpp                                 jniTypes_<arch>.hpp
 jni.cpp                                 jvm.h
 jni.cpp                                 jvm_misc.hpp
-// #ifdef JVMPI_SUPPORT
-// jni.cpp 				jvmpi.hpp
-// jni.cpp					jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 jni.cpp                                 jvmtiExport.hpp
 jni.cpp                                 jvmtiThreadState.hpp
 jni.cpp                                 linkResolver.hpp
@@ -2687,10 +2490,6 @@
 jvm.cpp                                 jvm.h
 jvm.cpp                                 jvm_<os_family>.h
 jvm.cpp                                 jvm_misc.hpp
-// #ifdef JVMPI_SUPPORT
-// jvm.cpp                                 jvmpi.hpp
-// jvm.cpp                                 jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 jvm.cpp                                 jvmtiExport.hpp
 jvm.cpp                                 jvmtiThreadState.hpp
 jvm.cpp                                 management.hpp
@@ -2723,38 +2522,6 @@
 jvm_misc.hpp                            handles.hpp
 jvm_misc.hpp                            jni.h
 
-// #ifdef JVMPI_SUPPORT
-// jvmpi.cpp                               bytecode.hpp
-// jvmpi.cpp                               collectedHeap.inline.hpp
-// jvmpi.cpp                               gcLocker.inline.hpp
-// jvmpi.cpp                               interfaceSupport.hpp
-// jvmpi.cpp                               javaCalls.hpp
-// jvmpi.cpp                               jvmpi.hpp
-// jvmpi.cpp                               jvmpi.inline.hpp
-// jvmpi.cpp                               mutex_<os_family>.inline.hpp
-// jvmpi.cpp                               objArrayKlass.hpp
-// jvmpi.cpp                               objectMonitor.hpp
-// jvmpi.cpp                               objectMonitor.inline.hpp
-// jvmpi.cpp                               oop.inline2.hpp
-// jvmpi.cpp                               osThread.hpp
-// jvmpi.cpp                               reflectionUtils.hpp
-// jvmpi.cpp                               resourceArea.hpp
-// jvmpi.cpp                               signature.hpp
-// jvmpi.cpp                               symbolTable.hpp
-// jvmpi.cpp                               systemDictionary.hpp
-// jvmpi.cpp                               universe.inline.hpp
-// jvmpi.cpp                               vframe.hpp
-// jvmpi.cpp                               vmSymbols.hpp
-// 
-// jvmpi.h                                 globalDefinitions.hpp
-// 
-// jvmpi.hpp                               jvmpi.h
-// jvmpi.hpp                               rawMonitor.hpp
-// 
-// jvmpi.inline.hpp                        java.hpp
-// jvmpi.inline.hpp                        jvmpi.hpp
-// #endif // JVMPI_SUPPORT
-
 jvmtiAgentThread.hpp                    jvmtiEnv.hpp
 
 jvmtiClassFileReconstituter.cpp         bytecodeStream.hpp
@@ -3061,9 +2828,6 @@
 klass.cpp                               atomic.hpp
 klass.cpp                               collectedHeap.inline.hpp
 klass.cpp                               instanceKlass.hpp
-// #ifdef JVMPI_SUPPORT
-// klass.cpp                               jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 klass.cpp                               klass.inline.hpp
 klass.cpp                               klassOop.hpp
 klass.cpp                               oop.inline.hpp
@@ -3447,10 +3211,6 @@
 monitorChunk.hpp                        synchronizer.hpp
 
 mutex.cpp                               events.hpp
-// #ifdef JVMPI_SUPPORT
-// mutex.cpp                               jvmpi.hpp
-// mutex.cpp                               jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 mutex.cpp                               mutex.hpp
 mutex.cpp                               mutex_<os_family>.inline.hpp
 mutex.cpp                               osThread.hpp
@@ -3472,9 +3232,6 @@
 
 mutex_<os_family>.cpp                   events.hpp
 mutex_<os_family>.cpp                   interfaceSupport.hpp
-// #ifdef JVMPI_SUPPORT
-// mutex_<os_family>.cpp                   jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 mutex_<os_family>.cpp                   mutex.hpp
 mutex_<os_family>.cpp                   mutex_<os_family>.inline.hpp
 mutex_<os_family>.cpp                   thread_<os_family>.inline.hpp
@@ -3530,9 +3287,6 @@
 nmethod.cpp                             disassembler_<arch>.hpp
 nmethod.cpp                             dtrace.hpp
 nmethod.cpp                             events.hpp
-// #ifdef JVMPI_SUPPORT
-// nmethod.cpp                             jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 nmethod.cpp                             jvmtiRedefineClasses.hpp
 nmethod.cpp                             methodDataOop.hpp
 nmethod.cpp                             nmethod.hpp
@@ -3543,9 +3297,6 @@
 nmethod.cpp                             xmlstream.hpp
 
 nmethod.hpp                             codeBlob.hpp
-// #ifdef JVMPI_SUPPORT
-// nmethod.hpp                             jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 nmethod.hpp                             pcDesc.hpp
 
 objArrayKlass.cpp                       collectedHeap.inline.hpp
@@ -3554,9 +3305,6 @@
 objArrayKlass.cpp                       genOopClosures.inline.hpp
 objArrayKlass.cpp                       handles.inline.hpp
 objArrayKlass.cpp                       instanceKlass.hpp
-// #ifdef JVMPI_SUPPORT
-// objArrayKlass.cpp                       jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 objArrayKlass.cpp                       mutexLocker.hpp
 objArrayKlass.cpp                       objArrayKlass.hpp
 objArrayKlass.cpp                       objArrayKlassKlass.hpp
@@ -3596,10 +3344,6 @@
 
 objectMonitor_<os_family>.cpp           dtrace.hpp
 objectMonitor_<os_family>.cpp           interfaceSupport.hpp
-// #ifdef JVMPI_SUPPORT
-// objectMonitor_<os_family>.cpp           jvmpi.hpp
-// objectMonitor_<os_family>.cpp           jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 objectMonitor_<os_family>.cpp           objectMonitor.hpp
 objectMonitor_<os_family>.cpp           objectMonitor.inline.hpp
 objectMonitor_<os_family>.cpp           oop.inline.hpp
@@ -3619,10 +3363,6 @@
 oop.cpp                                 copy.hpp
 oop.cpp                                 handles.inline.hpp
 oop.cpp                                 javaClasses.hpp
-// #ifdef JVMPI_SUPPORT
-// oop.cpp                                 jvmpi.hpp
-// oop.cpp                                 jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 oop.cpp                                 oop.inline.hpp
 oop.cpp                                 parNewGeneration.hpp
 oop.cpp                                 thread_<os_family>.inline.hpp
@@ -3759,9 +3499,6 @@
 os.cpp                                  javaClasses.hpp
 os.cpp                                  jvm.h
 os.cpp                                  jvm_misc.hpp
-// #ifdef JVMPI_SUPPORT
-// os.cpp                                  jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 os.cpp                                  mutexLocker.hpp
 os.cpp                                  oop.inline.hpp
 os.cpp                                  os.hpp
@@ -3797,9 +3534,6 @@
 os_<os_arch>.cpp                        jvm.h
 os_<os_arch>.cpp                        jvm_<os_family>.h
 os_<os_arch>.cpp                        jvm_misc.hpp
-// #ifdef JVMPI_SUPPORT
-// os_<os_arch>.cpp                        jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 os_<os_arch>.cpp                        mutexLocker.hpp
 os_<os_arch>.cpp                        mutex_<os_family>.inline.hpp
 os_<os_arch>.cpp                        nativeInst_<arch>.hpp
@@ -3838,9 +3572,6 @@
 os_<os_family>.cpp                      jvm.h
 os_<os_family>.cpp                      jvm_<os_family>.h
 os_<os_family>.cpp                      jvm_misc.hpp
-// #ifdef JVMPI_SUPPORT
-// os_<os_family>.cpp                      jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 os_<os_family>.cpp                      mutexLocker.hpp
 os_<os_family>.cpp                      mutex_<os_family>.inline.hpp
 os_<os_family>.cpp                      nativeInst_<arch>.hpp
@@ -3904,42 +3635,6 @@
 ostream.hpp                             allocation.hpp
 ostream.hpp                             timer.hpp
 
-parGCAllocBuffer.cpp                    arrayOop.hpp
-parGCAllocBuffer.cpp                    oop.inline.hpp
-parGCAllocBuffer.cpp                    parGCAllocBuffer.hpp
-parGCAllocBuffer.cpp                    sharedHeap.hpp
-
-parGCAllocBuffer.hpp                    allocation.hpp
-parGCAllocBuffer.hpp                    globalDefinitions.hpp
-parGCAllocBuffer.hpp                    threadLocalAllocBuffer.hpp
-
-parNewGeneration.cpp                    adaptiveSizePolicy.hpp
-parNewGeneration.cpp                    ageTable.hpp
-parNewGeneration.cpp                    concurrentMarkSweepGeneration.inline.hpp
-parNewGeneration.cpp                    copy.hpp
-parNewGeneration.cpp                    defNewGeneration.inline.hpp
-parNewGeneration.cpp                    genCollectedHeap.hpp
-parNewGeneration.cpp                    genOopClosures.inline.hpp
-parNewGeneration.cpp                    generation.hpp
-parNewGeneration.cpp                    generation.inline.hpp
-parNewGeneration.cpp                    globalDefinitions.hpp
-parNewGeneration.cpp                    handles.hpp
-parNewGeneration.cpp                    handles.inline.hpp
-parNewGeneration.cpp                    java.hpp
-parNewGeneration.cpp                    objArrayOop.hpp
-parNewGeneration.cpp                    oop.inline.hpp
-parNewGeneration.cpp                    parGCAllocBuffer.hpp
-parNewGeneration.cpp                    parNewGeneration.hpp
-parNewGeneration.cpp                    referencePolicy.hpp
-parNewGeneration.cpp                    resourceArea.hpp
-parNewGeneration.cpp                    sharedHeap.hpp
-parNewGeneration.cpp                    space.hpp
-parNewGeneration.cpp                    workgroup.hpp
-
-parNewGeneration.hpp                    defNewGeneration.hpp
-parNewGeneration.hpp                    parGCAllocBuffer.hpp
-parNewGeneration.hpp                    taskqueue.hpp
-
 pcDesc.cpp                              debugInfoRec.hpp
 pcDesc.cpp                              nmethod.hpp
 pcDesc.cpp                              pcDesc.hpp
@@ -4045,14 +3740,6 @@
 privilegedStack.hpp                     oopsHierarchy.hpp
 privilegedStack.hpp                     vframe.hpp
 
-// #ifdef JVMPI_SUPPORT
-// rawMonitor.cpp                          allocation.inline.hpp
-// rawMonitor.cpp                          rawMonitor.hpp
-// rawMonitor.cpp                          thread_<os_family>.inline.hpp
-// 
-// rawMonitor.hpp                          objectMonitor.hpp
-// #endif // JVMPI_SUPPORT
-
 referencePolicy.cpp                     arguments.hpp
 referencePolicy.cpp                     globals.hpp
 referencePolicy.cpp                     javaClasses.hpp
@@ -4318,10 +4005,6 @@
 sharedRuntime.cpp                       interpreterRuntime.hpp
 sharedRuntime.cpp                       interpreter_<arch>.hpp
 sharedRuntime.cpp                       javaCalls.hpp
-// #ifdef JVMPI_SUPPORT
-// sharedRuntime.cpp                       jvmpi.hpp
-// sharedRuntime.cpp                       jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 sharedRuntime.cpp                       jvmtiExport.hpp
 sharedRuntime.cpp                       nativeInst_<arch>.hpp
 sharedRuntime.cpp                       nativeLookup.hpp
@@ -4353,9 +4036,6 @@
 sharedRuntime_<arch>.cpp                icBuffer.hpp
 sharedRuntime_<arch>.cpp                interpreter.hpp
 sharedRuntime_<arch>.cpp                interpreter_<arch>.hpp
-// #ifdef JVMPI_SUPPORT
-// sharedRuntime_<arch>.cpp                jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 sharedRuntime_<arch>.cpp                sharedRuntime.hpp
 sharedRuntime_<arch>.cpp                vframeArray.hpp
 sharedRuntime_<arch>.cpp                vmreg_<arch>.inline.hpp
@@ -4393,10 +4073,6 @@
 space.cpp                               genCollectedHeap.hpp
 space.cpp                               globalDefinitions.hpp
 space.cpp                               java.hpp
-// #ifdef JVMPI_SUPPORT
-// space.cpp                               jvmpi.hpp
-// space.cpp                               jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 space.cpp                               liveRange.hpp
 space.cpp                               markSweep.hpp
 space.cpp                               oop.inline.hpp
@@ -4560,9 +4236,6 @@
 
 symbolKlass.cpp                         gcLocker.hpp
 symbolKlass.cpp                         handles.inline.hpp
-// #ifdef JVMPI_SUPPORT
-// symbolKlass.cpp                         jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 symbolKlass.cpp                         oop.inline.hpp
 symbolKlass.cpp                         symbolKlass.hpp
 symbolKlass.cpp                         symbolOop.hpp
@@ -4598,9 +4271,6 @@
 synchronizer.cpp                        events.hpp
 synchronizer.cpp                        handles.inline.hpp
 synchronizer.cpp                        interfaceSupport.hpp
-// #ifdef JVMPI_SUPPORT
-// synchronizer.cpp                        jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 synchronizer.cpp                        markOop.hpp
 synchronizer.cpp                        mutexLocker.hpp
 synchronizer.cpp                        objectMonitor.hpp
@@ -4634,9 +4304,6 @@
 systemDictionary.cpp                    java.hpp
 systemDictionary.cpp                    javaCalls.hpp
 systemDictionary.cpp                    javaClasses.hpp
-// #ifdef JVMPI_SUPPORT
-// systemDictionary.cpp                    jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 systemDictionary.cpp                    jvmtiEnvBase.hpp
 systemDictionary.cpp                    klass.inline.hpp
 systemDictionary.cpp                    loaderConstraints.hpp
@@ -4707,10 +4374,6 @@
 tenuredGeneration.cpp                   collectorCounters.hpp
 tenuredGeneration.cpp                   generation.inline.hpp
 tenuredGeneration.cpp                   generationSpec.hpp
-// #ifdef JVMPI_SUPPORT
-// tenuredGeneration.cpp                   jvmpi.hpp
-// tenuredGeneration.cpp                   jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 tenuredGeneration.cpp                   java.hpp
 tenuredGeneration.cpp                   oop.inline.hpp
 tenuredGeneration.cpp                   parGCAllocBuffer.hpp
@@ -4747,10 +4410,6 @@
 thread.cpp                              javaClasses.hpp
 thread.cpp                              jniPeriodicChecker.hpp
 thread.cpp                              jvm_misc.hpp
-// #ifdef JVMPI_SUPPORT
-// thread.cpp				jvmpi.hpp
-// thread.cpp                              jvmpi.inline.hpp
-// #endif // JVMPI_SUPPORT
 thread.cpp                              jvmtiExport.hpp
 thread.cpp                              jvmtiThreadState.hpp
 thread.cpp                              linkResolver.hpp
@@ -4966,9 +4625,6 @@
 universe.cpp                            java.hpp
 universe.cpp                            javaCalls.hpp
 universe.cpp                            javaClasses.hpp
-// #ifdef JVMPI_SUPPORT
-// universe.cpp                            jvmpi.hpp
-// #endif // JVMPI_SUPPORT
 universe.cpp                            jvmtiRedefineClasses.hpp
 universe.cpp                            klassKlass.hpp
 universe.cpp                            klassOop.hpp
@@ -5260,6 +4916,7 @@
 vmSymbols.cpp                           oop.inline.hpp
 vmSymbols.cpp                           oopFactory.hpp
 vmSymbols.cpp                           vmSymbols.hpp
+vmSymbols.cpp                           xmlstream.hpp
 
 vmSymbols.hpp                           symbolOop.hpp
 
@@ -5385,7 +5042,3 @@
 
 xmlstream.hpp                           handles.hpp
 xmlstream.hpp                           ostream.hpp
-
-yieldingWorkgroup.cpp                   yieldingWorkgroup.hpp
-
-yieldingWorkgroup.hpp                   workgroup.hpp
--- a/hotspot/src/share/vm/interpreter/cInterpretMethod.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/interpreter/cInterpretMethod.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)cInterpretMethod.hpp	1.67 07/05/05 17:05:37 JVM"
+#pragma ident "@(#)cInterpretMethod.hpp	1.68 07/05/17 15:54:02 JVM"
 #endif
 /*
  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -458,14 +458,10 @@
  * Basically it's a big while loop that iterates until we return from
  * the method passed in.
  *
-#ifdef JVMPI_SUPPORT
- * The InterpretMethodWithChecks is used if JVMTI or JVMPI are enabled.
-#else // !JVMPI_SUPPORT
  * The InterpretMethodWithChecks is used if JVMTI is enabled.
-#endif // JVMPI_SUPPORT
  *
  */
-#if defined(VM_JVMTI) || (defined(JVMPI_SUPPORT) && defined(VM_JVMPI))
+#if defined(VM_JVMTI)
 void
 cInterpreter::InterpretMethodWithChecks(interpreterState istate) {
 #else
@@ -682,11 +678,7 @@
       }
       THREAD->clr_do_not_unlock();
 
-#ifdef JVMPI_SUPPORT
-      // Notify jvmti/jvmpi
-#else // !JVMPI_SUPPORT
       // Notify jvmti
-#endif // JVMPI_SUPPORT
 #ifdef VM_JVMTI
       if (_jvmti_interp_events) {
         // Whenever JVMTI puts a thread in interp_only_mode, method
@@ -697,19 +689,6 @@
         }
       }
 #endif /* VM_JVMTI */
-#ifdef JVMPI_SUPPORT
-      if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) == JVMPI_EVENT_ENABLED ||
-          *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2) == JVMPI_EVENT_ENABLED) {
-        oop rcvr;
-        if (istate->method()->is_static()) {
-          rcvr = NULL;
-        } else {
-          rcvr = LOCALS_OBJECT(0);
-        }
-        CALL_VM(SharedRuntime::jvmpi_method_entry(THREAD, istate->method(), 
-                rcvr), handle_exception);
-      }
-#endif // JVMPI_SUPPORT
 
       goto run;
     }
@@ -2545,11 +2524,7 @@
     }
 
     //
-#ifdef JVMPI_SUPPORT
-    // Notify jvmti/jvmpi
-#else // !JVMPI_SUPPORT
     // Notify jvmti
-#endif // JVMPI_SUPPORT
     //
     // NOTE: we do not notify a method_exit if we have a pending exception,
     // including an exception we generate for unlocking checks.  In the former
@@ -2558,19 +2533,10 @@
     // If we notify it again JVMDI will be all confused about how many frames
     // are still on the stack (4340444).
     //
-#ifdef JVMPI_SUPPORT
-    // Further note that jvmpi does not suppress method_exit notifications
-    // in the case of exceptions (which makes more sense to me). See bug
-    // 4933156
-#endif // JVMPI_SUPPORT
     //
     // NOTE Further! It turns out the the JVMTI spec in fact expects to see
     // method_exit events whenever we leave an activation unless it was done
-#ifdef JVMPI_SUPPORT
-    // for popframe. This is just like jvmpi and nothing like jvmdi. However
-#else // !JVMPI_SUPPORT
     // for popframe. This is nothing like jvmdi. However
-#endif // JVMPI_SUPPORT
     // we are passing the tests at the moment (apparently becuase they are
     // jvmdi based) so rather than change this code and possibly fail tests
     // we will leave it alone (with this note) in anticipation of changing
@@ -2596,17 +2562,6 @@
       }
 #endif /* VM_JVMTI */
 
-#ifdef JVMPI_SUPPORT
-    /* Only suppress method_exit events for jvmpi if we are doing a popFrame */
-    if ( istate->msg() != popping_frame && *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT) == JVMPI_EVENT_ENABLED) {
-      {
-        // Prevent any HandleMarkCleaner from freeing our live handles
-        HandleMark __hm(THREAD); 
-        CALL_VM_NOCHECK(SharedRuntime::jvmpi_method_exit(THREAD, istate->method()))
-      }
-    }
-#endif // JVMPI_SUPPORT
-
     //
     // See if we are returning any exception
     // A pending exception that was pending prior to a possible popping frame
--- a/hotspot/src/share/vm/interpreter/cInterpreter.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/interpreter/cInterpreter.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)cInterpreter.cpp	1.29 07/05/05 17:05:36 JVM"
+#pragma ident "@(#)cInterpreter.cpp	1.30 07/05/17 15:54:05 JVM"
 #endif
 /*
  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -27,33 +27,18 @@
 
 /*
  * Note:
-#ifdef JVMPI_SUPPORT
- * In order to eliminate the overhead of testing JVMPI and JVMTI flags
-#else // !JVMPI_SUPPORT
  * In order to eliminate the overhead of testing JVMTI flags
-#endif // JVMPI_SUPPORT
  * during non debuging execution, we generate two version of the Interpreter.
  * The first one is generated via the dependency in the includeDB mechanism 
  * and is read in as part of the _cInterpreter.cpp.incl line below.
  *
-#ifdef JVMPI_SUPPORT
- * The second and JVMTI/JVMPI enabled interpreter is brought in below after
-#else // !JVMPI_SUPPORT
  * The second and JVMTI enabled interpreter is brought in below after
-#endif // JVMPI_SUPPORT
  * the line defining VM_JVMTI to 1.
  * 
  * On startup, the assembly generated to enter the Interpreter will be
  * pointed at either InterpretMethod or InterpretMethodWithChecks depending
-#ifdef JVMPI_SUPPORT
- * on the state of the JVMTI or JVMPI flags..
-#else // !JVMPI_SUPPORT
  * on the state of the JVMTI flags..
-#endif // JVMPI_SUPPORT
  */
-#ifdef JVMPI_SUPPORT
-#undef VM_JVMPI
-#endif // JVMPI_SUPPORT
 #undef VM_JVMTI
 
 #include "incls/_precompiled.incl"
@@ -62,16 +47,9 @@
 #ifdef CC_INTERP
 
 
-#ifdef JVMPI_SUPPORT
-#define VM_JVMPI 1
-#endif // JVMPI_SUPPORT
 #define VM_JVMTI 1
 
-#ifdef JVMPI_SUPPORT
-// Build the Interpreter that is used if JVMTI or JVMPI are enabled
-#else // !JVMPI_SUPPORT
 // Build the Interpreter that is used if JVMTI is enabled
-#endif // JVMPI_SUPPORT
 #include "cInterpretMethod.hpp"
 
 // This constructor should only be used to contruct the object to signal
--- a/hotspot/src/share/vm/interpreter/cInterpreter.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/interpreter/cInterpreter.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)cInterpreter.hpp	1.22 07/05/05 17:05:38 JVM"
+#pragma ident "@(#)cInterpreter.hpp	1.23 07/05/17 15:54:24 JVM"
 #endif
 /*
  * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -493,11 +493,7 @@
 
 // The Interpreter used when 
 static void InterpretMethod(interpreterState istate);
-#ifdef JVMPI_SUPPORT
-// The interpreter used if JVMPI is enabled or JVMTI needs interpreter events
-#else // !JVMPI_SUPPORT
 // The interpreter used if JVMTI needs interpreter events
-#endif // JVMPI_SUPPORT
 static void InterpretMethodWithChecks(interpreterState istate);
 static void End_Of_Interpreter(void);
 
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)interpreter.cpp	1.244 07/05/05 17:05:39 JVM"
+#pragma ident "@(#)interpreter.cpp	1.245 07/05/17 15:54:28 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -103,11 +103,7 @@
   {
     // Allow c++ interpreter to do one initialization now that switches are set, etc.
     cInterpreter start_msg(cInterpreter::initialize);
-#ifdef JVMPI_SUPPORT
-    if (JvmtiExport::can_post_interpreter_events() || jvmpi::enabled() )
-#else // !JVMPI_SUPPORT
     if (JvmtiExport::can_post_interpreter_events())
-#endif // JVMPI_SUPPORT
       cInterpreter::InterpretMethodWithChecks(&start_msg);
     else
       cInterpreter::InterpretMethod(&start_msg);
@@ -293,12 +289,9 @@
 address    AbstractInterpreter::_throw_StackOverflowError_entry             = NULL;
 address    AbstractInterpreter::_throw_exception_entry                      = NULL;
 
-#ifdef JVMPI_SUPPORT
-// when JVM/PI is retired this variable can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+#ifndef PRODUCT
 EntryPoint AbstractInterpreter::_trace_code;
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+#endif // !PRODUCT
 EntryPoint AbstractInterpreter::_return_entry[AbstractInterpreter::number_of_return_entries];
 EntryPoint AbstractInterpreter::_earlyret_entry;
 EntryPoint AbstractInterpreter::_deopt_entry [AbstractInterpreter::number_of_deopt_entries ];
@@ -401,15 +394,8 @@
     _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
   }
 
-#ifdef JVMPI_SUPPORT
-  // when JVM/PI is retired this block can be made '#ifndef PRODUCT'
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
-#ifdef JVMPI_SUPPORT
-  if (TraceBytecodes || EnableJVMPIInstructionStartEvent) {
-#else // !JVMPI_SUPPORT
+#ifndef PRODUCT
   if (TraceBytecodes) {
-#endif // JVMPI_SUPPORT
     CodeletMark cm(_masm, "bytecode tracing support");
     Interpreter::_trace_code =
       EntryPoint(
@@ -424,7 +410,7 @@
         generate_trace_code(vtos)
       );
   }
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+#endif // !PRODUCT
 
   { CodeletMark cm(_masm, "return entry points");
     for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
@@ -747,22 +733,10 @@
   // debugging code
   if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
   if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
-#endif // PRODUCT
-#ifdef JVMPI_SUPPORT
-  // When JVM/PI is retired, this call to trace_bytecode() can be
-  // made '#ifndef PRODUCT'.
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
-#ifdef JVMPI_SUPPORT
-  if (TraceBytecodes || EnableJVMPIInstructionStartEvent)        trace_bytecode(t);
-#else // !JVMPI_SUPPORT
   if (TraceBytecodes)                                            trace_bytecode(t);
-#endif // JVMPI_SUPPORT
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
-#ifndef PRODUCT
   if (StopInterpreterAt > 0)                                     stop_interpreter_at();
   __ verify_FPU(1, t->tos_in());
-#endif // PRODUCT
+#endif // !PRODUCT
   int step;
   if (!t->does_dispatch()) { 
     step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
--- a/hotspot/src/share/vm/interpreter/interpreter.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/interpreter/interpreter.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)interpreter.hpp	1.152 07/05/05 17:05:39 JVM"
+#pragma ident "@(#)interpreter.hpp	1.153 07/05/17 15:54:31 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -170,14 +170,9 @@
 
   static address    _remove_activation_entry;                   // continuation address if an exception is not handled by current frame
 
-#ifdef JVMPI_SUPPORT
-  // when JVM/PI is retired this variable can be made '#ifndef PRODUCT'
-  static EntryPoint _trace_code;
-#else // !JVMPI_SUPPORT
 #ifndef PRODUCT
   static EntryPoint _trace_code;
 #endif // !PRODUCT
-#endif // JVMPI_SUPPORT
   static EntryPoint _return_entry[number_of_return_entries];    // entry points to return to from a call
   static EntryPoint _earlyret_entry;                            // entry point to return early from a call
   static EntryPoint _deopt_entry[number_of_deopt_entries];      // entry points to return to from a deoptimization
@@ -258,14 +253,9 @@
   static address    throw_StackOverflowError_entry()            { return _throw_StackOverflowError_entry; }
 
   // Code generation
-#ifdef JVMPI_SUPPORT
-  // when JVM/PI is retired this definition can be made '#ifndef PRODUCT'
-  static address    trace_code    (TosState state)              { return _trace_code.entry(state); }
-#else // !JVMPI_SUPPORT
 #ifndef PRODUCT
   static address    trace_code    (TosState state)              { return _trace_code.entry(state); }
 #endif // !PRODUCT
-#endif // JVMPI_SUPPORT
   static address    continuation  (TosState state)              { return _continuation_entry.entry(state); }
   static address*   dispatch_table(TosState state)              { return _active_table.table_for(state); }
   static address*   dispatch_table()                            { return _active_table.table_for(); }
@@ -455,21 +445,11 @@
   void set_safepoints_for_all_bytes();
 
   // Helpers for generate_and_dispatch
-#ifdef JVMPI_SUPPORT
-  // when JVM/PI is retired, this declaration can be made 'PRODUCT_RETURN0'
-  address generate_trace_code(TosState state);
-#else // !JVMPI_SUPPORT
   address generate_trace_code(TosState state)   PRODUCT_RETURN0;
-#endif // JVMPI_SUPPORT
   void count_bytecode()                         PRODUCT_RETURN;  
   void histogram_bytecode(Template* t)          PRODUCT_RETURN;
   void histogram_bytecode_pair(Template* t)     PRODUCT_RETURN;
-#ifdef JVMPI_SUPPORT
-  // when JVM/PI is retired, this declaration can be made 'PRODUCT_RETURN'
-  void trace_bytecode(Template* t);
-#else // !JVMPI_SUPPORT
   void trace_bytecode(Template* t)              PRODUCT_RETURN;
-#endif // JVMPI_SUPPORT
   void stop_interpreter_at()                    PRODUCT_RETURN;
 #endif // CC_INTERP
 
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1213 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)binaryTreeDictionary.cpp	1.37 07/05/05 17:05:43 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_binaryTreeDictionary.cpp.incl"
-
-////////////////////////////////////////////////////////////////////////////////
-// A binary tree based search structure for free blocks.
-// This is currently used in the Concurrent Mark&Sweep implementation.
-////////////////////////////////////////////////////////////////////////////////
-
-TreeChunk* TreeChunk::as_TreeChunk(FreeChunk* fc) {
-  // Do some assertion checking here.
-  return (TreeChunk*) fc;
-}
-
-void TreeChunk::verifyTreeChunkList() const {
-  TreeChunk* nextTC = (TreeChunk*)next();
-  if (prev() != NULL) { // interior list node shouldn'r have tree fields
-    guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
-              embedded_list()->right()  == NULL, "should be clear");
-  }
-  if (nextTC != NULL) {
-    guarantee(as_TreeChunk(nextTC->prev()) == this, "broken chain");
-    guarantee(nextTC->size() == size(), "wrong size");
-    nextTC->verifyTreeChunkList();
-  }
-}
-
-
-TreeList* TreeList::as_TreeList(TreeChunk* tc) {
-  // This first free chunk in the list will be the tree list.
-  assert(tc->size() >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
-  TreeList* tl = tc->embedded_list();
-  tc->set_list(tl);
-#ifdef ASSERT
-  tl->set_protecting_lock(NULL);
-#endif
-  tl->set_hint(0);
-  tl->set_size(tc->size());
-  tl->link_head(tc);
-  tl->link_tail(tc);
-  tl->set_count(1);
-  tl->init_statistics();
-  tl->setParent(NULL);
-  tl->setLeft(NULL);
-  tl->setRight(NULL);
-  return tl;
-}
-TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
-  TreeChunk* tc = (TreeChunk*) addr;
-  assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
-  assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL, 
-    "Space should be clear");
-  tc->setSize(size);
-  tc->linkPrev(NULL);
-  tc->linkNext(NULL);
-  TreeList* tl = TreeList::as_TreeList(tc);
-  return tl;
-}
-
-TreeList* TreeList::removeChunkReplaceIfNeeded(TreeChunk* tc) {
-
-  TreeList* retTL = this;
-  FreeChunk* list = head();
-  assert(!list || list != list->next(), "Chunk on list twice");
-  assert(tc != NULL, "Chunk being removed is NULL");
-  assert(parent() == NULL || this == parent()->left() || 
-    this == parent()->right(), "list is inconsistent");
-  assert(tc->isFree(), "Header is not marked correctly");
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-
-  FreeChunk* prevFC = tc->prev();
-  TreeChunk* nextTC = TreeChunk::as_TreeChunk(tc->next());
-  assert(list != NULL, "should have at least the target chunk");
-
-  // Is this the first item on the list?
-  if (tc == list) {
-    // The "getChunk..." functions for a TreeList will not return the
-    // first chunk in the list unless it is the last chunk in the list
-    // because the first chunk is also acting as the tree node.
-    // When coalescing happens, however, the first chunk in the a tree
-    // list can be the start of a free range.  Free ranges are removed
-    // from the free lists so that they are not available to be 
-    // allocated when the sweeper yields (giving up the free list lock)
-    // to allow mutator activity.  If this chunk is the first in the
-    // list and is not the last in the list, do the work to copy the
-    // TreeList from the first chunk to the next chunk and update all
-    // the TreeList pointers in the chunks in the list.
-    if (nextTC == NULL) {
-      assert(prevFC == NULL, "Not last chunk in the list")
-      set_tail(NULL);
-      set_head(NULL);
-    } else {
-      // copy embedded list.
-      nextTC->set_embedded_list(tc->embedded_list());
-      retTL = nextTC->embedded_list();
-      // Fix the pointer to the list in each chunk in the list.
-      // This can be slow for a long list.  Consider having
-      // an option that does not allow the first chunk on the
-      // list to be coalesced.
-      for (TreeChunk* curTC = nextTC; curTC != NULL; 
-	  curTC = TreeChunk::as_TreeChunk(curTC->next())) {
-        curTC->set_list(retTL);
-      }
-      // Fix the parent to point to the new TreeList.
-      if (retTL->parent() != NULL) {
-	if (this == retTL->parent()->left()) {
-	  retTL->parent()->setLeft(retTL);
-	} else {
-	  assert(this == retTL->parent()->right(), "Parent is incorrect");
-	  retTL->parent()->setRight(retTL);
-	}
-      }
-      // Fix the children's parent pointers to point to the
-      // new list.
-      assert(right() == retTL->right(), "Should have been copied");
-      if (retTL->right() != NULL) {
-	retTL->right()->setParent(retTL);
-      }
-      assert(left() == retTL->left(), "Should have been copied");
-      if (retTL->left() != NULL) {
-	retTL->left()->setParent(retTL);
-      }
-      retTL->link_head(nextTC);
-      assert(nextTC->isFree(), "Should be a free chunk");
-    }
-  } else {
-    if (nextTC == NULL) {
-      // Removing chunk at tail of list
-      link_tail(prevFC);
-    }
-    // Chunk is interior to the list
-    prevFC->linkAfter(nextTC);
-  }
-
-  // Below this point the embeded TreeList being used for the
-  // tree node may have changed. Don't use "this" 
-  // TreeList*.
-  // chunk should still be a free chunk (bit set in _prev)
-  assert(!retTL->head() || retTL->size() == retTL->head()->size(), 
-    "Wrong sized chunk in list");
-  debug_only(
-    tc->linkPrev(NULL);  
-    tc->linkNext(NULL);
-    tc->set_list(NULL);
-    bool prev_found = false;
-    bool next_found = false;
-    for (FreeChunk* curFC = retTL->head(); 
-	 curFC != NULL; curFC = curFC->next()) {
-      assert(curFC != tc, "Chunk is still in list");
-      if (curFC == prevFC) {
-	prev_found = true;
-      }
-      if (curFC == nextTC) {
-	next_found = true;
-      }
-    }
-    assert(prevFC == NULL || prev_found, "Chunk was lost from list");
-    assert(nextTC == NULL || next_found, "Chunk was lost from list");
-    assert(retTL->parent() == NULL ||
-	   retTL == retTL->parent()->left() || 
-	   retTL == retTL->parent()->right(),
-           "list is inconsistent");
-  )
-  retTL->decrement_count();
-
-  assert(tc->isFree(), "Should still be a free chunk");
-  assert(retTL->head() == NULL || retTL->head()->prev() == NULL, 
-    "list invariant");
-  assert(retTL->tail() == NULL || retTL->tail()->next() == NULL, 
-    "list invariant");
-  return retTL;
-}
-void TreeList::returnChunkAtTail(TreeChunk* chunk) {
-  assert(chunk != NULL, "returning NULL chunk");
-  assert(chunk->list() == this, "list should be set for chunk");
-  assert(tail() != NULL, "The tree list is embedded in the first chunk");
-  // which means that the list can never be empty.
-  assert(!verifyChunkInFreeLists(chunk), "Double entry");
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  
-  FreeChunk* fc = tail();
-  fc->linkAfter(chunk);
-  link_tail(chunk);
-
-  assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  increment_count();
-  debug_only(increment_returnedBytes_by(chunk->size()*sizeof(HeapWord));)
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-}
-
-// Add this chunk at the head of the list.  "At the head of the list"
-// is defined to be after the chunk pointer to by head().  This is 
-// because the TreeList is embedded in the first TreeChunk in the
-// list.  See the definition of TreeChunk.
-void TreeList::returnChunkAtHead(TreeChunk* chunk) {
-  assert(chunk->list() == this, "list should be set for chunk");
-  assert(head() != NULL, "The tree list is embedded in the first chunk");
-  assert(chunk != NULL, "returning NULL chunk");
-  assert(!verifyChunkInFreeLists(chunk), "Double entry");
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-
-  FreeChunk* fc = head()->next();
-  if (fc != NULL) {
-    chunk->linkAfter(fc);
-  } else {
-    assert(tail() == NULL, "List is inconsistent");
-    link_tail(chunk);
-  }
-  head()->linkAfter(chunk);
-  assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  increment_count();
-  debug_only(increment_returnedBytes_by(chunk->size()*sizeof(HeapWord));)
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-}
-
-TreeChunk* TreeList::head_as_TreeChunk() {
-  assert(head() == NULL || TreeChunk::as_TreeChunk(head())->list() == this,
-    "Wrong type of chunk?");
-  return TreeChunk::as_TreeChunk(head());
-}
-
-TreeChunk* TreeList::first_available() {
-  guarantee(head() != NULL, "The head of the list cannot be NULL");
-  FreeChunk* fc = head()->next();
-  TreeChunk* retTC;
-  if (fc == NULL) {
-    retTC = head_as_TreeChunk();
-  } else {
-    retTC = TreeChunk::as_TreeChunk(fc);
-  }
-  assert(retTC->list() == this, "Wrong type of chunk.");
-  return retTC;
-}
-
-BinaryTreeDictionary::BinaryTreeDictionary(MemRegion mr, bool splay):
-  _splay(splay)
-{
-  assert(mr.byte_size() > MIN_TREE_CHUNK_SIZE, "minimum chunk size");
-
-  reset(mr);
-  assert(root()->left() == NULL, "reset check failed");
-  assert(root()->right() == NULL, "reset check failed");
-  assert(root()->head()->next() == NULL, "reset check failed");
-  assert(root()->head()->prev() == NULL, "reset check failed");
-  assert(totalSize() == root()->size(), "reset check failed");
-  assert(totalFreeBlocks() == 1, "reset check failed");
-}
-
-void BinaryTreeDictionary::inc_totalSize(size_t inc) {
-  _totalSize = _totalSize + inc;
-}
-
-void BinaryTreeDictionary::dec_totalSize(size_t dec) {
-  _totalSize = _totalSize - dec;
-}
-
-void BinaryTreeDictionary::reset(MemRegion mr) {
-  assert(mr.byte_size() > MIN_TREE_CHUNK_SIZE, "minimum chunk size");
-  set_root(TreeList::as_TreeList(mr.start(), mr.word_size()));
-  set_totalSize(mr.word_size());
-  set_totalFreeBlocks(1);
-}
-
-void BinaryTreeDictionary::reset(HeapWord* addr, size_t byte_size) {
-  MemRegion mr(addr, heap_word_size(byte_size));
-  reset(mr);
-}
-
-void BinaryTreeDictionary::reset() {
-  set_root(NULL);
-  set_totalSize(0);
-  set_totalFreeBlocks(0);
-}
-
-// Get a free block of size at least size from tree, or NULL.
-// If a splay step is requested, the removal algorithm (only) incorporates
-// a splay step as follows:
-// . the search proceeds down the tree looking for a possible
-//   match. At the (closest) matching location, an appropriate splay step is applied
-//   (zig, zig-zig or zig-zag). A chunk of the appropriate size is then returned
-//   if available, and if it's the last chunk, the node is deleted. A deteleted
-//   node is replaced in place by its tree successor.
-TreeChunk*
-BinaryTreeDictionary::getChunkFromTree(size_t size, Dither dither, bool splay)
-{
-  TreeList *curTL, *prevTL;
-  TreeChunk* retTC = NULL;
-  assert(size >= MIN_TREE_CHUNK_SIZE, "minimum chunk size");
-  if (FLSVerifyDictionary) {
-    verifyTree();
-  }
-  // starting at the root, work downwards trying to find match.
-  // Remember the last node of size too great or too small.
-  for (prevTL = curTL = root(); curTL != NULL;) {
-    if (curTL->size() == size) {        // exact match
-      break;
-    } 
-    prevTL = curTL;
-    if (curTL->size() < size) {        // proceed to right sub-tree
-      curTL = curTL->right();
-    } else {                           // proceed to left sub-tree
-      assert(curTL->size() > size, "size inconsistency");
-      curTL = curTL->left();
-    }
-  }
-  if (curTL == NULL) { // couldn't find exact match
-    // try and find the next larger size by walking back up the search path
-    for (curTL = prevTL; curTL != NULL;) {
-      if (curTL->size() >= size) break;
-      else curTL = curTL->parent();
-    }
-    assert(curTL == NULL || curTL->count() > 0,
-      "An empty list should not be in the tree");
-  }
-  if (curTL != NULL) {
-    assert(curTL->size() >= size, "size inconsistency");
-    if (UseCMSAdaptiveFreeLists) {
-  
-      // A candidate chunk has been found.  If it is already under
-      // populated, get a chunk associated with the hint for this
-      // chunk.
-      if (curTL->surplus() <= 0) {
-        /* Use the hint to find a size with a surplus, and reset the hint. */
-        TreeList* hintTL = curTL;
-        while (hintTL->hint() != 0) {
-  	  assert(hintTL->hint() == 0 || hintTL->hint() > hintTL->size(),
-	    "hint points in the wrong direction");
-          hintTL = findList(hintTL->hint());
-  	  assert(curTL != hintTL, "Infinite loop");
-          if (hintTL == NULL || 
-	      hintTL == curTL /* Should not happen but protect against it */ ) {
-  	    // No useful hint.  Set the hint to NULL and go on.
-            curTL->set_hint(0);
-            break;
-          }
-          assert(hintTL->size() > size, "hint is inconsistent");
-          if (hintTL->surplus() > 0) {
-  	    // The hint led to a list that has a surplus.  Use it.
-  	    // Set the hint for the candidate to an overpopulated
-  	    // size.  
-            curTL->set_hint(hintTL->size());
-            // Change the candidate.
-            curTL = hintTL;
-            break;
-          }
-  	  // The evm code reset the hint of the candidate as
-  	  // at an interrim point.  Why?  Seems like this leaves
-  	  // the hint pointing to a list that didn't work.
-          // curTL->set_hint(hintTL->size());
-        }
-      }
-    }
-    // don't waste time splaying if chunk's singleton
-    if (splay && curTL->head()->next() != NULL) {
-      semiSplayStep(curTL);
-    }
-    retTC = curTL->first_available();
-    assert((retTC != NULL) && (curTL->count() > 0),
-      "A list in the binary tree should not be NULL");
-    assert(retTC->size() >= size, 
-      "A chunk of the wrong size was found");
-    removeChunkFromTree(retTC);
-    assert(retTC->isFree(), "Header is not marked correctly");
-  }
-
-  if (FLSVerifyDictionary) {
-    verify();
-  }
-  return retTC;
-}
-
-TreeList* BinaryTreeDictionary::findList(size_t size) const {
-  TreeList* curTL;
-  for (curTL = root(); curTL != NULL;) {
-    if (curTL->size() == size) {        // exact match
-      break;
-    } 
-    
-    if (curTL->size() < size) {        // proceed to right sub-tree
-      curTL = curTL->right();
-    } else {                           // proceed to left sub-tree
-      assert(curTL->size() > size, "size inconsistency");
-      curTL = curTL->left();
-    }
-  }
-  return curTL;
-}
-
-
-bool BinaryTreeDictionary::verifyChunkInFreeLists(FreeChunk* tc) const {
-  size_t size = tc->size();
-  TreeList* tl = findList(size);
-  if (tl == NULL) {
-    return false;
-  } else {
-    return tl->verifyChunkInFreeLists(tc);
-  }
-}
-
-FreeChunk* BinaryTreeDictionary::findLargestDict() const {
-  TreeList *curTL = root();
-  if (curTL != NULL) {
-    while(curTL->right() != NULL) curTL = curTL->right();
-    return curTL->first_available();
-  } else {
-    return NULL;
-  }
-}
-
-// Remove the current chunk from the tree.  If it is not the last 
-// chunk in a list on a tree node, just unlink it.
-// If it is the last chunk in the list (the next link is NULL),
-// remove the node and repair the tree.
-TreeChunk*
-BinaryTreeDictionary::removeChunkFromTree(TreeChunk* tc) {
-  assert(tc != NULL, "Should not call with a NULL chunk");
-  assert(tc->isFree(), "Header is not marked correctly");
-
-  TreeList *newTL, *parentTL;
-  TreeChunk* retTC;
-  TreeList* tl = tc->list();
-  debug_only(
-    bool removing_only_chunk = false;
-    if (tl == _root) {
-      if ((_root->left() == NULL) && (_root->right() == NULL)) {
-        if (_root->count() == 1) {
-	  assert(_root->head() == tc, "Should only be this one chunk");
-	  removing_only_chunk = true;
-        }
-      }
-    }
-  )
-  assert(tl != NULL, "List should be set");
-  assert(tl->parent() == NULL || tl == tl->parent()->left() || 
-	 tl == tl->parent()->right(), "list is inconsistent");
-
-  bool complicatedSplice = false;
-
-  retTC = tc;
-  // Removing this chunk can have the side effect of changing the node
-  // (TreeList*) in the tree.  If the node is the root, update it.
-  TreeList* replacementTL = tl->removeChunkReplaceIfNeeded(tc);
-  assert(tc->isFree(), "Chunk should still be free");
-  assert(replacementTL->parent() == NULL ||
-	 replacementTL == replacementTL->parent()->left() || 
-	 replacementTL == replacementTL->parent()->right(),
-         "list is inconsistent");
-  if (tl == root()) {
-    assert(replacementTL->parent() == NULL, "Incorrectly replacing root");
-    set_root(replacementTL);
-  }
-  debug_only(
-    if (tl != replacementTL) {
-      assert(replacementTL->head() != NULL, 
-        "If the tree list was replaced, it should not be a NULL list");
-      TreeList* rhl = replacementTL->head_as_TreeChunk()->list();
-      TreeList* rtl = TreeChunk::as_TreeChunk(replacementTL->tail())->list();
-      assert(rhl == replacementTL, "Broken head");
-      assert(rtl == replacementTL, "Broken tail");
-      assert(replacementTL->size() == tc->size(),  "Broken size");
-    }
-  )
-
-  // Does the tree need to be repaired?
-  if (replacementTL->count() == 0) {
-    assert(replacementTL->head() == NULL && 
-	   replacementTL->tail() == NULL, "list count is incorrect");
-    // Find the replacement node for the (soon to be empty) node being removed.
-    // if we have a single (or no) child, splice child in our stead
-    if (replacementTL->left() == NULL) {
-      // left is NULL so pick right.  right may also be NULL.
-      newTL = replacementTL->right();
-      debug_only(replacementTL->clearRight();)
-    } else if (replacementTL->right() == NULL) {
-      // right is NULL
-      newTL = replacementTL->left();
-      debug_only(replacementTL->clearLeft();)
-    } else {  // we have both children, so, by patriarchal convention,
-              // my replacement is least node in right sub-tree
-      complicatedSplice = true;
-      newTL = removeTreeMinimum(replacementTL->right());
-      assert(newTL != NULL && newTL->left() == NULL &&
-             newTL->right() == NULL, "sub-tree minimum exists");
-    }
-    // newTL is the replacement for the (soon to be empty) node.
-    // newTL may be NULL.
-    // should verify; we just cleanly excised our replacement
-    if (FLSVerifyDictionary) {
-      verifyTree();
-    }
-    // first make newTL my parent's child
-    if ((parentTL = replacementTL->parent()) == NULL) {  
-      // newTL should be root
-      assert(tl == root(), "Incorrectly replacing root");
-      set_root(newTL);
-      if (newTL != NULL) {
-        newTL->clearParent();
-      }
-    } else if (parentTL->right() == replacementTL) {   
-      // replacementTL is a right child
-      parentTL->setRight(newTL);
-    } else {                                // replacementTL is a left child
-      assert(parentTL->left() == replacementTL, "should be left child");
-      parentTL->setLeft(newTL);
-    }
-    debug_only(replacementTL->clearParent();)
-    if (complicatedSplice) {  // we need newTL to get replacementTL's 
-			      // two children
-      assert(newTL != NULL &&
-             newTL->left() == NULL && newTL->right() == NULL,
-            "newTL should not have encumbrances from the past");
-      // we'd like to assert as below:
-      // assert(replacementTL->left() != NULL && replacementTL->right() != NULL,
-      //       "else !complicatedSplice");
-      // ... however, the above assertion is too strong because we aren't
-      // guaranteed that replacementTL->right() is still NULL. 
-      // Recall that we removed
-      // the right sub-tree minimum from replacementTL. 
-      // That may well have been its right
-      // child! So we'll just assert half of the above:
-      assert(replacementTL->left() != NULL, "else !complicatedSplice");
-      newTL->setLeft(replacementTL->left());
-      newTL->setRight(replacementTL->right());
-      debug_only(
-        replacementTL->clearRight();
-        replacementTL->clearLeft();
-      )
-    }
-    assert(replacementTL->right() == NULL && 
-	   replacementTL->left() == NULL && 
-	   replacementTL->parent() == NULL,
-        "delete without encumbrances");
-  }
-
-  assert(totalSize() >= retTC->size(), "Incorrect total size");
-  dec_totalSize(retTC->size());     // size book-keeping
-  assert(totalFreeBlocks() > 0, "Incorrect total count");
-  set_totalFreeBlocks(totalFreeBlocks() - 1);
-
-  assert(retTC != NULL, "null chunk?");
-  assert(retTC->prev() == NULL && retTC->next() == NULL,
-         "should return without encumbrances");
-  if (FLSVerifyDictionary) {
-    verifyTree();
-  }
-  assert(!removing_only_chunk || _root == NULL, "root should be NULL");
-  return TreeChunk::as_TreeChunk(retTC);
-}
-
-// Remove the leftmost node (lm) in the tree and return it.
-// If lm has a right child, link it to the left node of
-// the parent of lm.
-TreeList* BinaryTreeDictionary::removeTreeMinimum(TreeList* tl) {
-  assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
-  // locate the subtree minimum by walking down left branches
-  TreeList* curTL = tl;
-  for (; curTL->left() != NULL; curTL = curTL->left());
-  // obviously curTL now has at most one child, a right child
-  if (curTL != root()) {  // Should this test just be removed?
-    TreeList* parentTL = curTL->parent();
-    if (parentTL->left() == curTL) { // curTL is a left child
-      parentTL->setLeft(curTL->right());
-    } else {
-      // If the list tl has no left child, then curTL may be
-      // the right child of parentTL.
-      assert(parentTL->right() == curTL, "should be a right child");
-      parentTL->setRight(curTL->right());
-    }
-  } else {
-    // The only use of this method would not pass the root of the
-    // tree (as indicated by the assertion above that the tree list
-    // has a parent) but the specification does not explicitly exclude the
-    // passing of the root so accomodate it.
-    set_root(NULL);
-  }
-  debug_only(
-    curTL->clearParent();  // Test if this needs to be cleared
-    curTL->clearRight();    // recall, above, left child is already null
-  )
-  // we just excised a (non-root) node, we should still verify all tree invariants
-  if (FLSVerifyDictionary) {
-    verifyTree();
-  }
-  return curTL;
-}
-
-// Based on a simplification of the algorithm by Sleator and Tarjan (JACM 1985).
-// The simplifications are the following:
-// . we splay only when we delete (not when we insert)
-// . we apply a single spay step per deletion/access
-// By doing such partial splaying, we reduce the amount of restructuring,
-// while getting a reasonably efficient search tree (we think).
-// [Measurements will be needed to (in)validate this expectation.]
-
-void BinaryTreeDictionary::semiSplayStep(TreeList* tc) {
-  // apply a semi-splay step at the given node:
-  // . if root, norting needs to be done
-  // . if child of root, splay once
-  // . else zig-zig or sig-zag depending on path from grandparent
-  if (root() == tc) return;
-  warning("*** Splaying not yet implemented; "
-          "tree operations may be inefficient ***");
-}
-
-void BinaryTreeDictionary::insertChunkInTree(FreeChunk* fc) {
-  TreeList *curTL, *prevTL;
-  size_t size = fc->size();
-
-  assert(size >= MIN_TREE_CHUNK_SIZE, "too small to be a TreeList");
-  if (FLSVerifyDictionary) {
-    verifyTree();
-  }
-  // XXX: do i need to clear the FreeChunk fields, let me do it just in case
-  // Revisit this later
-  
-  fc->clearNext();
-  fc->linkPrev(NULL);
-  
-  // work down from the _root, looking for insertion point
-  for (prevTL = curTL = root(); curTL != NULL;) {
-    if (curTL->size() == size)  // exact match
-      break;
-    prevTL = curTL;
-    if (curTL->size() > size) { // follow left branch
-      curTL = curTL->left();
-    } else {                    // follow right branch
-      assert(curTL->size() < size, "size inconsistency");
-      curTL = curTL->right();
-    }
-  }
-  TreeChunk* tc = TreeChunk::as_TreeChunk(fc);
-  // This chunk is being returned to the binary try.  It's embedded
-  // TreeList should be unused at this point.
-  tc->initialize();
-  if (curTL != NULL) {          // exact match
-    tc->set_list(curTL);
-    curTL->returnChunkAtTail(tc);
-  } else {                     // need a new node in tree
-    tc->clearNext();
-    tc->linkPrev(NULL);
-    TreeList* newTL = TreeList::as_TreeList(tc);
-    assert(((TreeChunk*)tc)->list() == newTL,
-      "List was not initialized correctly");
-    if (prevTL == NULL) {      // we are the only tree node
-      assert(root() == NULL, "control point invariant");
-      set_root(newTL);
-    } else {                   // insert under prevTL ...
-      if (prevTL->size() < size) {   // am right child
-        assert(prevTL->right() == NULL, "control point invariant");
-        prevTL->setRight(newTL);
-      } else {                       // am left child
-        assert(prevTL->size() > size && prevTL->left() == NULL, "cpt pt inv");
-        prevTL->setLeft(newTL);
-      }
-    }
-  }
-  assert(tc->list() != NULL, "Tree list should be set");
-
-  inc_totalSize(size);
-  // Method 'totalSizeInTree' walks through the every block in the
-  // tree, so it can cause significant performance loss if there are
-  // many blocks in the tree
-  assert(!FLSVerifyDictionary || totalSizeInTree(root()) == totalSize(), "_totalSize inconsistency");
-  set_totalFreeBlocks(totalFreeBlocks() + 1);
-  if (FLSVerifyDictionary) {
-    verifyTree();
-  }
-}
-
-size_t BinaryTreeDictionary::maxChunkSize() const {
-  verify_par_locked();
-  TreeList* tc = root();
-  if (tc == NULL) return 0;
-  for (; tc->right() != NULL; tc = tc->right());
-  return tc->size();
-}
-
-size_t BinaryTreeDictionary::totalListLength(TreeList* tl) const {
-  size_t res;
-  res = tl->count();
-#ifdef ASSERT
-  size_t cnt;
-  FreeChunk* tc = tl->head();
-  for (cnt = 0; tc != NULL; tc = tc->next(), cnt++);
-  assert(res == cnt, "The count is not being maintained correctly");
-#endif
-  return res;
-}
-
-size_t BinaryTreeDictionary::totalSizeInTree(TreeList* tl) const {
-  if (tl == NULL)
-    return 0;
-  return (tl->size() * totalListLength(tl)) +
-         totalSizeInTree(tl->left())    +
-         totalSizeInTree(tl->right());
-}
-
-double BinaryTreeDictionary::sum_of_squared_block_sizes(TreeList* const tl) const {
-  if (tl == NULL) {
-    return 0.0;
-  }
-  double size = (double)(tl->size());
-  double curr = size * size * totalListLength(tl);
-  curr += sum_of_squared_block_sizes(tl->left());
-  curr += sum_of_squared_block_sizes(tl->right());
-  return curr;
-}
-
-size_t BinaryTreeDictionary::totalFreeBlocksInTree(TreeList* tl) const {
-  if (tl == NULL)
-    return 0;
-  return totalListLength(tl) +
-         totalFreeBlocksInTree(tl->left()) +
-         totalFreeBlocksInTree(tl->right());
-}
-
-size_t BinaryTreeDictionary::numFreeBlocks() const {
-  assert(totalFreeBlocksInTree(root()) == totalFreeBlocks(), 
-         "_totalFreeBlocks inconsistency");
-  return totalFreeBlocks();
-}
-
-size_t BinaryTreeDictionary::treeHeightHelper(TreeList* tl) const {
-  if (tl == NULL)
-    return 0;
-  return 1 + MAX2(treeHeightHelper(tl->left()),
-                  treeHeightHelper(tl->right()));
-}
-
-size_t BinaryTreeDictionary::treeHeight() const {
-  return treeHeightHelper(root());
-}
-
-size_t BinaryTreeDictionary::totalNodesHelper(TreeList* tl) const {
-  if (tl == NULL) {
-    return 0;
-  }
-  return 1 + totalNodesHelper(tl->left()) +
-    totalNodesHelper(tl->right());
-}
-
-size_t BinaryTreeDictionary::totalNodesInTree(TreeList* tl) const {
-  return totalNodesHelper(root());
-}
-
-void BinaryTreeDictionary::dictCensusUpdate(size_t size, bool split, bool birth){
-  TreeList* nd = findList(size);
-  if (nd) {
-    if (split) {
-      if (birth) {
-        nd->increment_splitBirths();
-        nd->increment_surplus();
-      }  else {
-        nd->increment_splitDeaths();
-        nd->decrement_surplus();
-      }
-    } else {
-      if (birth) {
-        nd->increment_coalBirths();
-        nd->increment_surplus();
-      } else {
-        nd->increment_coalDeaths();
-        nd->decrement_surplus();
-      }
-    }
-  }
-  // A list for this size may not be found (nd == 0) if
-  //   This is a death where the appropriate list is now
-  //     empty and has been removed from the list.
-  //   This is a birth associated with a LinAB.  The chunk
-  //     for the LinAB is not in the dictionary.
-}
-
-bool BinaryTreeDictionary::coalDictOverPopulated(size_t size) {
-  TreeList* list_of_size = findList(size);
-  // None of requested size implies overpopulated.
-  return list_of_size == NULL || list_of_size->coalDesired() <= 0 ||
-         list_of_size->count() > list_of_size->coalDesired();
-}
-
-// Closures for walking the binary tree.
-//   do_list() walks the free list in a node applying the closure
-//     to each free chunk in the list
-//   do_tree() walks the nodes in the binary tree applying do_list()
-//     to each list at each node.
-
-class TreeCensusClosure : public StackObj {
- protected:
-  virtual void do_list(FreeList* fl) = 0;
- public:
-  virtual void do_tree(TreeList* tl) = 0;
-};
-
-class AscendTreeCensusClosure : public TreeCensusClosure {
- public:
-  void do_tree(TreeList* tl) {
-    if (tl != NULL) {
-      do_tree(tl->left());
-      do_list(tl);
-      do_tree(tl->right());
-    }
-  }
-};
-
-class DescendTreeCensusClosure : public TreeCensusClosure {
- public:
-  void do_tree(TreeList* tl) {
-    if (tl != NULL) {
-      do_tree(tl->right());
-      do_list(tl);
-      do_tree(tl->left());
-    }
-  }
-};
-       
-// For each list in the tree, calculate the desired, desired
-// coalesce, count before sweep, and surplus before sweep.
-class BeginSweepClosure : public AscendTreeCensusClosure {
-  double _percentage;
-  float _inter_sweep_current;
-  float _inter_sweep_estimate;
-  
- public:
-  BeginSweepClosure(double p, float inter_sweep_current,
-                              float inter_sweep_estimate) :
-   _percentage(p),
-   _inter_sweep_current(inter_sweep_current),
-   _inter_sweep_estimate(inter_sweep_estimate) { }
-
-  void do_list(FreeList* fl) {
-    double coalSurplusPercent = _percentage;
-    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate);
-    fl->set_coalDesired((ssize_t)((double)fl->desired() * coalSurplusPercent));
-    fl->set_beforeSweep(fl->count());
-    fl->set_bfrSurp(fl->surplus());
-  }
-};
-
-// Used to search the tree until a condition is met.
-// Similar to TreeCensusClosure but searches the
-// tree and returns promptly when found.
-
-class TreeSearchClosure : public StackObj {
- protected:
-  virtual bool do_list(FreeList* fl) = 0;
- public:
-  virtual bool do_tree(TreeList* tl) = 0;
-};
-
-#if 0 //  Don't need this yet but here for symmetry.
-class AscendTreeSearchClosure : public TreeSearchClosure {
- public:
-  bool do_tree(TreeList* tl) {
-    if (tl != NULL) {
-      if (do_tree(tl->left())) return true;
-      if (do_list(tl)) return true;
-      if (do_tree(tl->right())) return true;
-    }
-    return false;
-  }
-};
-#endif
-
-class DescendTreeSearchClosure : public TreeSearchClosure {
- public:
-  bool do_tree(TreeList* tl) {
-    if (tl != NULL) {
-      if (do_tree(tl->right())) return true;
-      if (do_list(tl)) return true;
-      if (do_tree(tl->left())) return true;
-    }
-    return false;
-  }
-};
-
-// Searches the tree for a chunk that ends at the
-// specified address.
-class EndTreeSearchClosure : public DescendTreeSearchClosure {
-  HeapWord* _target;
-  FreeChunk* _found;
-
- public:
-  EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
-  bool do_list(FreeList* fl) {
-    FreeChunk* item = fl->head();
-    while (item != NULL) {
-      if (item->end() == _target) {
-	_found = item;
-	return true;
-      }
-      item = item->next();
-    }
-    return false;
-  }
-  FreeChunk* found() { return _found; }
-};
-
-FreeChunk* BinaryTreeDictionary::find_chunk_ends_at(HeapWord* target) const {
-  EndTreeSearchClosure etsc(target);
-  bool found_target = etsc.do_tree(root());
-  assert(found_target || etsc.found() == NULL, "Consistency check");
-  assert(!found_target || etsc.found() != NULL, "Consistency check");
-  return etsc.found();
-}
-
-void BinaryTreeDictionary::beginSweepDictCensus(double coalSurplusPercent,
-  float inter_sweep_current, float inter_sweep_estimate) {
-  BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
-                                            inter_sweep_estimate);
-  bsc.do_tree(root());
-}
-
-// Closures and methods for calculating total bytes returned to the
-// free lists in the tree.
-NOT_PRODUCT(
-  class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure {
-   public:
-    void do_list(FreeList* fl) {
-      fl->set_returnedBytes(0);
-    }
-  };
-  
-  void BinaryTreeDictionary::initializeDictReturnedBytes() {
-    InitializeDictReturnedBytesClosure idrb;
-    idrb.do_tree(root());
-  }
-  
-  class ReturnedBytesClosure : public AscendTreeCensusClosure {
-    size_t _dictReturnedBytes;
-   public:
-    ReturnedBytesClosure() { _dictReturnedBytes = 0; }
-    void do_list(FreeList* fl) {
-      _dictReturnedBytes += fl->returnedBytes();
-    }
-    size_t dictReturnedBytes() { return _dictReturnedBytes; }
-  };
-  
-  size_t BinaryTreeDictionary::sumDictReturnedBytes() {
-    ReturnedBytesClosure rbc;
-    rbc.do_tree(root());
-  
-    return rbc.dictReturnedBytes();
-  }
-
-  // Count the number of entries in the tree.
-  class treeCountClosure : public DescendTreeCensusClosure {
-   public:
-    uint count;
-    treeCountClosure(uint c) { count = c; }
-    void do_list(FreeList* fl) {
-      count++;
-    }
-  };
-
-  size_t BinaryTreeDictionary::totalCount() {
-    treeCountClosure ctc(0);
-    ctc.do_tree(root());
-    return ctc.count;
-  }
-)
-
-// Calculate surpluses for the lists in the tree.
-class setTreeSurplusClosure : public AscendTreeCensusClosure {
-  double percentage;
- public:
-  setTreeSurplusClosure(double v) { percentage = v; }
-  void do_list(FreeList* fl) {
-    double splitSurplusPercent = percentage;
-    fl->set_surplus(fl->count() -
-                   (ssize_t)((double)fl->desired() * splitSurplusPercent));
-  }
-};
-
-void BinaryTreeDictionary::setTreeSurplus(double splitSurplusPercent) {
-  setTreeSurplusClosure sts(splitSurplusPercent);
-  sts.do_tree(root());
-}
-
-// Set hints for the lists in the tree.
-class setTreeHintsClosure : public DescendTreeCensusClosure {
-  size_t hint;
- public:
-  setTreeHintsClosure(size_t v) { hint = v; }
-  void do_list(FreeList* fl) {
-    fl->set_hint(hint);
-    assert(fl->hint() == 0 || fl->hint() > fl->size(), 
-      "Current hint is inconsistent");
-    if (fl->surplus() > 0) {
-      hint = fl->size();
-    }
-  }
-};
-
-void BinaryTreeDictionary::setTreeHints(void) {
-  setTreeHintsClosure sth(0);
-  sth.do_tree(root());
-}
-
-// Save count before previous sweep and splits and coalesces.
-class clearTreeCensusClosure : public AscendTreeCensusClosure {
-  void do_list(FreeList* fl) {
-    fl->set_prevSweep(fl->count());
-    fl->set_coalBirths(0);
-    fl->set_coalDeaths(0);
-    fl->set_splitBirths(0);
-    fl->set_splitDeaths(0);
-  }
-};
-
-void BinaryTreeDictionary::clearTreeCensus(void) {
-  clearTreeCensusClosure ctc;
-  ctc.do_tree(root());
-}
-
-// Do reporting and post sweep clean up.
-void BinaryTreeDictionary::endSweepDictCensus(double splitSurplusPercent) {
-  // Does walking the tree 3 times hurt?
-  setTreeSurplus(splitSurplusPercent);
-  setTreeHints();
-  if (PrintGC && Verbose) {
-    reportStatistics();
-  }
-  clearTreeCensus();
-}
-    
-// Print summary statistics
-void BinaryTreeDictionary::reportStatistics() const {
-  verify_par_locked();
-  gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
-         "------------------------------------\n");
-  size_t totalSize = totalChunkSize(debug_only(NULL));
-  size_t    freeBlocks = numFreeBlocks();
-  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
-  gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSize());
-  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
-  if (freeBlocks > 0) {
-    gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
-  }
-  gclog_or_tty->print("Tree      Height: %d\n", treeHeight());
-}
-
-// Print census information - counts, births, deaths, etc.
-// for each list in the tree.  Also print some summary
-// information.
-class printTreeCensusClosure : public AscendTreeCensusClosure {
-  size_t _totalFree;
-  AllocationStats _totals;
-  size_t _count;
- 
- public:
-  printTreeCensusClosure() {
-    _totalFree = 0;
-    _count = 0;
-    _totals.initialize();
-  }
-  AllocationStats* totals() { return &_totals; }
-  size_t count() { return _count; }
-  void increment_count_by(size_t v) { _count += v; }
-  size_t totalFree() { return _totalFree; }
-  void increment_totalFree_by(size_t v) { _totalFree += v; }
-  void do_list(FreeList* fl) {
-    bool nl = false; // "maybe this is not needed" isNearLargestChunk(fl->head());
-
-    gclog_or_tty->print("%c %4d\t\t" "%7d\t" "%7d\t"
-               "%7d\t"      "%7d\t" "%7d\t" "%7d\t"
-               "%7d\t"      "%7d\t" "%7d\t"
-               "%7d\t" "\n",
-               " n"[nl], fl->size(), fl->bfrSurp(), fl->surplus(),
-               fl->desired(), fl->prevSweep(), fl->beforeSweep(), fl->count(),
-               fl->coalBirths(), fl->coalDeaths(), fl->splitBirths(),
-               fl->splitDeaths());
-  
-    increment_totalFree_by(fl->count() * fl->size());
-    increment_count_by(fl->count());
-    totals()->set_bfrSurp(totals()->bfrSurp() + fl->bfrSurp());
-    totals()->set_surplus(totals()->splitDeaths()     + fl->surplus());
-    totals()->set_prevSweep(totals()->prevSweep()   + fl->prevSweep());
-    totals()->set_beforeSweep(totals()->beforeSweep() + fl->beforeSweep());
-    totals()->set_coalBirths(totals()->coalBirths()  + fl->coalBirths());
-    totals()->set_coalDeaths(totals()->coalDeaths()  + fl->coalDeaths());
-    totals()->set_splitBirths(totals()->splitBirths() + fl->splitBirths());
-    totals()->set_splitDeaths(totals()->splitDeaths() + fl->splitDeaths());
-  }
-};
-
-void BinaryTreeDictionary::printDictCensus(void) const {
-  
-  gclog_or_tty->print("\nBinaryTree\n");
-  gclog_or_tty->print(
-             "%4s\t\t" "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
-             "%7s\t"   "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"     "\n",
-             "size",  "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
-             "count", "cBirths", "cDeaths", "sBirths", "sDeaths");
-
-  printTreeCensusClosure ptc;
-  ptc.do_tree(root());
-
-  gclog_or_tty->print(
-             "\t\t"    "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"
-             "%7s\t"   "%7s\t"    "%7s\t"    "%7s\t"    "%7s\t"     "\n",
-                       "bfrsurp", "surplus", "prvSwep", "bfrSwep",
-             "count",  "cBirths", "cDeaths", "sBirths", "sDeaths");
-  gclog_or_tty->print(
-             "%s\t\t"  "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"
-             "%7d\t"   "%7d\t"    "%7d\t"     "%7d\t"    "%7d\t"    "\n",
-             "totl",
-             ptc.totals()->bfrSurp(), 
-	     ptc.totals()->surplus(), 
-	     ptc.totals()->prevSweep(), 
-	     ptc.totals()->beforeSweep(), 
-	     ptc.count(), 
-	     ptc.totals()->coalBirths(), 
-	     ptc.totals()->coalDeaths(), 
-	     ptc.totals()->splitBirths(), 
-	     ptc.totals()->splitDeaths());
-  gclog_or_tty->print("totalFree(words): %7d growth: %8.5f  deficit: %8.5f\n",
-              ptc.totalFree(),
-              (double)(ptc.totals()->splitBirths()+ptc.totals()->coalBirths()
-                       -ptc.totals()->splitDeaths()-ptc.totals()->coalDeaths())
-              /(ptc.totals()->prevSweep() != 0 ?
-                (double)ptc.totals()->prevSweep() : 1.0),
-             (double)(ptc.totals()->desired() - ptc.count())
-             /(ptc.totals()->desired() != 0 ?
-               (double)ptc.totals()->desired() : 1.0));
-}
-
-// Verify the following tree invariants:
-// . _root has no parent
-// . parent and child point to each other
-// . each node's key correctly related to that of its child(ren)
-void BinaryTreeDictionary::verifyTree() const {
-  guarantee(root() == NULL || totalFreeBlocks() == 0 ||
-    totalSize() != 0, "_totalSize should't be 0?");
-  guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
-  verifyTreeHelper(root());
-}
-
-size_t BinaryTreeDictionary::verifyPrevFreePtrs(TreeList* tl) {
-  size_t ct = 0;
-  for (FreeChunk* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
-    ct++;
-    assert(curFC->prev() == NULL || curFC->prev()->isFree(), 
-      "Chunk should be free");
-  }
-  return ct;
-}
-
-// Note: this helper is recursive rather than iterative, so use with
-// caution on very deep trees; and watch out for stack overflow errors;
-// In general, to be used only for debugging.
-void BinaryTreeDictionary::verifyTreeHelper(TreeList* tl) const {
-  if (tl == NULL)
-    return;
-  guarantee(tl->size() != 0, "A list must has a size");
-  guarantee(tl->left()  == NULL || tl->left()->parent()  == tl,
-         "parent<-/->left");
-  guarantee(tl->right() == NULL || tl->right()->parent() == tl,
-         "parent<-/->right");;
-  guarantee(tl->left() == NULL  || tl->left()->size()    <  tl->size(),
-         "parent !> left");
-  guarantee(tl->right() == NULL || tl->right()->size()   >  tl->size(), 
-         "parent !< left");
-  guarantee(tl->head() == NULL || tl->head()->isFree(), "!Free");
-  guarantee(tl->head() == NULL || tl->head_as_TreeChunk()->list() == tl, 
-    "list inconsistency");
-  guarantee(tl->count() > 0 || (tl->head() == NULL && tl->tail() == NULL),
-    "list count is inconsistent");
-  guarantee(tl->count() > 1 || tl->head() == tl->tail(),
-    "list is incorrectly constructed");
-  size_t count = verifyPrevFreePtrs(tl);
-  guarantee(count == (size_t)tl->count(), "Node count is incorrect");
-  if (tl->head() != NULL) {
-    tl->head_as_TreeChunk()->verifyTreeChunkList();
-  }
-  verifyTreeHelper(tl->left());
-  verifyTreeHelper(tl->right());
-}
-
-void BinaryTreeDictionary::verify() const {
-  verifyTree();
-  guarantee(totalSize() == totalSizeInTree(root()), "Total Size inconsistency");
-}
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,286 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)binaryTreeDictionary.hpp	1.26 07/05/05 17:05:41 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-/* 
- * A binary tree based search structure for free blocks.
- * This is currently used in the Concurrent Mark&Sweep implementation.
- */
-
-// A TreeList is a FreeList which can be used to maintain a
-// binary tree of free lists.
-
-class TreeChunk;
-class BinaryTreeDictionary;
-class AscendTreeCensusClosure;
-class DescendTreeCensusClosure;
-class DescendTreeSearchClosure;
-
-class TreeList: public FreeList {
-  friend class TreeChunk;
-  friend class BinaryTreeDictionary;
-  friend class AscendTreeCensusClosure;
-  friend class DescendTreeCensusClosure;
-  friend class DescendTreeSearchClosure;
-  TreeList* _parent;
-  TreeList* _left;
-  TreeList* _right;
-
- protected:
-  TreeList* parent() const { return _parent; }
-  TreeList* left()   const { return _left;   }
-  TreeList* right()  const { return _right;  }
-
-  // Accessors for links in tree.
-
-  void setLeft(TreeList* tl) {
-    _left   = tl;
-    if (tl != NULL)
-      tl->setParent(this);
-  }
-  void setRight(TreeList* tl) {
-    _right  = tl;
-    if (tl != NULL)
-      tl->setParent(this);
-  }
-  void setParent(TreeList* tl)  { _parent = tl;   }
-
-  void clearLeft()               { _left = NULL;   }
-  void clearRight()              { _right = NULL;  }
-  void clearParent()             { _parent = NULL; }
-  void initialize()		 { clearLeft(); clearRight(), clearParent(); }
-
-  // For constructing a TreeList from a Tree chunk or
-  // address and size.
-  static TreeList* as_TreeList(TreeChunk* tc);
-  static TreeList* as_TreeList(HeapWord* addr, size_t size);
-
-  // Returns the head of the free list as a pointer to a TreeChunk.
-  TreeChunk* head_as_TreeChunk();
-
-  // Returns the first available chunk in the free list as a pointer
-  // to a TreeChunk.
-  TreeChunk* first_available();
-
-  // removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
-  // If "tc" is the first chunk in the list, it is also the
-  // TreeList that is the node in the tree.  removeChunkReplaceIfNeeded()
-  // returns the possibly replaced TreeList* for the node in
-  // the tree.  It also updates the parent of the original
-  // node to point to the new node.
-  TreeList* removeChunkReplaceIfNeeded(TreeChunk* tc);
-  // See FreeList.
-  void returnChunkAtHead(TreeChunk* tc);
-  void returnChunkAtTail(TreeChunk* tc);
-};
-
-// A TreeChunk is a subclass of a FreeChunk that additionally
-// maintains a pointer to the free list on which it is currently
-// linked.  
-// A TreeChunk is also used as a node in the binary tree.  This
-// allows the binary tree to be maintained without any additional
-// storage (the free chunks are used).  In a binary tree the first
-// chunk in the free list is also the tree node.  Note that the
-// TreeChunk has an embedded TreeList for this purpose.  Because
-// the first chunk in the list is distinguished in this fashion
-// (also is the node in the tree), it is the last chunk to be found
-// on the free list for a node in the tree and is only removed if
-// it is the last chunk on the free list.
-
-class TreeChunk : public FreeChunk {
-  friend class TreeList;
-  TreeList* _list;
-  TreeList _embedded_list;  // if non-null, this chunk is on _list
- protected:
-  TreeList* embedded_list() const { return (TreeList*) &_embedded_list; }
-  void set_embedded_list(TreeList* v) { _embedded_list = *v; }
- public:
-  TreeList* list() { return _list; }
-  void set_list(TreeList* v) { _list = v; }
-  static TreeChunk* as_TreeChunk(FreeChunk* fc);
-  // Initialize fields in a TreeChunk that should be
-  // initialized when the TreeChunk is being added to
-  // a free list in the tree.
-  void initialize() { embedded_list()->initialize(); }
-
-  // debugging
-  void verifyTreeChunkList() const;
-};
-
-const size_t MIN_TREE_CHUNK_SIZE  = sizeof(TreeChunk)/HeapWordSize;
-
-class BinaryTreeDictionary: public FreeBlockDictionary {
-  bool       _splay;
-  size_t     _totalSize;
-  size_t     _totalFreeBlocks;
-  TreeList* _root;
-
-  // private accessors
-  bool splay() const { return _splay; }
-  void set_splay(bool v) { _splay = v; }
-  size_t totalSize() const { return _totalSize; }
-  void set_totalSize(size_t v) { _totalSize = v; }
-  virtual void inc_totalSize(size_t v);
-  virtual void dec_totalSize(size_t v);
-  size_t totalFreeBlocks() const { return _totalFreeBlocks; }
-  void set_totalFreeBlocks(size_t v) { _totalFreeBlocks = v; }
-  TreeList* root() const { return _root; }
-  void set_root(TreeList* v) { _root = v; }
-
-  // Remove a chunk of size "size" or larger from the tree and
-  // return it.  If the chunk 
-  // is the last chunk of that size, remove the node for that size 
-  // from the tree.
-  TreeChunk* getChunkFromTree(size_t size, Dither dither, bool splay);
-  // Return a list of the specified size or NULL from the tree.
-  // The list is not removed from the tree.
-  TreeList* findList (size_t size) const;
-  // Remove this chunk from the tree.  If the removal results
-  // in an empty list in the tree, remove the empty list.
-  TreeChunk* removeChunkFromTree(TreeChunk* tc);
-  // Remove the node in the trees starting at tl that has the
-  // minimum value and return it.  Repair the tree as needed.
-  TreeList* removeTreeMinimum(TreeList* tl);
-  void       semiSplayStep(TreeList* tl);
-  // Add this free chunk to the tree.
-  void       insertChunkInTree(FreeChunk* freeChunk);
- public:
-  void       verifyTree() const;
-  // verify that the given chunk is in the tree.
-  bool       verifyChunkInFreeLists(FreeChunk* tc) const;
- private:
-  void          verifyTreeHelper(TreeList* tl) const;
-  static size_t verifyPrevFreePtrs(TreeList* tl);
-
-  // Returns the total number of chunks in the list.
-  size_t     totalListLength(TreeList* tl) const;
-  // Returns the total number of words in the chunks in the tree
-  // starting at "tl".
-  size_t     totalSizeInTree(TreeList* tl) const;
-  // Returns the sum of the square of the size of each block
-  // in the tree starting at "tl".
-  double     sum_of_squared_block_sizes(TreeList* const tl) const;
-  // Returns the total number of free blocks in the tree starting
-  // at "tl".
-  size_t     totalFreeBlocksInTree(TreeList* tl) const;
-  size_t     numFreeBlocks() const;
-  size_t     treeHeight() const;
-  size_t     treeHeightHelper(TreeList* tl) const;
-  size_t     totalNodesInTree(TreeList* tl) const;
-  size_t     totalNodesHelper(TreeList* tl) const;
-
- public:
-  // Constructor
-  BinaryTreeDictionary(MemRegion mr, bool splay = false);
-
-  // Reset the dictionary to the initial conditions with
-  // a single free chunk.
-  void	     reset(MemRegion mr);
-  void       reset(HeapWord* addr, size_t size);
-  // Reset the dictionary to be empty.
-  void       reset();
-
-  // Return a chunk of size "size" or greater from
-  // the tree.  
-  // want a better dynamic splay strategy for the future.
-  FreeChunk* getChunk(size_t size, Dither dither) {
-    verify_par_locked();
-    FreeChunk* res = getChunkFromTree(size, dither, splay());
-    assert(res == NULL || res->isFree(),
-           "Should be returning a free chunk");
-    return res;
-  }
-
-  void returnChunk(FreeChunk* chunk) {
-    verify_par_locked();
-    insertChunkInTree(chunk);
-  }
-
-  void removeChunk(FreeChunk* chunk) {
-    verify_par_locked();
-    removeChunkFromTree((TreeChunk*)chunk);
-    assert(chunk->isFree(), "Should still be a free chunk");
-  }
-
-  size_t     maxChunkSize() const;
-  size_t     totalChunkSize(debug_only(const Mutex* lock)) const {
-    debug_only(
-      if (lock != NULL && lock->owned_by_self()) {
-        assert(totalSizeInTree(root()) == totalSize(),
-               "_totalSize inconsistency");
-      }
-    )
-    return totalSize();
-  }
-
-  size_t     minSize() const {
-    return MIN_TREE_CHUNK_SIZE;
-  }
-
-  double     sum_of_squared_block_sizes() const {
-    return sum_of_squared_block_sizes(root());
-  }
-
-  FreeChunk* find_chunk_ends_at(HeapWord* target) const;
-
-  // Find the list with size "size" in the binary tree and update
-  // the statistics in the list according to "split" (chunk was
-  // split or coalesce) and "birth" (chunk was added or removed).
-  void       dictCensusUpdate(size_t size, bool split, bool birth);
-  // Return true if the dictionary is overpopulated (more chunks of
-  // this size than desired) for size "size".
-  bool       coalDictOverPopulated(size_t size);
-  // Methods called at the beginning of a sweep to prepare the
-  // statistics for the sweep.
-  void       beginSweepDictCensus(double coalSurplusPercent,
-                                  float sweep_current,
-                                  float sweep_estimate);
-  // Methods called after the end of a sweep to modify the
-  // statistics for the sweep.
-  void       endSweepDictCensus(double splitSurplusPercent);
-  // Return the largest free chunk in the tree.
-  FreeChunk* findLargestDict() const;
-  // Accessors for statistics
-  void 	     setTreeSurplus(double splitSurplusPercent);
-  void 	     setTreeHints(void);
-  // Reset statistics for all the lists in the tree.
-  void	     clearTreeCensus(void);
-  // Print the statistcis for all the lists in the tree.  Also may
-  // print out summaries.
-  void	     printDictCensus(void) const;
-
-  // For debugging.  Returns the sum of the _returnedBytes for
-  // all lists in the tree.
-  size_t     sumDictReturnedBytes()	PRODUCT_RETURN0;
-  // Sets the _returnedBytes for all the lists in the tree to zero.
-  void	     initializeDictReturnedBytes()	PRODUCT_RETURN;
-  // For debugging.  Return the total number of chunks in the dictionary.
-  size_t     totalCount()	PRODUCT_RETURN0;
-
-  void       reportStatistics() const;
-
-  void       verify() const;
-};
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)cardTableModRefBS.cpp	1.56 07/05/05 17:05:43 JVM"
+#pragma ident "@(#)cardTableModRefBS.cpp	1.57 07/05/17 15:54:33 JVM"
 #endif
 /*
  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -177,7 +177,9 @@
 
 void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   // We don't change the start of a region, only the end.
-  assert(_whole_heap.contains(new_region), "attempt to cover area not in reserved area");
+  assert(_whole_heap.contains(new_region), 
+	   "attempt to cover area not in reserved area");
+  debug_only(verify_guard();)
   int ind = find_covering_region_by_base(new_region.start());
   MemRegion old_region = _covered[ind];
   assert(old_region.start() == new_region.start(), "just checking");
@@ -197,16 +199,22 @@
       (HeapWord*)align_size_up((uintptr_t)new_end, os::vm_page_size());
     assert(new_end_aligned >= (HeapWord*) new_end,
            "align up, but less");
-    if (new_end_aligned > cur_committed.end()) {
+    // The guard page is always committed and should not be committed over.
+    HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
+    if (new_end_for_commit > cur_committed.end()) {
       // Must commit new pages.
       MemRegion new_committed =
-	MemRegion(cur_committed.end(), new_end_aligned);
+	MemRegion(cur_committed.end(), new_end_for_commit);
+
+      assert(!new_committed.is_empty(), "Region should not be empty here");
       if (!os::commit_memory((char*)new_committed.start(),
-			     new_committed.byte_size())) {
-	// Do better than this for Merlin
-	vm_exit_out_of_memory(new_committed.byte_size(),
-			      "card table expansion");
+	                     new_committed.byte_size())) {
+        // Do better than this for Merlin
+        vm_exit_out_of_memory(new_committed.byte_size(),
+	        "card table expansion");
       }
+    // Use new_end_aligned (as opposed to new_end_for_commit) because
+    // the cur_committed region may include the guard region.
     } else if (new_end_aligned < cur_committed.end()) {
       // Must uncommit pages.
       MemRegion uncommit_region = 
@@ -231,6 +239,8 @@
     } else {
       entry = byte_after(old_region.last());
     }
+    assert(index_for(new_region.last()) < (int) _guard_index,
+      "The guard card will be overwritten");
     jbyte* end = byte_after(new_region.last());
     // do nothing if we resized downward.
     if (entry < end) {
@@ -262,6 +272,7 @@
                   addr_for((jbyte*) _committed[ind].start()),
                   addr_for((jbyte*) _committed[ind].last()));
   }
+  debug_only(verify_guard();)
 }
 
 // Note that these versions are precise!  The scanning code has to handle the
@@ -772,11 +783,16 @@
   lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
 }
 
-void CardTableModRefBS::verify() {
+void CardTableModRefBS::verify_guard() {
+  // For product build verification
   guarantee(_byte_map[_guard_index] == last_card,
             "card table guard has been modified");
 }
 
+void CardTableModRefBS::verify() {
+  verify_guard();
+}
+
 #ifndef PRODUCT
 class GuaranteeNotModClosure: public MemRegionClosure {
   CardTableModRefBS* _ct;
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)cardTableModRefBS.hpp	1.49 07/05/05 17:05:43 JVM"
+#pragma ident "@(#)cardTableModRefBS.hpp	1.50 07/05/17 15:54:35 JVM"
 #endif
 /*
  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -128,7 +128,8 @@
 
   // Returns the part of the region mr that doesn't intersect with 
   // any committed region other than self.  Used to prevent uncommitting 
-  // regions that are also committed by other regions.
+  // regions that are also committed by other regions.  Also protects
+  // against uncommitting the guard region.
   MemRegion committed_unique_to_self(int self, MemRegion mr) const;
 
   // Mapping from address to card marking array entry
@@ -387,6 +388,7 @@
   }
 
   void verify();
+  void verify_guard();
 
   void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
 
--- a/hotspot/src/share/vm/memory/cmsLockVerifier.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,99 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)cmsLockVerifier.cpp	1.14 07/05/05 17:05:44 JVM"
-#endif
-/*
- * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_cmsLockVerifier.cpp.incl"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except that it relaxes the
-// assertion somewhat for the parallel GC case, where VM thread
-// or the CMS thread might hold the lock on behalf of the parallel
-// threads. The second argument is in support of an extra locking
-// check for CFL spaces' free list locks.
-#ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
-  if (!Universe::is_fully_initialized()) {
-    return;
-  }
-
-  Thread* myThread = Thread::current();
-
-  if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
-    assert(p_lock == NULL, "Unexpected state");
-    if (myThread->is_ConcurrentGC_thread()) {
-      // This test might have to change in the future, if there can be
-      // multiple peer CMS threads.  But for now, if we're testing the CMS
-      assert(myThread == ConcurrentMarkSweepThread::cmst(),
-	     "In CMS, CMS thread is the only Conc GC thread.");
-      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-             "CMS thread should have CMS token");
-    } else if (myThread->is_VM_thread()) {
-      assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-             "VM thread should have CMS token");
-    } else {
-      // Token should be held on our behalf by one of the other
-      // of CMS or VM thread; not enough easily testable
-      // state info to test which here.
-      assert(myThread->is_GC_task_thread(), "Unexpected thread type");
-    }
-    return;
-  } 
-
-  if (ParallelGCThreads == 0) {
-    assert_lock_strong(lock);
-  } else {
-    if (myThread->is_VM_thread()
-        || myThread->is_ConcurrentGC_thread()
-        || myThread->is_Java_thread()) {
-      // Make sure that we are holding the associated lock.
-      assert_lock_strong(lock);
-      // The checking of p_lock is a spl case for CFLS' free list
-      // locks: we make sure that none of the parallel GC work gang
-      // threads are holding "sub-locks" of freeListLock(). We check only
-      // the parDictionaryAllocLock because the others are too numerous.
-      // This spl case code is somewhat ugly and any improvements
-      // are welcome XXX FIX ME!!
-      if (p_lock != NULL) {
-        assert(!p_lock->is_locked() || p_lock->owned_by_self(),
-               "Possible race between this and parallel GC threads");
-      }
-    } else if (myThread->is_GC_task_thread()) {
-      // Make sure that the VM or CMS thread holds lock on our behalf
-      // XXX If there were a concept of a gang_master for a (set of)
-      // gang_workers, we could have used the identity of that thread
-      // for checking ownership here; for now we just disjunct.
-      assert(lock->owner() == VMThread::vm_thread() ||
-             lock->owner() == ConcurrentMarkSweepThread::cmst(),
-             "Should be locked by VM thread or CMS thread on my behalf");
-    } else {
-      // Make sure we didn't miss some obscure corner case
-      ShouldNotReachHere();
-    }
-  }
-}
-#endif
-
--- a/hotspot/src/share/vm/memory/cmsLockVerifier.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)cmsLockVerifier.hpp	1.9 07/05/05 17:05:44 JVM"
-#endif
-/*
- * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except
-// that it relaxes the assertion somewhat for the parallel GC case, where
-// main GC thread or the CMS thread might hold the lock on behalf of
-// the parallel threads.
-class CMSLockVerifier: AllStatic {
- public:
-  static void assert_locked(const Mutex* lock, const Mutex* p_lock)
-    PRODUCT_RETURN;
-  static void assert_locked(const Mutex* lock) {
-    assert_locked(lock, NULL);
-  }
-};
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)collectorPolicy.cpp	1.86 07/05/05 17:05:43 JVM"
+#pragma ident "@(#)collectorPolicy.cpp	1.87 07/05/17 15:54:37 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -60,6 +60,8 @@
   // User inputs from -mx and ms are aligned
   _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(),
 					  min_alignment());
+  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(),
+					  min_alignment());
   _max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment());
 
   // Check validity of heap parameters from launcher
@@ -69,15 +71,31 @@
     Universe::check_alignment(_initial_heap_byte_size, min_alignment(),
                             "initial heap");
   }
+  if (_min_heap_byte_size == 0) {
+    _min_heap_byte_size = NewSize + OldSize;
+  } else {
+    Universe::check_alignment(_min_heap_byte_size, min_alignment(),
+                            "initial heap");
+  }
 
   // Check heap parameter properties
   if (_initial_heap_byte_size < M) {
     vm_exit_during_initialization("Too small initial heap");
   }
+  // Check heap parameter properties
+  if (_min_heap_byte_size < M) {
+    vm_exit_during_initialization("Too small minimum heap");
+  }
   if (_initial_heap_byte_size <= NewSize) {
      // make sure there is at least some room in old space
     vm_exit_during_initialization("Too small initial heap for new size specified");
   }
+  if (_max_heap_byte_size < _min_heap_byte_size) {
+    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
+  }
+  if (_initial_heap_byte_size < _min_heap_byte_size) {
+    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
+  }
   if (_max_heap_byte_size < _initial_heap_byte_size) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
@@ -195,15 +213,23 @@
 
   // Minimum sizes of the generations may be different than
   // the initial sizes.
-  _min_gen0_size = NewSize;
+  if (!FLAG_IS_DEFAULT(NewSize)) {
+    _min_gen0_size = NewSize;
+  } else {
+    _min_gen0_size = align_size_down(_min_heap_byte_size / (NewRatio+1),
+				     min_alignment());
+  }	
 
   // Parameters are valid, compute area sizes.
   size_t max_new_size = align_size_down(_max_heap_byte_size / (NewRatio+1),
 					min_alignment());
   max_new_size = MIN2(MAX2(max_new_size, _min_gen0_size), MaxNewSize);
 
-  size_t desired_new_size = align_size_down(_initial_heap_byte_size / (NewRatio+1),
-					    min_alignment());
+  // desired_new_size is used to set the initial size.  The
+  // initial size must be greater than the minimum size.
+  size_t desired_new_size = 
+    align_size_down(_initial_heap_byte_size / (NewRatio+1),
+		  min_alignment());
 
   size_t new_size = MIN2(MAX2(desired_new_size, _min_gen0_size), max_new_size);
 
@@ -215,8 +241,24 @@
   GenCollectorPolicy::initialize_size_info();
   
   // Minimum sizes of the generations may be different than
-  // the initial sizes.
-  _min_gen1_size = OldSize;
+  // the initial sizes.  An inconsistently is permitted here
+  // in the total size that can be specified explicitly by
+  // command line specification of OldSize and NewSize and
+  // also a command line specification of -Xms.  Issue a warning
+  // but allow the values to pass.
+  if (!FLAG_IS_DEFAULT(OldSize)) {
+    _min_gen1_size = OldSize;
+    // The generation minimums and the overall heap mimimum should
+    // be within one heap alignment.
+    if ((_min_gen1_size + _min_gen0_size + max_alignment()) < 
+	 _min_heap_byte_size) {
+      warning("Inconsistency between minimum heap size and minimum "
+	"generation sizes: using min heap = " SIZE_FORMAT, 
+	_min_heap_byte_size);
+    }
+  } else {
+    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
+  }
 
   _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
   _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
@@ -256,11 +298,6 @@
       }
       // Note that only large objects get a shot at being
       // allocated in later generations.
-#ifdef JVMPI_SUPPORT
-      // If jvmpi slow allocation
-      // is enabled, allocate in later generations (since the
-      // first generation is always full.
-#endif // JVMPI_SUPPORT
       bool first_only = ! should_try_older_generation_allocation(size);
 
       result = gch->attempt_allocation(size, is_tlab, first_only);
@@ -450,9 +487,6 @@
 // . heap memory is tight -- the most recent previous collection
 //   was a full collection because a partial collection (would
 //   have) failed and is likely to fail again
-#ifdef JVMPI_SUPPORT
-// . jvmpi_slow_allocation
-#endif // JVMPI_SUPPORT
 bool GenCollectorPolicy::should_try_older_generation_allocation(
 	size_t word_size) const {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -460,12 +494,7 @@
   return    (word_size > heap_word_size(gen0_capacity))
          || (GC_locker::is_active_and_needs_gc())
          || (   gch->last_incremental_collection_failed()
-#ifdef JVMPI_SUPPORT
-             && gch->incremental_collection_will_fail())
-         || Universe::jvmpi_slow_allocation();
-#else // !JVMPI_SUPPORT
              && gch->incremental_collection_will_fail());
-#endif // JVMPI_SUPPORT
 }
 
 //
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)collectorPolicy.hpp	1.39 07/05/05 17:05:45 JVM"
+#pragma ident "@(#)collectorPolicy.hpp	1.40 07/05/17 15:54:39 JVM"
 #endif
 /*
  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -63,6 +63,7 @@
 
   size_t _initial_heap_byte_size;
   size_t _max_heap_byte_size;
+  size_t _min_heap_byte_size;
 
   size_t _min_alignment;
   size_t _max_alignment;
@@ -71,7 +72,8 @@
     _min_alignment(1),
     _max_alignment(1),
     _initial_heap_byte_size(0),
-    _max_heap_byte_size(0)
+    _max_heap_byte_size(0),
+    _min_heap_byte_size(0)
   {}
 
  public:
@@ -82,6 +84,7 @@
 
   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
+  size_t min_heap_byte_size()     { return _min_heap_byte_size; }
 
   enum Name {
     CollectorPolicyKind,
--- a/hotspot/src/share/vm/memory/compactibleFreeListSpace.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2843 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)compactibleFreeListSpace.cpp	1.141 07/05/05 17:05:45 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_compactibleFreeListSpace.cpp.incl"
-
-/////////////////////////////////////////////////////////////////////////
-//// CompactibleFreeListSpace
-/////////////////////////////////////////////////////////////////////////
-
-// highest ranked  free list lock rank
-int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
-
-// Constructor
-CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
-  MemRegion mr, bool use_adaptive_freelists,
-  FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
-  _dictionaryChoice(dictionaryChoice),
-  _adaptive_freelists(use_adaptive_freelists),
-  _bt(bs, mr),
-  // free list locks are in the range of values taken by _lockRank
-  // This range currently is [_leaf+2, _leaf+3]
-  // Note: this requires that CFLspace c'tors
-  // are called serially in the order in which the locks are
-  // are acquired in the program text. This is true today.
-  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
-  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
-			  "CompactibleFreeListSpace._dict_par_lock", true),
-  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
-                    CMSRescanMultiple),
-  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
-                    CMSConcMarkMultiple),
-  _collector(NULL)
-{
-  _bt.set_space(this);
-  initialize(mr, true);
-  // We have all of "mr", all of which we place in the dictionary
-  // as one big chunk. We'll need to decide here which of several
-  // possible alternative dictionary implementations to use. For
-  // now the choice is easy, since we have only one working
-  // implementation, namely, the simple binary tree (splaying
-  // temporarily disabled).
-  switch (dictionaryChoice) {
-    case FreeBlockDictionary::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary(mr);
-      break;
-    case FreeBlockDictionary::dictionarySplayTree:
-    case FreeBlockDictionary::dictionarySkipList:
-    default:
-      warning("dictionaryChoice: selected option not understood; using"
-              " default BinaryTreeDictionary implementation instead.");
-      _dictionary = new BinaryTreeDictionary(mr);
-      break;
-  }
-  splitBirth(mr.word_size());
-  assert(_dictionary != NULL, "CMS dictionary initialization");
-  // The indexed free lists are initially all empty and are lazily
-  // filled in on demand. Initialize the array elements to NULL.
-  initializeIndexedFreeListArray();
-
-  // Not using adaptive free lists assumes that allocation is first
-  // from the linAB's.  Also a cms perm gen which can be compacted
-  // has to have the klass's klassKlass allocated at a lower
-  // address in the heap than the klass so that the klassKlass is
-  // moved to its new location before the klass is moved.
-  // Set the _refillSize for the linear allocation blocks
-  if (!use_adaptive_freelists) {
-    FreeChunk* fc = _dictionary->getChunk(mr.word_size());
-    // The small linAB initially has all the space and will allocate
-    // a chunk of any size.
-    HeapWord* addr = (HeapWord*) fc;
-    _smallLinearAllocBlock.set(addr, fc->size() , 
-      1024*SmallForLinearAlloc, fc->size());
-    // Note that _unallocated_block is not updated here.
-    // Allocations from the linear allocation block should
-    // update it.
-  } else {
-    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, 
-			       SmallForLinearAlloc);
-  }
-  // CMSIndexedFreeListReplenish should be at least 1
-  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
-  _promoInfo.setSpace(this);
-  if (UseCMSBestFit) {
-    _fitStrategy = FreeBlockBestFitFirst;
-  } else {
-    _fitStrategy = FreeBlockStrategyNone;
-  }
-  checkFreeListConsistency();
-
-  // Initialize locks for parallel case.
-  if (ParallelGCThreads > 0) {
-    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
-					      "a freelist par lock",
-					      true);
-      if (_indexedFreeListParLocks[i] == NULL) 
-	vm_exit_during_initialization("Could not allocate a par lock");
-      DEBUG_ONLY(
-        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
-      )
-    }
-    _dictionary->set_par_lock(&_parDictionaryAllocLock);
-  }
-}
-
-// Like CompactibleSpace forward() but always calls cross_threshold() to
-// update the block offset table.  Removed initialize_threshold call because
-// CFLS does not use a block offset array for contiguous spaces.
-HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, 
-				    CompactPoint* cp, HeapWord* compact_top) {
-  // q is alive
-  // First check if we should switch compaction space
-  assert(this == cp->space, "'this' should be current compaction space.");
-  size_t compaction_max_size = pointer_delta(end(), compact_top);
-  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
-    "virtual adjustObjectSize_v() method is not correct");
-  size_t adjusted_size = adjustObjectSize(size);
-  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
-         "no small fragments allowed");
-  assert(minimum_free_block_size() == MinChunkSize,
-         "for de-virtualized reference below");
-  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
-  if (adjusted_size + MinChunkSize > compaction_max_size &&
-      adjusted_size != compaction_max_size) {
-    do {
-      // switch to next compaction space
-      cp->space->set_compaction_top(compact_top);
-      cp->space = cp->space->next_compaction_space();
-      if (cp->space == NULL) {
-        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
-        assert(cp->gen != NULL, "compaction must succeed");
-        cp->space = cp->gen->first_compaction_space();
-        assert(cp->space != NULL, "generation must have a first compaction space");
-      }
-      compact_top = cp->space->bottom();
-      cp->space->set_compaction_top(compact_top);
-      // The correct adjusted_size may not be the same as that for this method
-      // (i.e., cp->space may no longer be "this" so adjust the size again.
-      // Use the virtual method which is not used above to save the virtual
-      // dispatch.
-      adjusted_size = cp->space->adjust_object_size_v(size);
-      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
-      assert(cp->space->minimum_free_block_size() == 0, "just checking");
-    } while (adjusted_size > compaction_max_size);
-  }
-
-  // store the forwarding pointer into the mark word
-  if ((HeapWord*)q != compact_top) {
-    q->forward_to(oop(compact_top));
-    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
-  } else {
-    // if the object isn't moving we can just set the mark to the default
-    // mark and handle it specially later on.  
-    q->init_mark();
-    assert(q->forwardee() == NULL, "should be forwarded to NULL");
-  }
-
-  debug_only(MarkSweep::register_live_oop(q, adjusted_size));
-  compact_top += adjusted_size;
-
-  // we need to update the offset table so that the beginnings of objects can be
-  // found during scavenge.  Note that we are updating the offset table based on
-  // where the object will be once the compaction phase finishes.
-
-  // Always call cross_threshold().  A contiguous space can only call it when
-  // the compaction_top exceeds the current threshold but not for an
-  // non-contiguous space.
-  cp->threshold =
-    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
-  return compact_top;
-}
-
-// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
-// and use of single_block instead of alloc_block.  The name here is not really
-// appropriate - maybe a more general name could be invented for both the
-// contiguous and noncontiguous spaces.
-
-HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
-  _bt.single_block(start, the_end);
-  return end();
-}
-
-// Initialize them to NULL.
-void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
-  for (size_t i = 0; i < IndexSetSize; i++) {
-    // Note that on platforms where objects are double word aligned,
-    // the odd array elements are not used.  It is convenient, however,
-    // to map directly from the object size to the array element.
-    _indexedFreeList[i].reset(IndexSetSize);
-    _indexedFreeList[i].set_size(i);
-    assert(_indexedFreeList[i].count() == 0, "reset check failed");
-    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
-  }
-}
-
-void CompactibleFreeListSpace::resetIndexedFreeListArray() {
-  for (int i = 1; i < IndexSetSize; i++) {
-    assert(_indexedFreeList[i].size() == (size_t) i, 
-      "Indexed free list sizes are incorrect");
-    _indexedFreeList[i].reset(IndexSetSize);
-    assert(_indexedFreeList[i].count() == 0, "reset check failed");
-    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
-  }
-}
-
-void CompactibleFreeListSpace::reset(MemRegion mr) {
-  resetIndexedFreeListArray();
-  dictionary()->reset();
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
-    // Everything's allocated until proven otherwise.
-    _bt.set_unallocated_block(end());
-  }
-  if (!mr.is_empty()) {
-    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
-    _bt.single_block(mr.start(), mr.word_size());
-    FreeChunk* fc = (FreeChunk*) mr.start();
-    fc->setSize(mr.word_size());
-    if (mr.word_size() >= IndexSetSize ) {
-      returnChunkToDictionary(fc);
-    } else {
-      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-      _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
-    }
-  }
-  _promoInfo.reset();
-  _smallLinearAllocBlock._ptr = NULL;
-  _smallLinearAllocBlock._word_size = 0;
-}
-
-void CompactibleFreeListSpace::reset_after_compaction() {
-  // Reset the space to the new reality - one free chunk.
-  MemRegion mr(compaction_top(), end());
-  reset(mr);
-  // Now refill the linear allocation block(s) if possible.
-  if (_adaptive_freelists) {
-    refillLinearAllocBlocksIfNeeded();
-  } else {
-    // Place as much of mr in the linAB as we can get,
-    // provided it was big enough to go into the dictionary.
-    FreeChunk* fc = dictionary()->findLargestDict();
-    if (fc != NULL) {
-      assert(fc->size() == mr.word_size(),
-             "Why was the chunk broken up?");
-      removeChunkFromDictionary(fc);
-      HeapWord* addr = (HeapWord*) fc;
-      _smallLinearAllocBlock.set(addr, fc->size() ,
-        1024*SmallForLinearAlloc, fc->size());
-      // Note that _unallocated_block is not updated here.
-    }
-  }
-}
-
-// Walks the entire dictionary, returning a coterminal
-// chunk, if it exists. Use with caution since it involves
-// a potentially complete walk of a potentially large tree.
-FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
-
-  assert_lock_strong(&_freelistLock);
-
-  return dictionary()->find_chunk_ends_at(end());
-}
-
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
-  }
-}
-
-size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
-  size_t sum = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
-  }
-  return sum;
-}
-
-size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
-  size_t count = 0;
-  for (int i = MinChunkSize; i < IndexSetSize; i++) {
-    debug_only(
-      ssize_t total_list_count = 0;
-      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-        total_list_count++;
-      }
-      assert(total_list_count ==  _indexedFreeList[i].count(),
-	"Count in list is incorrect");
-    )
-    count += _indexedFreeList[i].count();
-  }
-  return count;
-}
-
-size_t CompactibleFreeListSpace::totalCount() {
-  size_t num = totalCountInIndexedFreeLists();
-  num +=  dictionary()->totalCount();
-  if (_smallLinearAllocBlock._word_size != 0) {
-    num++;
-  }
-  return num;
-}
-#endif
-
-bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*) p;
-  return fc->isFree();
-}
-
-size_t CompactibleFreeListSpace::used() const {
-  return capacity() - free();
-}
-
-size_t CompactibleFreeListSpace::free() const {
-  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
-  // if you do this while the structures are in flux you
-  // may get an approximate answer only; for instance
-  // because there is concurrent allocation either
-  // directly by mutators or for promotion during a GC.
-  // It's "MT-safe", however, in the sense that you are guaranteed
-  // not to crash and burn, for instance, because of walking
-  // pointers that could disappear as you were walking them.
-  // The approximation is because the various components
-  // that are read below are not read atomically (and
-  // further the computation of totalSizeInIndexedFreeLists()
-  // is itself a non-atomic computation. The normal use of
-  // this is during a resize operation at the end of GC
-  // and at that time you are guaranteed to get the
-  // correct actual value. However, for instance, this is
-  // also read completely asynchronously by the "perf-sampler"
-  // that supports jvmstat, and you are apt to see the values
-  // flicker in such cases.
-  assert(_dictionary != NULL, "No _dictionary?");
-  return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
-          totalSizeInIndexedFreeLists() +
-          _smallLinearAllocBlock._word_size) * HeapWordSize;
-}
-
-size_t CompactibleFreeListSpace::max_alloc_in_words() const {
-  assert(_dictionary != NULL, "No _dictionary?");
-  assert_locked();
-  size_t res = _dictionary->maxChunkSize();
-  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
-                       (size_t) SmallForLinearAlloc - 1));
-  // XXX the following could potentially be pretty slow;
-  // should one, pesimally for the rare cases when res
-  // caclulated above is less than IndexSetSize,
-  // just return res calculated above? My reasoning was that
-  // those cases will be so rare that the extra time spent doesn't
-  // really matter....
-  // Note: do not change the loop test i >= res + IndexSetStride
-  // to i > res below, because i is unsigned and res may be zero.
-  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
-       i -= IndexSetStride) {
-    if (_indexedFreeList[i].head() != NULL) {
-      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
-      return i;
-    }
-  }
-  return res;
-}
-
-void CompactibleFreeListSpace::reportFreeListStatistics() const {
-  assert_lock_strong(&_freelistLock);
-  assert(PrintFLSStatistics != 0, "Reporting error");
-  _dictionary->reportStatistics();
-  if (PrintFLSStatistics > 1) {
-    reportIndexedFreeListStatistics();
-    size_t totalSize = totalSizeInIndexedFreeLists() +
-                       _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
-    gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
-  }
-}
-
-void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
-  assert_lock_strong(&_freelistLock);
-  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
-                      "--------------------------------\n");
-  size_t totalSize = totalSizeInIndexedFreeLists();
-  size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
-  gclog_or_tty->print("Total Free Space: %d\n", totalSize);
-  gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
-  gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
-  if (freeBlocks != 0) {
-    gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
-  }
-}
-
-size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
-  size_t res = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    debug_only(
-      ssize_t recount = 0;
-      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-        recount += 1;
-      }
-      assert(recount == _indexedFreeList[i].count(), 
-	"Incorrect count in list");
-    )
-    res += _indexedFreeList[i].count();
-  }
-  return res;
-}
-
-size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
-  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    if (_indexedFreeList[i].head() != NULL) {
-      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
-      return (size_t)i;
-    }
-  }
-  return 0;
-}
-
-void CompactibleFreeListSpace::set_end(HeapWord* value) {
-  HeapWord* prevEnd = end();
-  assert(prevEnd != value, "unnecessary set_end call");
-  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
-  _end = value;
-  if (prevEnd != NULL) {
-    // Resize the underlying block offset table.
-    _bt.resize(pointer_delta(value, bottom()));
-  if (value <= prevEnd) {
-    assert(value >= unallocated_block(), "New end is below unallocated block");
-  } else {
-    // Now, take this new chunk and add it to the free blocks.
-    // Note that the BOT has not yet been updated for this block.
-    size_t newFcSize = pointer_delta(value, prevEnd);
-    // XXX This is REALLY UGLY and should be fixed up. XXX
-    if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
-      // Mark the boundary of the new block in BOT
-      _bt.mark_block(prevEnd, value);
-      // put it all in the linAB
-      if (ParallelGCThreads == 0) {
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      } else { // ParallelGCThreads > 0
-        MutexLockerEx x(parDictionaryAllocLock(),
-                        Mutex::_no_safepoint_check_flag);
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      }
-      // Births of chunks put into a LinAB are not recorded.  Births
-      // of chunks as they are allocated out of a LinAB are.
-    } else {
-      // Add the block to the free lists, if possible coalescing it
-      // with the last free block, and update the BOT and census data.
-      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
-    }
-  }
-  }
-}
-
-class FreeListSpace_DCTOC : public Filtering_DCTOC {
-  CompactibleFreeListSpace* _cfls;
-  CMSCollector* _collector;
-protected:
-  // Override.
-#define walk_mem_region_with_cl_DECL(ClosureType)                       \
-  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
-				       HeapWord* bottom, HeapWord* top, \
-				       ClosureType* cl);                \
-      void walk_mem_region_with_cl_par(MemRegion mr,                    \
-				       HeapWord* bottom, HeapWord* top, \
-				       ClosureType* cl);                \
-    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
-				       HeapWord* bottom, HeapWord* top, \
-				       ClosureType* cl)
-  walk_mem_region_with_cl_DECL(OopClosure);
-  walk_mem_region_with_cl_DECL(FilteringClosure);
-
-public:
-  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
-                      CMSCollector* collector,
-                      OopClosure* cl,
-  		      CardTableModRefBS::PrecisionStyle precision,
-		      HeapWord* boundary) :
-    Filtering_DCTOC(sp, cl, precision, boundary),
-    _cfls(sp), _collector(collector) {}
-};
-
-// We de-virtualize the block-related calls below, since we know that our
-// space is a CompactibleFreeListSpace.
-#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
-						 HeapWord* bottom,              \
-						 HeapWord* top,                 \
-						 ClosureType* cl) {             \
-   if (SharedHeap::heap()->n_par_threads() > 0) {                               \
-     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
-   } else {                                                                     \
-     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
-   }                                                                            \
-}                                                                               \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
-						      HeapWord* bottom,         \
-						      HeapWord* top,            \
-						      ClosureType* cl) {        \
-  /* Skip parts that are before "mr", in case "block_start" sent us             \
-     back too far. */                                                           \
-  HeapWord* mr_start = mr.start();                                              \
-  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
-  HeapWord* next = bottom + bot_size;                                           \
-  while (next < mr_start) {                                                     \
-    bottom = next;                                                              \
-    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
-    next = bottom + bot_size;                                                   \
-  }                                                                             \
-                                                                                \
-  while (bottom < top) {                                                        \
-    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
-	!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
-                    oop(bottom)) &&                                             \
-	!_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
-      bottom += _cfls->adjustObjectSize(word_sz);                               \
-    } else {                                                                    \
-      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
-    }                                                                           \
-  }                                                                             \
-}                                                                               \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
-						        HeapWord* bottom,       \
-						        HeapWord* top,          \
-						        ClosureType* cl) {      \
-  /* Skip parts that are before "mr", in case "block_start" sent us             \
-     back too far. */                                                           \
-  HeapWord* mr_start = mr.start();                                              \
-  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
-  HeapWord* next = bottom + bot_size;                                           \
-  while (next < mr_start) {                                                     \
-    bottom = next;                                                              \
-    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
-    next = bottom + bot_size;                                                   \
-  }                                                                             \
-                                                                                \
-  while (bottom < top) {                                                        \
-    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
-	!_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
-                    oop(bottom)) &&                                             \
-	!_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
-      bottom += _cfls->adjustObjectSize(word_sz);                               \
-    } else {                                                                    \
-      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
-    }                                                                           \
-  }                                                                             \
-}
-
-// (There are only two of these, rather than N, because the split is due
-// only to the introduction of the FilteringClosure, a local part of the
-// impl of this abstraction.)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
-
-DirtyCardToOopClosure*
-CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
-				      CardTableModRefBS::PrecisionStyle precision,
-				      HeapWord* boundary) {
-  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
-}
-
-
-// Note on locking for the space iteration functions:
-// since the collector's iteration activities are concurrent with
-// allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instace a block being iterated
-// may suddenly be allocated or divided up and part of it allocated and
-// so on.
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk_careful(cur));
-}
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk(cur));
-}
-
-// Apply the given closure to each oop in the space.
-void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur)) {
-      oop(cur)->oop_iterate(cl);
-    }
-  }
-}
-
-// Apply the given closure to each oop in the space \intersect memory region.
-void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
-  assert_lock_strong(freelistLock());
-  if (is_empty()) {
-    return;
-  }
-  MemRegion cur = MemRegion(bottom(), end());
-  mr = mr.intersection(cur);
-  if (mr.is_empty()) {
-    return;
-  }
-  if (mr.equals(cur)) {
-    oop_iterate(cl);
-    return;
-  }
-  assert(mr.end() <= end(), "just took an intersection above");
-  HeapWord* obj_addr = block_start(mr.start());
-  HeapWord* t = mr.end();
-
-  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
-  if (block_is_obj(obj_addr)) {
-    // Handle first object specially.
-    oop obj = oop(obj_addr);
-    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
-  } else {
-    FreeChunk* fc = (FreeChunk*)obj_addr;
-    obj_addr += fc->size();
-  }
-  while (obj_addr < t) {
-    HeapWord* obj = obj_addr;
-    obj_addr += block_size(obj_addr);
-    // If "obj_addr" is not greater than top, then the
-    // entire object "obj" is within the region.
-    if (obj_addr <= t) {    
-      if (block_is_obj(obj)) {
-        oop(obj)->oop_iterate(cl);
-      }               
-    } else {
-      // "obj" extends beyond end of region
-      if (block_is_obj(obj)) {
-        oop(obj)->oop_iterate(&smr_blk);
-      }    
-      break;
-    }
-  }
-}
-
-// NOTE: In the following methods, in order to safely be able to
-// apply the closure to an object, we need to be sure that the
-// object has been initialized. We are guaranteed that an object
-// is initialized if we are holding the Heap_lock with the
-// world stopped.
-void CompactibleFreeListSpace::verify_objects_initialized() const {
-  if (is_init_completed()) {
-    assert_locked_or_safepoint(Heap_lock);
-    if (Universe::is_fully_initialized()) {
-      guarantee(SafepointSynchronize::is_at_safepoint(),
-                "Required for objects to be initialized");
-    }
-  } // else make a concession at vm start-up
-}
-
-// Apply the given closure to each object in the space
-void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
-  assert_lock_strong(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur)) {
-      blk->do_object(oop(cur));
-    }
-  }
-}
-
-void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
-                                                  UpwardsObjectClosure* cl) {
-  assert_locked();
-  NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
-}
-
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *addr, *last;
-  size_t size;
-  for (addr = bottom(), last  = end();
-       addr < last; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->isFree()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful(oop(addr));
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
-
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
-  ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  // Can't use used_region() below because it may not necessarily
-  // be the same as [bottom(),end()); although we could
-  // use [used_region().start(),round_to(used_region().end(),CardSize)),
-  // that appears too cumbersome, so we just do the simpler check
-  // in the assertion below.
-  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
-         "mr should be non-empty and within used space");
-  HeapWord *addr, *end;
-  size_t size;
-  for (addr = block_start_careful(mr.start()), end  = mr.end();
-       addr < end; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->isFree()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful_m(oop(addr), mr);
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
-
-
-HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  return _bt.block_start(p);
-}
-
-HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
-  return _bt.block_start_careful(p);
-}
-
-size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  // This must be volatile, or else there is a danger that the compiler
-  // will compile the code below into a sometimes-infinite loop, by keeping 
-  // the value read the first time in a register.
-  oop o = (oop)p;
-  volatile oop* second_word_addr = o->klass_addr();
-  while (true) {
-    klassOop k = (klassOop)(*second_word_addr);
-    // We must do this until we get a consistent view of the object.
-    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
-      FreeChunk* fc = (FreeChunk*)p;
-      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
-      size_t res = (*sz_addr);
-      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
-      if (k == k2) {
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      }
-    } else if (k != NULL) {
-      assert(k->is_oop(), "Should really be klass oop.");
-      assert(o->is_parsable(), "Should be parsable");
-      assert(o->is_oop(), "Should be an oop.");
-      size_t res = o->size_given_klass(k->klass_part());
-      res = adjustObjectSize(res);
-      assert(res != 0, "Block size should not be 0");
-      return res;
-    }
-  }
-}
-
-// A variant of the above that uses the Printezis bits for
-// unparsable but allocated objects. This avoids any possible
-// stalls waiting for mutators to initialize objects, and is
-// thus potentially faster than the variant above. However,
-// this variant may return a zero size for a block that is
-// under mutation and for which a consistent size cannot be
-// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
-size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
-                                                     const CMSCollector* c)
-const {
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  // This must be volatile, or else there is a danger that the compiler
-  // will compile the code below into a sometimes-infinite loop, by keeping
-  // the value read the first time in a register.
-  oop o = (oop)p;
-  volatile oop* second_word_addr = o->klass_addr();
-  DEBUG_ONLY(uint loops = 0;)
-  while (true) {
-    klassOop k = (klassOop)(*second_word_addr);
-    // We must do this until we get a consistent view of the object.
-    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
-      FreeChunk* fc = (FreeChunk*)p;
-      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
-      size_t res = (*sz_addr);
-      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
-      if (k == k2) {
-        assert(res != 0, "Block size should not be 0");
-        assert(loops == 0, "Should be 0");
-        return res;
-      }
-    } else if (k != NULL && o->is_parsable()) {
-      assert(k->is_oop(), "Should really be klass oop.");
-      assert(o->is_oop(), "Should be an oop");
-      size_t res = o->size_given_klass(k->klass_part());
-      res = adjustObjectSize(res);
-      assert(res != 0, "Block size should not be 0");
-      return res;
-    } else {
-      return c->block_size_if_printezis_bits(p);
-    }
-    assert(loops == 0, "Can loop at most once");
-    DEBUG_ONLY(loops++;)
-  }
-}
-
-size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  FreeChunk* fc = (FreeChunk*)p;
-  if (fc->isFree()) {
-    return fc->size();
-  } else {
-    // Ignore mark word because this may be a recently promoted
-    // object whose mark word is used to chain together grey
-    // objects (the last one would have a null value).
-    assert(oop(p)->is_oop(true), "Should be an oop");
-    return adjustObjectSize(oop(p)->size());
-  }
-}
-
-// This implementation assumes that the property of "being an object" is
-// stable.  But being a free chunk may not be (because of parallel
-// promotion.)
-bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*)p;
-  assert(is_in_reserved(p), "Should be in space");
-  // When doing a mark-sweep-compact of the CMS generation, this
-  // assertion may fail because prepare_for_compaction() uses
-  // space that is garbage to maintain information on ranges of
-  // live objects so that these live ranges can be moved as a whole.
-  // Comment out this assertion until that problem can be solved
-  // (i.e., that the block start calculation may look at objects
-  // at address below "p" in finding the object that contains "p"
-  // and those objects (if garbage) may have been modified to hold
-  // live range information.
-  // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
-  klassOop k = oop(p)->klass();
-  intptr_t ki = (intptr_t)k;
-  if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
-  if (k != NULL) {
-    // Ignore mark word because it may have been used to
-    // chain together promoted objects (the last one
-    // would have a null value).
-    assert(oop(p)->is_oop(true), "Should be an oop");
-    return true;
-  } else {
-    return false;  // Was not an object at the start of collection.
-  }
-}
-
-// Check if the object is alive. This fact is checked either by consulting
-// the main marking bitmap in the sweeping phase or, if it's a permanent
-// generation and we're not in the sweeping phase, by checking the
-// perm_gen_verify_bit_map where we store the "deadness" information if
-// the CMSPermGenSweepingEnable is false.
-bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
-  assert (block_is_obj(p), "The address should point to an object");
-
-  CMSBitMap* bit_map;
-  // If we're sweeping, we use object liveness information from the main bit map
-  // for both perm gen and old gen.
-  // We don't need to lock the bitmap, because either we are in the middle
-  // of the sweeping phase, and the main marking bit map is locked
-  // or we're in other phases and perm_gen_verify_bit_map is stable, because
-  // it's mutated only in the sweeping phase.
-  if (_collector->abstract_state() == CMSCollector::Sweeping) {
-    bit_map = _collector->markBitMap();
-    return bit_map->isMarked((HeapWord*) p);
-  } else {
-    // If we're not sweeping and we haven't swept the perm gen, we use
-    // the "deadness" information that we had saved in perm_gen_verify_bit_map.
-    if (!CMSPermGenSweepingEnabled && _collector->_permGen->reserved().contains(p)) {
-      if (_collector->verifying()) {
-        bit_map = _collector->perm_gen_verify_bit_map();
-        return !bit_map->par_isMarked((HeapWord*) p); // Object is marked when it's dead.    
-      } else {
-        return false; // We can say for sure if it's live, so we say that it's dead.
-      }
-    }
-  }
-  return true;
-}
-
-bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*)p;
-  assert(is_in_reserved(p), "Should be in space");
-  assert(_bt.block_start(p) == p, "Should be a block boundary");
-  if (!fc->isFree()) {
-    // Ignore mark word because it may have been used to
-    // chain together promoted objects (the last one
-    // would have a null value).
-    assert(oop(p)->is_oop(true), "Should be an oop");
-    return true;
-  }
-  return false;
-}
-
-// "MT-safe but not guaranteed MT-precise" (TM); you may get an
-// approximate answer if you don't hold the freelistlock when you call this.
-size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
-  size_t size = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    debug_only(
-      // We may be calling here without the lock in which case we
-      // won't do this modest sanity check.
-      if (freelistLock()->owned_by_self()) {
-        size_t total_list_size = 0;
-        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-          fc = fc->next()) {
-          total_list_size += i;
-        }
-        assert(total_list_size == i * _indexedFreeList[i].count(),
-               "Count in list is incorrect");
-      }
-    )
-    size += i * _indexedFreeList[i].count();
-  }
-  return size;
-}
-
-HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
-  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  return allocate(size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
-  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
-}
-
-HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
-  assert_lock_strong(freelistLock());
-  HeapWord* res = NULL;
-  assert(size == adjustObjectSize(size),
-         "use adjustObjectSize() before calling into allocate()");
-  
-  if (_adaptive_freelists) {
-    res = allocate_adaptive_freelists(size);
-  } else {  // non-adaptive free lists
-    res = allocate_non_adaptive_freelists(size);
-  }
-  
-  if (res != NULL) {
-    // check that res does lie in this space!
-    assert(is_in_reserved(res), "Not in this space!");
-    assert(is_aligned((void*)res), "alignment check");
-
-    FreeChunk* fc = (FreeChunk*)res;
-    fc->markNotFree();
-    assert(!fc->isFree(), "shouldn't be marked free");
-    assert(oop(fc)->klass() == NULL, "should look uninitialized");
-    // Verify that the block offset table shows this to
-    // be a single block, but not one which is unallocated.
-    _bt.verify_single_block(res, size); 
-    _bt.verify_not_unallocated(res, size);
-    // mangle a just allocated object with a distinct pattern.
-    debug_only(fc->mangleAllocated(size));
-  }
-  
-  return res;
-}
-
-HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
-  HeapWord* res = NULL;
-  // try and use linear allocation for smaller blocks
-  if (size < _smallLinearAllocBlock._allocation_size_limit) {
-    // if successful, the following also adjusts block offset table
-    res = getChunkFromSmallLinearAllocBlock(size);
-  }
-  // Else triage to indexed lists for smaller sizes
-  if (res == NULL) {
-    if (size < SmallForDictionary) {
-      res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    } else { 
-      // else get it from the big dictionary; if even this doesn't
-      // work we are out of luck.
-      res = (HeapWord*)getChunkFromDictionaryExact(size);
-    }
-  }
-
-  return res;
-}
-
-HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
-  assert_lock_strong(freelistLock());
-  HeapWord* res = NULL;
-  assert(size == adjustObjectSize(size),
-         "use adjustObjectSize() before calling into allocate()");
-  
-  // Strategy
-  //   if small
-  //     exact size from small object indexed list if small
-  //     small or large linear allocation block (linAB) as appropriate
-  //     take from lists of greater sized chunks
-  //   else
-  //     dictionary
-  //     small or large linear allocation block if it has the space
-  // Try allocating exact size from indexTable first
-  if (size < IndexSetSize) {
-    res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    if(res != NULL) {
-      assert(res != (HeapWord*)_indexedFreeList[size].head(), 
-        "Not removed from free list");
-      // no block offset table adjustment is necessary on blocks in
-      // the indexed lists.
-
-    // Try allocating from the small LinAB
-    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
-	(res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
-	// if successful, the above also adjusts block offset table
-	// Note that this call will refill the LinAB to 
-	// satisfy the request.  This is different that
-	// evm.  
-        // Don't record chunk off a LinAB?  smallSplitBirth(size);
-  
-    } else {
-      // Raid the exact free lists larger than size, even if they are not
-      // overpopulated.
-      res = (HeapWord*) getChunkFromGreater(size);
-    }
-  } else {
-    // Big objects get allocated directly from the dictionary.
-    res = (HeapWord*) getChunkFromDictionaryExact(size);
-    if (res == NULL) {
-      // Try hard not to fail since an allocation failure will likely
-      // trigger a synchronous GC.  Try to get the space from the 
-      // allocation blocks.
-      res = getChunkFromSmallLinearAllocBlockRemainder(size);
-    }
-  }
-  
-  return res;
-}
-
-// A worst-case estimate of the space required (in HeapWords) to expand the heap
-// when promoting obj.
-size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
-  // Depending on the object size, expansion may require refilling either a
-  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
-  // is added because the dictionary may over-allocate to avoid fragmentation.
-  size_t space = obj_size;
-  if (!_adaptive_freelists) {
-    space = MAX2(space, _smallLinearAllocBlock._refillSize);
-  }
-  space += _promoInfo.refillSize() + 2 * MinChunkSize;
-  return space;
-}
-
-FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
-  FreeChunk* ret;
-
-  assert(numWords >= MinChunkSize, "Size is less than minimum");
-  assert(linearAllocationWouldFail() || bestFitFirst(),
-    "Should not be here");
-
-  size_t i;
-  size_t currSize = numWords + MinChunkSize;
-  assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
-  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    FreeList* fl = &_indexedFreeList[i];
-    if (fl->head()) {
-      ret = getFromListGreater(fl, numWords);
-      assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
-      return ret;
-    }
-  }
-
-  currSize = MAX2((size_t)SmallForDictionary,
-                  (size_t)(numWords + MinChunkSize));
-
-  /* Try to get a chunk that satisfies request, while avoiding
-     fragmentation that can't be handled. */
-  {
-    ret =  dictionary()->getChunk(currSize);
-    if (ret != NULL) {
-      assert(ret->size() - numWords >= MinChunkSize,
-             "Chunk is too small");
-      _bt.allocated((HeapWord*)ret, ret->size());
-      /* Carve returned chunk. */
-      (void) splitChunkAndReturnRemainder(ret, numWords);
-      /* Label this as no longer a free chunk. */
-      assert(ret->isFree(), "This chunk should be free");
-      ret->linkPrev(NULL);
-    }
-    assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
-    return ret;
-  }
-  ShouldNotReachHere();
-}
-
-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) 
-  const {
-  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
-  return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
-}
-
-bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
-  if (fc->size() >= IndexSetSize) {
-    return dictionary()->verifyChunkInFreeLists(fc);
-  } else {
-    return verifyChunkInIndexedFreeLists(fc);
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::assert_locked() const {
-  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
-}
-#endif
-
-FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
-  // In the parallel case, the main thread holds the free list lock
-  // on behalf the parallel threads.
-  assert_locked();
-  FreeChunk* fc;
-  {
-    // If GC is parallel, this might be called by several threads.
-    // This should be rare enough that the locking overhead won't affect
-    // the sequential code.
-    MutexLockerEx x(parDictionaryAllocLock(),
-                    Mutex::_no_safepoint_check_flag);
-    fc = getChunkFromDictionary(size);
-  }
-  if (fc != NULL) {
-    fc->dontCoalesce();
-    assert(fc->isFree(), "Should be free, but not coalescable");
-    // Verify that the block offset table shows this to
-    // be a single block, but not one which is unallocated.
-    _bt.verify_single_block((HeapWord*)fc, fc->size());
-    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-  }
-  return fc;
-}
-
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
-  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
-  assert_locked();
-
-  // if we are tracking promotions, then first ensure space for
-  // promotion (including spooling space for saving header if necessary).
-  // then allocate and copy, then track promoted info if needed.
-  // When tracking (see PromotionInfo::track()), the mark word may
-  // be displaced and in this case restoration of the mark word
-  // occurs in the (oop_since_save_marks_)iterate phase.
-  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
-    return NULL;
-  }
-  // Call the allocate(size_t, bool) form directly to avoid the
-  // additional call through the allocate(size_t) form.  Having
-  // the compile inline the call is problematic because allocate(size_t)
-  // is a virtual method.
-  HeapWord* res = allocate(adjustObjectSize(obj_size));
-  if (res != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
-    // if we should be tracking promotions, do so.
-    if (_promoInfo.tracking()) {
-        _promoInfo.track((PromotedObject*)res);
-    }
-  }
-  return oop(res);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "minimum chunk size");
-  assert(size <  _smallLinearAllocBlock._allocation_size_limit, 
-    "maximum from smallLinearAllocBlock");
-  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
-                                                       size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "too small");
-  HeapWord* res = NULL;
-  // Try to do linear allocation from blk, making sure that
-  if (blk->_word_size == 0) {
-    // We have probably been unable to fill this either in the prologue or
-    // when it was exhausted at the last linear allocation. Bail out until
-    // next time.
-    assert(blk->_ptr == NULL, "consistency check");
-    return NULL;
-  }
-  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
-  res = getChunkFromLinearAllocBlockRemainder(blk, size);
-  if (res != NULL) return res;
-
-  // about to exhaust this linear allocation block
-  if (blk->_word_size == size) { // exactly satisfied
-    res = blk->_ptr;
-    _bt.allocated(res, blk->_word_size);
-  } else if (size + MinChunkSize <= blk->_refillSize) {
-    // Update _unallocated_block if the size is such that chunk would be
-    // returned to the indexed free list.  All other chunks in the indexed
-    // free lists are allocated from the dictionary so that _unallocated_block
-    // has already been adjusted for them.  Do it here so that the cost
-    // for all chunks added back to the indexed free lists.
-    if (blk->_word_size < SmallForDictionary) {
-      _bt.allocated(blk->_ptr, blk->_word_size);
-    }
-    // Return the chunk that isn't big enough, and then refill below.
-    addChunkToFreeLists(blk->_ptr, blk->_word_size);
-    _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
-    // Don't keep statistics on adding back chunk from a LinAB.
-  } else {
-    // A refilled block would not satisfy the request.
-    return NULL;
-  }
-
-  blk->_ptr = NULL; blk->_word_size = 0;
-  refillLinearAllocBlock(blk);
-  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
-	 "block was replenished");
-  if (res != NULL) {
-    splitBirth(size);
-    repairLinearAllocBlock(blk);
-  } else if (blk->_ptr != NULL) {
-    res = blk->_ptr;
-    size_t blk_size = blk->_word_size;
-    blk->_word_size -= size;
-    blk->_ptr  += size;
-    splitBirth(size);
-    repairLinearAllocBlock(blk);
-    // Update BOT last so that other (parallel) GC threads see a consistent
-    // view of the BOT and free blocks.
-    // Above must occur before BOT is updated below.
-    _bt.split_block(res, blk_size, size);  // adjust block offset table
-  }
-  return res;
-}
-
-HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
-					LinearAllocBlock* blk, 
-					size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "too small");
-
-  HeapWord* res = NULL;
-  // This is the common case.  Keep it simple.
-  if (blk->_word_size >= size + MinChunkSize) {
-    assert(blk->_ptr != NULL, "consistency check");
-    res = blk->_ptr;
-    // Note that the BOT is up-to-date for the linAB before allocation.  It
-    // indicates the start of the linAB.  The split_block() updates the
-    // BOT for the linAB after the allocation (indicates the start of the
-    // next chunk to be allocated).
-    size_t blk_size = blk->_word_size;
-    blk->_word_size -= size;
-    blk->_ptr  += size;
-    splitBirth(size);
-    repairLinearAllocBlock(blk);
-    // Update BOT last so that other (parallel) GC threads see a consistent
-    // view of the BOT and free blocks.
-    // Above must occur before BOT is updated below.
-    _bt.split_block(res, blk_size, size);  // adjust block offset table
-    _bt.allocated(res, size);
-  } 
-  return res;
-}
-
-FreeChunk* 
-CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
-  assert_locked();
-  assert(size < SmallForDictionary, "just checking");
-  FreeChunk* res;
-  res = _indexedFreeList[size].getChunkAtHead();
-  if (res == NULL) {
-    res = getChunkFromIndexedFreeListHelper(size);
-  }
-  _bt.verify_not_unallocated((HeapWord*) res, size);
-  return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
-  assert_locked();
-  FreeChunk* fc = NULL;
-  if (size < SmallForDictionary) {
-    assert(_indexedFreeList[size].head() == NULL ||
-      _indexedFreeList[size].surplus() <= 0,
-      "List for this size should be empty or under populated");
-    // Try best fit in exact lists before replenishing the list
-    if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
-      // Replenish list.
-      //
-      // Things tried that failed.
-      //   Tried allocating out of the two LinAB's first before 
-      // replenishing lists.  
-      //   Tried small linAB of size 256 (size in indexed list)
-      // and replenishing indexed lists from the small linAB.
-      //
-      FreeChunk* newFc = NULL;
-      size_t replenish_size = CMSIndexedFreeListReplenish * size;
-      if (replenish_size < SmallForDictionary) {
-	// Do not replenish from an underpopulated size.
-	if (_indexedFreeList[replenish_size].surplus() > 0 &&
-	    _indexedFreeList[replenish_size].head() != NULL) {
-          newFc = 
-            _indexedFreeList[replenish_size].getChunkAtHead();
-	} else {
-	  newFc = bestFitSmall(replenish_size);
-	}
-      }
-      if (newFc != NULL) {
-	splitDeath(replenish_size);
-      } else if (replenish_size > size) {
-        assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
-        newFc = 
-          getChunkFromIndexedFreeListHelper(replenish_size);
-      }
-      if (newFc != NULL) {
-        assert(newFc->size() == replenish_size, "Got wrong size");
-        size_t i;
-        FreeChunk *curFc, *nextFc;
-        // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
-	// The last chunk is not added to the lists but is returned as the
-	// free chunk.
-        for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), 
-  	     i = 0;
-             i < (CMSIndexedFreeListReplenish - 1);
-             curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), 
-  	     i++) {
-          curFc->setSize(size);
-  	  // Don't record this as a return in order to try and
-  	  // determine the "returns" from a GC.
-          _bt.verify_not_unallocated((HeapWord*) fc, size);
-  	  _indexedFreeList[size].returnChunkAtTail(curFc, false);
-  	  _bt.mark_block((HeapWord*)curFc, size);
-  	  splitBirth(size);
-  	  // Don't record the initial population of the indexed list
-  	  // as a split birth.
-        }
-
-        // check that the arithmetic was OK above
-        assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
-          "inconsistency in carving newFc");
-        curFc->setSize(size);
-  	_bt.mark_block((HeapWord*)curFc, size);
-  	splitBirth(size);
-        return curFc;
-      }
-    }
-  } else {
-    // Get a free chunk from the free chunk dictionary to be returned to
-    // replenish the indexed free list.
-    fc = getChunkFromDictionaryExact(size);
-  }
-  assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
-  return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
-  assert_locked();
-  FreeChunk* fc = _dictionary->getChunk(size);
-  if (fc == NULL) {
-    return NULL;
-  }
-  _bt.allocated((HeapWord*)fc, fc->size());
-  if (fc->size() >= size + MinChunkSize) {
-    fc = splitChunkAndReturnRemainder(fc, size);
-  }
-  assert(fc->size() >= size, "chunk too small");
-  assert(fc->size() < size + MinChunkSize, "chunk too big");
-  _bt.verify_single_block((HeapWord*)fc, fc->size());
-  return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
-  assert_locked();
-  FreeChunk* fc = _dictionary->getChunk(size);
-  if (fc == NULL) {
-    return fc;
-  }
-  _bt.allocated((HeapWord*)fc, fc->size());
-  if (fc->size() == size) {
-    _bt.verify_single_block((HeapWord*)fc, size);
-    return fc;
-  }
-  assert(fc->size() > size, "getChunk() guarantee");
-  if (fc->size() < size + MinChunkSize) {
-    // Return the chunk to the dictionary and go get a bigger one.
-    returnChunkToDictionary(fc);
-    fc = _dictionary->getChunk(size + MinChunkSize); 
-    if (fc == NULL) {
-      return NULL;
-    }
-    _bt.allocated((HeapWord*)fc, fc->size());
-  }
-  assert(fc->size() >= size + MinChunkSize, "tautology");
-  fc = splitChunkAndReturnRemainder(fc, size);
-  assert(fc->size() == size, "chunk is wrong size");
-  _bt.verify_single_block((HeapWord*)fc, size);
-  return fc;
-}
-
-void
-CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
-  assert_locked();
-
-  size_t size = chunk->size();
-  _bt.verify_single_block((HeapWord*)chunk, size);
-  // adjust _unallocated_block downward, as necessary
-  _bt.freed((HeapWord*)chunk, size);
-  _dictionary->returnChunk(chunk);
-}
-
-void
-CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
-  assert_locked();
-  size_t size = fc->size();
-  _bt.verify_single_block((HeapWord*) fc, size);
-  _bt.verify_not_unallocated((HeapWord*) fc, size);
-  if (_adaptive_freelists) {
-    _indexedFreeList[size].returnChunkAtTail(fc);
-  } else {
-    _indexedFreeList[size].returnChunkAtHead(fc);
-  }
-}
-
-// Add chunk to end of last block -- if it's the largest
-// block -- and update BOT and census data. We would
-// of course have preferred to coalesce it with the
-// last block, but it's currently less expensive to find the
-// largest block than it is to find the last.
-void
-CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
-  HeapWord* chunk, size_t     size) {
-  // check that the chunk does lie in this space!
-  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  assert_locked();
-  // One of the parallel gc task threads may be here
-  // whilst others are allocating.
-  Mutex* lock = NULL;
-  if (ParallelGCThreads != 0) {
-    lock = &_parDictionaryAllocLock;
-  }
-  FreeChunk* ec;
-  {
-    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
-    ec = dictionary()->findLargestDict();  // get largest block
-    if (ec != NULL && ec->end() == chunk) {
-      // It's a coterminal block - we can coalesce.
-      size_t old_size = ec->size();
-      coalDeath(old_size);
-      removeChunkFromDictionary(ec);
-      size += old_size;
-    } else {
-      ec = (FreeChunk*)chunk;
-    }
-  }
-  ec->setSize(size);
-  debug_only(ec->mangleFreed(size));
-  if (size < SmallForDictionary) {
-    lock = _indexedFreeListParLocks[size];
-  }
-  MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
-  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
-  // record the birth under the lock since the recording involves
-  // manipulation of the list on which the chunk lives and
-  // if the chunk is allocated and is the last on the list,
-  // the list can go away.
-  coalBirth(size);
-}
-
-void
-CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
-                                              size_t     size) {
-  // check that the chunk does lie in this space!
-  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  assert_locked();
-  _bt.verify_single_block(chunk, size);
-
-  FreeChunk* fc = (FreeChunk*) chunk;
-  fc->setSize(size);
-  debug_only(fc->mangleFreed(size));
-  if (size < SmallForDictionary) {
-    returnChunkToFreeList(fc);
-  } else {
-    returnChunkToDictionary(fc);
-  }
-}
-
-void
-CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
-  size_t size, bool coalesced) {
-  assert_locked();
-  assert(chunk != NULL, "null chunk");
-  if (coalesced) {
-    // repair BOT
-    _bt.single_block(chunk, size);
-  }
-  addChunkToFreeLists(chunk, size);
-}
-
-// We _must_ find the purported chunk on our free lists;
-// we assert if we don't.
-void
-CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
-  size_t size = fc->size();
-  assert_locked();
-  debug_only(verifyFreeLists());
-  if (size < SmallForDictionary) {
-    removeChunkFromIndexedFreeList(fc);
-  } else {
-    removeChunkFromDictionary(fc);
-  }
-  _bt.verify_single_block((HeapWord*)fc, size);
-  debug_only(verifyFreeLists());
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
-  size_t size = fc->size();
-  assert_locked();
-  assert(fc != NULL, "null chunk");
-  _bt.verify_single_block((HeapWord*)fc, size);
-  _dictionary->removeChunk(fc);
-  // adjust _unallocated_block upward, as necessary
-  _bt.allocated((HeapWord*)fc, size);
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
-  assert_locked();
-  size_t size = fc->size();
-  _bt.verify_single_block((HeapWord*)fc, size);
-  NOT_PRODUCT(
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeList(size);
-    }
-  )
-  _indexedFreeList[size].removeChunk(fc);
-  debug_only(fc->clearNext());
-  debug_only(fc->clearPrev());
-  NOT_PRODUCT(
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeList(size);
-    }
-  )
-}
-
-FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
-  /* A hint is the next larger size that has a surplus.
-     Start search at a size large enough to guarantee that
-     the excess is >= MIN_CHUNK. */
-  size_t start = align_object_size(numWords + MinChunkSize);
-  if (start < IndexSetSize) {
-    FreeList* it   = _indexedFreeList;
-    size_t    hint = _indexedFreeList[start].hint();
-    while (hint < IndexSetSize) {
-      assert(hint % MinObjAlignment == 0, "hint should be aligned");
-      FreeList *fl = &_indexedFreeList[hint];
-      if (fl->surplus() > 0 && fl->head() != NULL) {
-        // Found a list with surplus, reset original hint
-        // and split out a free chunk which is returned.
-        _indexedFreeList[start].set_hint(hint);
-	FreeChunk* res = getFromListGreater(fl, numWords);
-	assert(res == NULL || res->isFree(), 
-	  "Should be returning a free chunk");
-        return res;
-      }
-      hint = fl->hint(); /* keep looking */
-    }
-    /* None found. */
-    it[start].set_hint(IndexSetSize);
-  }
-  return NULL;
-}
-
-/* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
-  size_t numWords) {
-  FreeChunk *curr = fl->head();
-  size_t oldNumWords = curr->size();
-  assert(numWords >= MinChunkSize, "Word size is too small");
-  assert(curr != NULL, "List is empty");
-  assert(oldNumWords >= numWords + MinChunkSize, 
-	"Size of chunks in the list is too small");
- 
-  fl->removeChunk(curr);
-  // recorded indirectly by splitChunkAndReturnRemainder - 
-  // smallSplit(oldNumWords, numWords);
-  FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
-  // Does anything have to be done for the remainder in terms of
-  // fixing the card table?
-  assert(new_chunk == NULL || new_chunk->isFree(), 
-    "Should be returning a free chunk");
-  return new_chunk;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
-  size_t new_size) {
-  assert_locked();
-  size_t size = chunk->size();
-  assert(size > new_size, "Split from a smaller block?");
-  assert(is_aligned(chunk), "alignment problem");
-  assert(size == adjustObjectSize(size), "alignment problem");
-  size_t rem_size = size - new_size;
-  assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
-  assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
-  FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
-  assert(is_aligned(ffc), "alignment problem");
-  ffc->setSize(rem_size);
-  ffc->linkNext(NULL);
-  ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
-  // Above must occur before BOT is updated below.
-  // adjust block offset table
-  _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
-  if (rem_size < SmallForDictionary) {
-    bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
-    if (is_par) _indexedFreeListParLocks[rem_size]->lock();
-    returnChunkToFreeList(ffc);
-    split(size, rem_size);
-    if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
-  } else {
-    returnChunkToDictionary(ffc);
-    split(size ,rem_size);
-  }
-  chunk->setSize(new_size);
-  return chunk;
-}
-
-void
-CompactibleFreeListSpace::sweep_completed() {
-  // Now that space is probably plentiful, refill linear
-  // allocation blocks as needed.
-  refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_prologue() {
-  assert_locked();
-  if (PrintFLSStatistics != 0) {
-    gclog_or_tty->print("Before GC:\n");
-    reportFreeListStatistics();
-  }
-  refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_epilogue() {
-  assert_locked();
-  if (PrintGCDetails && Verbose && !_adaptive_freelists) {
-    if (_smallLinearAllocBlock._word_size == 0)
-      warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
-  }
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-  _promoInfo.stopTrackingPromotions();
-  repairLinearAllocationBlocks();
-  // Print Space's stats
-  if (PrintFLSStatistics != 0) {
-    gclog_or_tty->print("After GC:\n");
-    reportFreeListStatistics();
-  }
-}
-
-// Iteration support, mostly delegated from a CMS generation
-
-void CompactibleFreeListSpace::save_marks() {
-  // mark the "end" of the used space at the time of this call;
-  // note, however, that promoted objects from this point
-  // on are tracked in the _promoInfo below.
-  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? 
-                      unallocated_block() : end());
-  // inform allocator that promotions should be tracked.
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-  _promoInfo.startTrackingPromotions();
-}
-
-bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
-  assert(_promoInfo.tracking(), "No preceding save_marks?");
-  guarantee(SharedHeap::heap()->n_par_threads() == 0,
-	    "Shouldn't be called (yet) during parallel part of gc.");
-  return _promoInfo.noPromotions();
-}
-
-#define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
-                                                                            \
-void CompactibleFreeListSpace::                                             \
-oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
-  assert(SharedHeap::heap()->n_par_threads() == 0,                          \
-         "Shouldn't be called (yet) during parallel part of gc.");          \
-  _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
-  /*                                                                        \
-   * This also restores any displaced headers and removes the elements from \
-   * the iteration set as they are processed, so that we have a clean slate \
-   * at the end of the iteration. Note, thus, that if new objects are       \
-   * promoted as a result of the iteration they are iterated over as well.  \
-   */                                                                       \
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
-}
-
-ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
-
-//////////////////////////////////////////////////////////////////////////////
-// We go over the list of promoted objects, removing each from the list,    
-// and applying the closure (this may, in turn, add more elements to  
-// the tail of the promoted list, and these newly added objects will 
-// also be processed) until the list is empty.                      
-// To aid verification and debugging, in the non-product builds
-// we actually forward _promoHead each time we process a promoted oop.
-// Note that this is not necessary in general (i.e. when we don't need to
-// call PromotionInfo::verify()) because oop_iterate can only add to the
-// end of _promoTail, and never needs to look at _promoHead.
-
-#define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
-                                                                            \
-void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
-  NOT_PRODUCT(verify());                                                    \
-  PromotedObject *curObj, *nextObj;                                         \
-  for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
-    if ((nextObj = curObj->next()) == NULL) {                               \
-      /* protect ourselves against additions due to closure application     \
-         below by resetting the list.  */                                   \
-      assert(_promoTail == curObj, "Should have been the tail");            \
-      _promoHead = _promoTail = NULL;                                       \
-    }                                                                       \
-    if (curObj->hasDisplacedMark()) {                                       \
-      /* restore displaced header */                                        \
-      oop(curObj)->set_mark(nextDisplacedHeader());                         \
-    } else {                                                                \
-      /* restore prototypical header */                                     \
-      oop(curObj)->init_mark();                                             \
-    }                                                                       \
-    /* The "promoted_mark" should now not be set */                         \
-    assert(!curObj->hasPromotedMark(),                                      \
-           "Should have been cleared by restoring displaced mark-word");    \
-    NOT_PRODUCT(_promoHead = nextObj);                                      \
-    if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
-    if (nextObj == NULL) { /* start at head of list reset above */          \
-      nextObj = _promoHead;                                                 \
-    }                                                                       \
-  }                                                                         \
-  assert(noPromotions(), "post-condition violation");                       \
-  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
-  assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
-  assert(_firstIndex == _nextIndex, "empty buffer");                        \
-}
-
-// This should have been ALL_SINCE_...() just like the others,
-// but, because the body of the method above is somehwat longer,
-// the MSVC compiler cannot cope; as a workaround, we split the
-// macro into its 3 constituent parts below (see original macro
-// definition in specializedOopClosures.hpp).
-SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
-PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
-
-
-void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
-  // ugghh... how would one do this efficiently for a non-contiguous space?
-  guarantee(false, "NYI");
-}
-
-bool CompactibleFreeListSpace::linearAllocationWouldFail() {
-  return _smallLinearAllocBlock._word_size == 0;
-}
-
-void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
-  // Fix up linear allocation blocks to look like free blocks
-  repairLinearAllocBlock(&_smallLinearAllocBlock);
-}
-
-void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
-  assert_locked();
-  if (blk->_ptr != NULL) {
-    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
-           "Minimum block size requirement");
-    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
-    fc->setSize(blk->_word_size);
-    fc->linkPrev(NULL);   // mark as free
-    fc->dontCoalesce();
-    assert(fc->isFree(), "just marked it free");
-    assert(fc->cantCoalesce(), "just marked it uncoalescable");
-  }
-}
-
-void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
-  assert_locked();
-  if (_smallLinearAllocBlock._ptr == NULL) {
-    assert(_smallLinearAllocBlock._word_size == 0, 
-      "Size of linAB should be zero if the ptr is NULL");
-    // Reset the linAB refill and allocation size limit.
-    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
-  }
-  refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
-  assert_locked();
-  assert((blk->_ptr == NULL && blk->_word_size == 0) ||
-         (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
-         "blk invariant");
-  if (blk->_ptr == NULL) {
-    refillLinearAllocBlock(blk);
-  }
-  if (PrintMiscellaneous && Verbose) {
-    if (blk->_word_size == 0) {
-      warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
-    }
-  }
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
-  assert_locked();
-  assert(blk->_word_size == 0 && blk->_ptr == NULL,
-         "linear allocation block should be empty");
-  FreeChunk* fc;
-  if (blk->_refillSize < SmallForDictionary && 
-      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
-    // A linAB's strategy might be to use small sizes to reduce
-    // fragmentation but still get the benefits of allocation from a
-    // linAB.
-  } else {
-    fc = getChunkFromDictionary(blk->_refillSize);
-  }
-  if (fc != NULL) {
-    blk->_ptr  = (HeapWord*)fc;
-    blk->_word_size = fc->size();
-    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
-  }
-}
-
-// Support for compaction
-
-void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
-  // prepare_for_compaction() uses the space between live objects
-  // so that later phase can skip dead space quickly.  So verification
-  // of the free lists doesn't work after.
-}
-
-#define obj_size(q) adjustObjectSize(oop(q)->size())
-#define adjust_obj_size(s) adjustObjectSize(s)
-
-void CompactibleFreeListSpace::adjust_pointers() {
-  // In other versions of adjust_pointers(), a bail out
-  // based on the amount of live data in the generation
-  // (i.e., if 0, bail out) may be used.
-  // Cannot test used() == 0 here because the free lists have already
-  // been mangled by the compaction.
-
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
-  // See note about verification in prepare_for_compaction().
-}
-
-void CompactibleFreeListSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
-}
-
-// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
-// where fbs is free block sizes
-double CompactibleFreeListSpace::flsFrag() const {
-  size_t itabFree = totalSizeInIndexedFreeLists();
-  double frag = 0.0;
-  size_t i;
-
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    double sz  = i;
-    frag      += _indexedFreeList[i].count() * (sz * sz);
-  }
-
-  double totFree = itabFree +
-                   _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
-  if (totFree > 0) {
-    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / 
-            (totFree * totFree));
-    frag = (double)1.0  - frag;
-  } else {
-    assert(frag == 0.0, "Follows from totFree == 0");
-  }
-  return frag;
-}
-
-#define CoalSurplusPercent 1.05
-#define SplitSurplusPercent 1.10
-
-void CompactibleFreeListSpace::beginSweepFLCensus(
-  float inter_sweep_current,
-  float inter_sweep_estimate) {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList* fl    = &_indexedFreeList[i];
-    fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
-    fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
-    fl->set_beforeSweep(fl->count());
-    fl->set_bfrSurp(fl->surplus());
-  }
-  _dictionary->beginSweepDictCensus(CoalSurplusPercent,
-                                    inter_sweep_current,
-                                    inter_sweep_estimate);
-}
-
-void CompactibleFreeListSpace::setFLSurplus() {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList *fl = &_indexedFreeList[i];
-    fl->set_surplus(fl->count() - 
-                    (ssize_t)((double)fl->desired() * SplitSurplusPercent));
-  }
-}
-
-void CompactibleFreeListSpace::setFLHints() {
-  assert_locked();
-  size_t i;
-  size_t h = IndexSetSize;
-  for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    FreeList *fl = &_indexedFreeList[i];
-    fl->set_hint(h);
-    if (fl->surplus() > 0) {
-      h = i;
-    }
-  }
-}
-
-void CompactibleFreeListSpace::clearFLCensus() {
-  assert_locked();
-  int i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList *fl = &_indexedFreeList[i];
-    fl->set_prevSweep(fl->count());
-    fl->set_coalBirths(0);
-    fl->set_coalDeaths(0);
-    fl->set_splitBirths(0);
-    fl->set_splitDeaths(0);  
-  }
-}
-
-void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
-  setFLSurplus();
-  setFLHints();
-  if (PrintGC && PrintFLSCensus > 0) {
-    printFLCensus(sweepCt);
-  }
-  clearFLCensus();
-  assert_locked();
-  _dictionary->endSweepDictCensus(SplitSurplusPercent);
-}
-
-bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
-  if (size < SmallForDictionary) {
-    FreeList *fl = &_indexedFreeList[size];
-    return (fl->coalDesired() < 0) ||
-           ((int)fl->count() > fl->coalDesired());
-  } else {
-    return dictionary()->coalDictOverPopulated(size);
-  }
-}
-
-void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_coalBirths();
-  fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_coalDeaths();
-  fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::coalBirth(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallCoalBirth(size);
-  } else {
-    dictionary()->dictCensusUpdate(size, 
-			           false /* split */, 
-				   true /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::coalDeath(size_t size) {
-  if(size  < SmallForDictionary) {
-    smallCoalDeath(size);
-  } else {
-    dictionary()->dictCensusUpdate(size, 
-				   false /* split */, 
-				   false /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_splitBirths();
-  fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList *fl = &_indexedFreeList[size];
-  fl->increment_splitDeaths();
-  fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::splitBirth(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallSplitBirth(size);
-  } else {
-    dictionary()->dictCensusUpdate(size, 
-				   true /* split */, 
-				   true /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::splitDeath(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallSplitDeath(size);
-  } else {
-    dictionary()->dictCensusUpdate(size, 
-				   true /* split */, 
-				   false /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::split(size_t from, size_t to1) {
-  size_t to2 = from - to1;
-  splitDeath(from);
-  splitBirth(to1);
-  splitBirth(to2);
-}
-
-
-void CompactibleFreeListSpace::print() const {
-  tty->print(" CompactibleFreeListSpace");
-  Space::print();
-}
-
-void CompactibleFreeListSpace::prepare_for_verify() {
-  assert_locked();
-  repairLinearAllocationBlocks();
-  // Verify that the SpoolBlocks look like free blocks of
-  // appropriate sizes... To be done ...
-}
-
-class VerifyAllBlksClosure: public BlkClosure {
-  const CompactibleFreeListSpace* _sp;
-  const MemRegion                 _span;
-
- public:
-  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
-    MemRegion span) :  _sp(sp), _span(span) { }
-
-  size_t do_blk(HeapWord* addr) {
-    size_t res;
-    if (_sp->block_is_obj(addr)) {
-      oop p = oop(addr);
-      guarantee(p->is_oop(), "Should be an oop");
-      res = _sp->adjustObjectSize(p->size());
-      if (_sp->obj_is_alive(addr)) {
-        p->verify();
-      }
-    } else {
-      FreeChunk* fc = (FreeChunk*)addr;
-      res = fc->size();
-      if (FLSVerifyLists && !fc->cantCoalesce()) {
-        guarantee(_sp->verifyChunkInFreeLists(fc),
-                  "Chunk should be on a free list");
-      }
-    }
-    guarantee(res != 0, "Livelock: no rank reduction!");
-    return res;
-  }
-};
-
-class VerifyAllOopsClosure: public OopClosure {
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  const MemRegion                 _span;
-  const bool                      _past_remark;
-  const CMSBitMap*                _bit_map;
-
- public:
-  VerifyAllOopsClosure(const CMSCollector* collector,
-    const CompactibleFreeListSpace* sp, MemRegion span,
-    bool past_remark, CMSBitMap* bit_map) :
-    OopClosure(), _collector(collector), _sp(sp), _span(span),
-    _past_remark(past_remark), _bit_map(bit_map) { }
-
-  void do_oop(oop* ptr) {
-    oop p = *ptr;
-    if (p != NULL) {
-      if (_span.contains(p)) { // the interior oop points into CMS heap
-        if (!_span.contains(ptr)) { // reference from outside CMS heap
-          // Should be a valid object; the first disjunct below allows
-          // us to sidestep an assertion in block_is_obj() that insists
-          // that p be in _sp. Note that several generations (and spaces)
-          // are spanned by _span (CMS heap) above.
-          guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
-                    "Should be an object");
-          guarantee(p->is_oop(), "Should be an oop");
-          p->verify();
-          if (_past_remark) {
-            // Remark has been completed, the object should be marked
-            _bit_map->isMarked((HeapWord*)p);
-          }
-        }
-        else { // reference within CMS heap
-          if (_past_remark) {
-            // Remark has been completed -- so the referent should have
-            // been marked, if referring object is.
-            if (_bit_map->isMarked(_collector->block_start(ptr))) {
-              guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
-            }
-          }
-        }
-      } else if (_sp->is_in_reserved(ptr)) {
-        // the reference is from FLS, and points out of FLS
-        guarantee(p->is_oop(), "Should be an oop");
-        p->verify();
-      }
-    }
-  }
-};
-
-void CompactibleFreeListSpace::verify(bool ignored) const {
-  assert_lock_strong(&_freelistLock);
-  verify_objects_initialized();
-  MemRegion span = _collector->_span;
-  bool past_remark = (_collector->abstract_state() ==
-                      CMSCollector::Sweeping);
-
-  ResourceMark rm;
-  HandleMark  hm;
-
-  // Check integrity of CFL data structures
-  _promoInfo.verify();
-  _dictionary->verify();
-  if (FLSVerifyIndexTable) {
-    verifyIndexedFreeLists();
-  }
-  // Check integrity of all objects and free blocks in space
-  {
-    VerifyAllBlksClosure cl(this, span);
-    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
-  }
-  // Check that all references in the heap to FLS
-  // are to valid objects in FLS or that references in
-  // FLS are to valid objects elsewhere in the heap
-  if (FLSVerifyAllHeapReferences)
-  {
-    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
-      _collector->markBitMap());
-    CollectedHeap* ch = Universe::heap();
-    ch->oop_iterate(&cl);              // all oops in generations
-    ch->permanent_oop_iterate(&cl);    // all oops in perm gen
-  }
-
-  if (VerifyObjectStartArray) {
-    // Verify the block offset table
-    _bt.verify();
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::verifyFreeLists() const {
-  if (FLSVerifyLists) {
-    _dictionary->verify();
-    verifyIndexedFreeLists();
-  } else {
-    if (FLSVerifyDictionary) {
-      _dictionary->verify();
-    }
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeLists();
-    }
-  }
-}
-#endif
-
-void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
-  size_t i = 0;
-  for (; i < MinChunkSize; i++) {
-    guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
-  }
-  for (; i < IndexSetSize; i++) {
-    verifyIndexedFreeList(i);
-  }
-}
-
-void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
-  guarantee(size % 2 == 0, "Odd slots should be empty");
-  for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
-    fc = fc->next()) {
-    guarantee(fc->size() == size, "Size inconsistency");
-    guarantee(fc->isFree(), "!free?");
-    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::checkFreeListConsistency() const {
-  assert(_dictionary->minSize() <= IndexSetSize,
-    "Some sizes can't be allocated without recourse to"
-    " linear allocation buffers");
-  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
-    "else MIN_TREE_CHUNK_SIZE is wrong");
-  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
-         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
-  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
-      "Some for-loops may be incorrectly initialized");
-  assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
-      "For-loops that iterate over IndexSet with stride 2 may be wrong");
-}
-#endif
-
-void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
-  assert_lock_strong(&_freelistLock);
-  ssize_t bfrSurp     = 0;
-  ssize_t surplus     = 0;
-  ssize_t desired     = 0;
-  ssize_t prevSweep   = 0;
-  ssize_t beforeSweep = 0;
-  ssize_t count       = 0;
-  ssize_t coalBirths  = 0;
-  ssize_t coalDeaths  = 0;
-  ssize_t splitBirths = 0;
-  ssize_t splitDeaths = 0;
-  gclog_or_tty->print("end sweep# %d\n", sweepCt);
-  gclog_or_tty->print("%4s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
-             "%7s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
-             "%7s\t"    "\n",
-             "size",    "bfrsurp",   "surplus",   "desired",   "prvSwep",     
-             "bfrSwep", "count",     "cBirths",   "cDeaths",   "sBirths",
-             "sDeaths");
-
-  size_t totalFree = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    const FreeList *fl = &_indexedFreeList[i];                                                       
-	totalFree += fl->count() * fl->size();
-
-    gclog_or_tty->print("%4d\t"          "%7d\t"             "%7d\t"        "%7d\t"
-               "%7d\t"          "%7d\t"             "%7d\t"        "%7d\t"
-               "%7d\t"          "%7d\t"             "%7d\t"        "\n",
-               fl->size(),       fl->bfrSurp(),     fl->surplus(), fl->desired(), 
-	       fl->prevSweep(),  fl->beforeSweep(), fl->count(),   fl->coalBirths(), 
-	       fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
-    bfrSurp     += fl->bfrSurp();
-    surplus     += fl->surplus();
-    desired     += fl->desired();
-    prevSweep   += fl->prevSweep();
-    beforeSweep += fl->beforeSweep();
-    count       += fl->count();
-    coalBirths  += fl->coalBirths();
-    coalDeaths  += fl->coalDeaths();
-    splitBirths += fl->splitBirths();
-    splitDeaths += fl->splitDeaths();
-  }                                                                                             
-  gclog_or_tty->print("%4s\t"
-            "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t"
-            "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t" "\n",
-            "totl",
-            bfrSurp,     surplus,     desired,     prevSweep,     beforeSweep,
-            count,       coalBirths,  coalDeaths,  splitBirths,   splitDeaths);
-  gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
-  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
-    (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
-	    (prevSweep != 0 ? (double)prevSweep : 1.0),
-    (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
-  _dictionary->printDictCensus();
-}
-
-// Return the next displaced header, incrementing the pointer and
-// recycling spool area as necessary.
-markOop PromotionInfo::nextDisplacedHeader() {
-  assert(_spoolHead != NULL, "promotionInfo inconsistency");
-  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
-         "Empty spool space: no displaced header can be fetched");
-  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
-  markOop hdr = _spoolHead->displacedHdr[_firstIndex];
-  // Spool forward
-  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
-    // forward to next block, recycling this block into spare spool buffer
-    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
-    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
-    _spoolHead->nextSpoolBlock = _spareSpool;
-    _spareSpool = _spoolHead;
-    _spoolHead = tmp;
-    _firstIndex = 1;
-    NOT_PRODUCT(
-      if (_spoolHead == NULL) {  // all buffers fully consumed
-        assert(_spoolTail == NULL && _nextIndex == 1,
-               "spool buffers processing inconsistency");
-      }
-    )
-  } 
-  return hdr;
-}
-
-void PromotionInfo::track(PromotedObject* trackOop) {
-  track(trackOop, oop(trackOop)->klass());
-}
-
-void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
-  // make a copy of header as it may need to be spooled
-  markOop mark = oop(trackOop)->mark();
-  trackOop->clearNext();
-  if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
-    // save non-prototypical header, and mark oop
-    saveDisplacedHeader(mark);
-    trackOop->setDisplacedMark();
-  } else {
-    // we'd like to assert something like the following:
-    // assert(mark == markOopDesc::prototype(), "consistency check");
-    // ... but the above won't work because the age bits have not (yet) been
-    // cleared. The remainder of the check would be identical to the
-    // condition checked in must_be_preserved() above, so we don't really
-    // have anything useful to check here!
-  }
-  if (_promoTail != NULL) {
-    assert(_promoHead != NULL, "List consistency");
-    _promoTail->setNext(trackOop);
-    _promoTail = trackOop;
-  } else {
-    assert(_promoHead == NULL, "List consistency");
-    _promoHead = _promoTail = trackOop;
-  }
-  // Mask as newly promoted, so we can skip over such objects
-  // when scanning dirty cards
-  assert(!trackOop->hasPromotedMark(), "Should not have been marked");
-  trackOop->setPromotedMark();
-}
-
-// Save the given displaced header, incrementing the pointer and
-// obtaining more spool area as necessary.
-void PromotionInfo::saveDisplacedHeader(markOop hdr) {
-  assert(_spoolHead != NULL && _spoolTail != NULL,
-         "promotionInfo inconsistency");
-  assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
-  _spoolTail->displacedHdr[_nextIndex] = hdr;
-  // Spool forward
-  if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
-    // get a new spooling block
-    assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
-    _splice_point = _spoolTail;                   // save for splicing
-    _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
-    _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
-    // ... but will attempt filling before next promotion attempt
-    _nextIndex = 1;
-  }
-}
-
-// Ensure that spooling space exists. Return false if spooling space
-// could not be obtained.
-bool PromotionInfo::ensure_spooling_space_work() {
-  assert(!has_spooling_space(), "Only call when there is no spooling space");
-  // Try and obtain more spooling space
-  SpoolBlock* newSpool = getSpoolBlock();
-  assert(newSpool == NULL ||
-         (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
-        "getSpoolBlock() sanity check");
-  if (newSpool == NULL) {
-    return false;
-  }
-  _nextIndex = 1;
-  if (_spoolTail == NULL) {
-    _spoolTail = newSpool;
-    if (_spoolHead == NULL) {
-      _spoolHead = newSpool;
-      _firstIndex = 1;
-    } else {
-      assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
-             "Splice point invariant");
-      // Extra check that _splice_point is connected to list
-      #ifdef ASSERT
-      {
-        SpoolBlock* blk = _spoolHead;
-        for (; blk->nextSpoolBlock != NULL;
-             blk = blk->nextSpoolBlock);
-        assert(blk != NULL && blk == _splice_point,
-               "Splice point incorrect");
-      }
-      #endif // ASSERT
-      _splice_point->nextSpoolBlock = newSpool;
-    }
-  } else {
-    assert(_spoolHead != NULL, "spool list consistency");
-    _spoolTail->nextSpoolBlock = newSpool;
-    _spoolTail = newSpool;
-  }
-  return true;
-}
-
-// Get a free spool buffer from the free pool, getting a new block
-// from the heap if necessary.
-SpoolBlock* PromotionInfo::getSpoolBlock() {
-  SpoolBlock* res;
-  if ((res = _spareSpool) != NULL) {
-    _spareSpool = _spareSpool->nextSpoolBlock;
-    res->nextSpoolBlock = NULL;
-  } else {  // spare spool exhausted, get some from heap
-    res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
-    if (res != NULL) {
-      res->init();
-    }
-  }
-  assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
-  return res;
-}
-
-void PromotionInfo::startTrackingPromotions() {
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "spooling inconsistency?");
-  _firstIndex = _nextIndex = 1;
-  _tracking = true;
-}
-
-void PromotionInfo::stopTrackingPromotions() {
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "spooling inconsistency?");
-  _firstIndex = _nextIndex = 1;
-  _tracking = false;
-}
-
-// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
-// points to the next slot available for filling.
-// The set of slots holding displaced headers are then all those in the
-// right-open interval denoted by: 
-// 
-//    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
-// 
-// When _spoolTail is NULL, then the set of slots with displaced headers
-// is all those starting at the slot <_spoolHead, _firstIndex> and
-// going up to the last slot of last block in the linked list.
-// In this lartter case, _splice_point points to the tail block of
-// this linked list of blocks holding displaced headers.
-void PromotionInfo::verify() const {
-  // Verify the following:
-  // 1. the number of displaced headers matches the number of promoted
-  //    objects that have displaced headers
-  // 2. each promoted object lies in this space
-  debug_only(
-    PromotedObject* junk = NULL;
-    assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
-           "Offset of PromotedObject::_next is expected to align with "
-           "  the OopDesc::_mark within OopDesc");
-  )
-  // FIXME: guarantee????
-  guarantee(_spoolHead == NULL || _spoolTail != NULL ||
-            _splice_point != NULL, "list consistency");
-  guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
-  // count the number of objects with displaced headers
-  size_t numObjsWithDisplacedHdrs = 0;
-  for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
-    guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
-    // the last promoted object may fail the mark() != NULL test of is_oop().
-    guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
-    if (curObj->hasDisplacedMark()) {
-      numObjsWithDisplacedHdrs++;
-    }
-  }
-  // Count the number of displaced headers
-  size_t numDisplacedHdrs = 0;
-  for (SpoolBlock* curSpool = _spoolHead;
-       curSpool != _spoolTail && curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    // the first entry is just a self-pointer; indices 1 through
-    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
-    guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
-              "first entry of displacedHdr should be self-referential");
-    numDisplacedHdrs += curSpool->bufferSize - 1;
-  }
-  guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
-            "internal consistency");
-  guarantee(_spoolTail != NULL || _nextIndex == 1,
-            "Inconsistency between _spoolTail and _nextIndex");
-  // We overcounted (_firstIndex-1) worth of slots in block
-  // _spoolHead and we undercounted (_nextIndex-1) worth of
-  // slots in block _spoolTail. We make an appropriate
-  // adjustment by subtracting the first and adding the
-  // second:  - (_firstIndex - 1) + (_nextIndex - 1) 
-  numDisplacedHdrs += (_nextIndex - _firstIndex);
-  guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
-}
-
-
-CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
-  _cfls(cfls)
-{
-  _blocks_to_claim = CMSParPromoteBlocksToClaim;
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    _indexedFreeList[i].set_size(i);
-  }
-}
-
-HeapWord* CFLS_LAB::alloc(size_t word_sz) {
-  FreeChunk* res;
-  word_sz = _cfls->adjustObjectSize(word_sz);
-  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
-    // This locking manages sync with other large object allocations.
-    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
-                    Mutex::_no_safepoint_check_flag);
-    res = _cfls->getChunkFromDictionaryExact(word_sz);
-    if (res == NULL) return NULL;
-  } else {
-    FreeList* fl = &_indexedFreeList[word_sz];
-    bool filled = false; //TRAP
-    if (fl->count() == 0) {
-      bool filled = true; //TRAP
-      // Attempt to refill this local free list.
-      _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
-      // If it didn't work, give up.
-      if (fl->count() == 0) return NULL;
-    }
-    res = fl->getChunkAtHead();
-    assert(res != NULL, "Why was count non-zero?");
-  }
-  res->markNotFree();
-  assert(!res->isFree(), "shouldn't be marked free");
-  assert(oop(res)->klass() == NULL, "should look uninitialized");
-  // mangle a just allocated object with a distinct pattern.
-  debug_only(res->mangleAllocated(word_sz));
-  return (HeapWord*)res;
-}
-
-void CFLS_LAB::retire() {
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    if (_indexedFreeList[i].count() > 0) {
-      MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
-                      Mutex::_no_safepoint_check_flag);
-      _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
-      // Reset this list.
-      _indexedFreeList[i] = FreeList();
-      _indexedFreeList[i].set_size(i);
-    }
-  }
-}
-
-void
-CompactibleFreeListSpace::
-par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-	 "Precondition");
-
-  // We'll try all multiples of word_sz in the indexed set (starting with
-  // word_sz itself), then try getting a big chunk and splitting it.
-  int k = 1;
-  size_t cur_sz = k * word_sz;
-  bool found = false;
-  while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
-    FreeList* gfl = &_indexedFreeList[cur_sz];
-    FreeList fl_for_cur_sz;  // Empty.
-    fl_for_cur_sz.set_size(cur_sz);
-    {
-      MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
-                      Mutex::_no_safepoint_check_flag);
-      if (gfl->count() != 0) {
-	size_t nn = MAX2(n/k, (size_t)1);
-	gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
-	found = true;
-      }
-    }
-    // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
-    if (found) {
-      if (k == 1) {
-	fl->prepend(&fl_for_cur_sz);
-      } else {
-	// Divide each block on fl_for_cur_sz up k ways.
-	FreeChunk* fc;
-	while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
-	  // Must do this in reverse order, so that anybody attempting to
-	  // access the main chunk sees it as a single free block until we
-	  // change it.
-          size_t fc_size = fc->size();
-	  for (int i = k-1; i >= 0; i--) {
-	    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-	    ffc->setSize(word_sz);
-	    ffc->linkNext(NULL);
-	    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
-            // Above must occur before BOT is updated below.
-            // splitting from the right, fc_size == (k - i + 1) * wordsize
-	    _bt.mark_block((HeapWord*)ffc, word_sz);
-            fc_size -= word_sz;
-            _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
-            _bt.verify_single_block((HeapWord*)fc, fc_size);
-            _bt.verify_single_block((HeapWord*)ffc, ffc->size());
-	    // Push this on "fl".
-	    fl->returnChunkAtHead(ffc);
-	  }
-	  // TRAP
-	  assert(fl->tail()->next() == NULL, "List invariant.");
-	}
-      }
-      return;
-    }
-    k++; cur_sz = k * word_sz;
-  }
-  // Otherwise, we'll split a block from the dictionary.
-  FreeChunk* fc = NULL;
-  FreeChunk* rem_fc = NULL;
-  size_t rem;
-  {
-    MutexLockerEx x(parDictionaryAllocLock(),
-                    Mutex::_no_safepoint_check_flag);
-    while (n > 0) {
-      fc = dictionary()->getChunk(MAX2(n * word_sz, 
-				  _dictionary->minSize()),
-				  FreeBlockDictionary::atLeast);
-      if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
-        dictionary()->dictCensusUpdate(fc->size(),
-				       true /*split*/,
-				       false /*birth*/);
-        break;
-      } else {
-        n--;
-      }
-    }
-    if (fc == NULL) return;
-    // Otherwise, split up that block.
-    size_t nn = fc->size() / word_sz;
-    n = MIN2(nn, n);
-    rem = fc->size() - n * word_sz;
-    // If there is a remainder, and it's too small, allocate one fewer.
-    if (rem > 0 && rem < MinChunkSize) {
-      n--; rem += word_sz;
-    }
-    // First return the remainder, if any.
-    // Note that we hold the lock until we decide if we're going to give
-    // back the remainder to the dictionary, since a contending allocator
-    // may otherwise see the heap as empty.  (We're willing to take that
-    // hit if the block is a small block.)
-    if (rem > 0) {
-      size_t prefix_size = n * word_sz;
-      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
-      rem_fc->setSize(rem);
-      rem_fc->linkNext(NULL);
-      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
-      // Above must occur before BOT is updated below.
-      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
-      if (rem >= IndexSetSize) {
-	returnChunkToDictionary(rem_fc);
-	dictionary()->dictCensusUpdate(fc->size(),
-				       true /*split*/,
-				       true /*birth*/);
-	rem_fc = NULL;
-      }
-      // Otherwise, return it to the small list below.
-    }
-  }
-  // 
-  if (rem_fc != NULL) {
-    MutexLockerEx x(_indexedFreeListParLocks[rem],
-                    Mutex::_no_safepoint_check_flag);
-    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
-    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
-    smallSplitBirth(rem);
-  }
-
-  // Now do the splitting up.
-  // Must do this in reverse order, so that anybody attempting to
-  // access the main chunk sees it as a single free block until we
-  // change it.
-  size_t fc_size = n * word_sz;
-  // All but first chunk in this loop
-  for (ssize_t i = n-1; i > 0; i--) {
-    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-    ffc->setSize(word_sz);
-    ffc->linkNext(NULL);
-    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
-    // Above must occur before BOT is updated below.
-    // splitting from the right, fc_size == (n - i + 1) * wordsize
-    _bt.mark_block((HeapWord*)ffc, word_sz);
-    fc_size -= word_sz;
-    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
-    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
-    _bt.verify_single_block((HeapWord*)fc, fc_size);
-    // Push this on "fl".
-    fl->returnChunkAtHead(ffc);
-  }
-  // First chunk
-  fc->setSize(word_sz);
-  fc->linkNext(NULL);
-  fc->linkPrev(NULL);
-  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-  _bt.verify_single_block((HeapWord*)fc, fc->size());
-  fl->returnChunkAtHead(fc);
-
-  {
-    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
-                    Mutex::_no_safepoint_check_flag);
-    ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
-    _indexedFreeList[word_sz].set_splitBirths(new_births);
-    ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
-    _indexedFreeList[word_sz].set_surplus(new_surplus);
-  }
-
-  // TRAP
-  assert(fl->tail()->next() == NULL, "List invariant.");
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel rescan. See CMSParRemarkTask where this is currently used.
-// XXX Need to suitably abstract and generalize this and the next
-// method into one.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_rescan(int n_threads) {
-  // The "size" of each task is fixed according to rescan_task_size.
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  const size_t task_size = rescan_task_size();
-  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
-  assert((used_region().start() + (n_tasks - 1)*task_size <
-          used_region().end()) &&
-         (used_region().start() + n_tasks*task_size >=
-          used_region().end()), "n_task calculation incorrect");
-  SequentialSubTasksDone* pst = conc_par_seq_tasks();
-  assert(!pst->valid(), "Clobbering existing data?");
-  pst->set_par_threads(n_threads);
-  pst->set_n_tasks((int)n_tasks);
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_marking(int n_threads,
-                                           HeapWord* low) {
-  // The "size" of each task is fixed according to rescan_task_size.
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  const size_t task_size = marking_task_size();
-  assert(task_size > CardTableModRefBS::card_size_in_words &&
-         (task_size %  CardTableModRefBS::card_size_in_words == 0),
-         "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _gen->reserved();
-  if (low != NULL) {
-    if (span.contains(low)) {
-      // Align low down to  a card boundary so that
-      // we can use block_offset_careful() on span boundaries.
-      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
-                                 CardTableModRefBS::card_size);
-      // Clip span prefix at aligned_low
-      span = span.intersection(MemRegion(aligned_low, span.end()));
-    } else if (low > span.end()) {
-      span = MemRegion(low, low);  // Null region
-    } // else use entire span
-  }
-  assert(span.is_empty() || 
-         ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
-        "span should start at a card boundary");
-  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
-  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
-  assert(n_tasks == 0 ||
-         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
-          (span.start() + n_tasks*task_size >= span.end())),
-         "n_task calculation incorrect");
-  SequentialSubTasksDone* pst = conc_par_seq_tasks();
-  assert(!pst->valid(), "Clobbering existing data?");
-  pst->set_par_threads(n_threads);
-  pst->set_n_tasks((int)n_tasks);
-}
-
--- a/hotspot/src/share/vm/memory/compactibleFreeListSpace.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,751 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)compactibleFreeListSpace.hpp	1.91 07/05/05 17:05:45 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// Classes in support of keeping track of promotions into a non-Contiguous
-// space, in this case a CompactibleFreeListSpace.
-
-#define CFLS_LAB_REFILL_STATS 0
-
-// Forward declarations
-class CompactibleFreeListSpace;
-class BlkClosure;
-class BlkClosureCareful;
-class UpwardsObjectClosure;
-class ObjectClosureCareful;
-class Klass;
-
-class PromotedObject VALUE_OBJ_CLASS_SPEC {
- private:
-  enum {
-    promoted_mask  = right_n_bits(2),   // i.e. 0x3
-    displaced_mark = nth_bit(2),        // i.e. 0x4
-    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
-  };
-  intptr_t _next;
- public:
-  inline PromotedObject* next() const {
-    return (PromotedObject*)(_next & next_mask);
-  }
-  inline void setNext(PromotedObject* x) { 
-    assert(((intptr_t)x & ~next_mask) == 0,
-           "Conflict in bit usage, "
-           " or insufficient alignment of objects");
-    _next |= (intptr_t)x;
-  }
-  inline void setPromotedMark() {
-    _next |= promoted_mask;
-  }
-  inline bool hasPromotedMark() const {
-    return (_next & promoted_mask) == promoted_mask;
-  }
-  inline void setDisplacedMark() {
-    _next |= displaced_mark;
-  }
-  inline bool hasDisplacedMark() const {
-    return (_next & displaced_mark) != 0;
-  }
-  inline void clearNext()        { _next = 0; }
-  debug_only(void *next_addr() { return (void *) &_next; })
-};
-
-class SpoolBlock: public FreeChunk {
-  friend class PromotionInfo;
- protected:
-  SpoolBlock*  nextSpoolBlock;
-  size_t       bufferSize;        // number of usable words in this block
-  markOop*     displacedHdr;      // the displaced headers start here
-
-  // Note about bufferSize: it denotes the number of entries available plus 1;
-  // legal indices range from 1 through BufferSize - 1.  See the verification
-  // code verify() that counts the number of displaced headers spooled.
-  size_t computeBufferSize() {
-    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
-  }
-
- public:
-  void init() {
-    bufferSize = computeBufferSize();
-    displacedHdr = (markOop*)&displacedHdr;
-    nextSpoolBlock = NULL;
-  }
-};
-
-class PromotionInfo VALUE_OBJ_CLASS_SPEC {
-  bool            _tracking;      // set if tracking
-  CompactibleFreeListSpace* _space; // the space to which this belongs
-  PromotedObject* _promoHead;     // head of list of promoted objects
-  PromotedObject* _promoTail;     // tail of list of promoted objects
-  SpoolBlock*     _spoolHead;     // first spooling block
-  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
-  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
-  SpoolBlock*     _spareSpool;    // free spool buffer
-  size_t          _firstIndex;    // first active index in
-                                  // first spooling block (_spoolHead)
-  size_t          _nextIndex;     // last active index + 1 in last
-                                  // spooling block (_spoolTail)
- private:
-  // ensure that spooling space exists; return true if there is spooling space
-  bool ensure_spooling_space_work();
-
- public:
-  PromotionInfo() :
-    _tracking(0), _space(NULL),
-    _promoHead(NULL), _promoTail(NULL),
-    _spoolHead(NULL), _spoolTail(NULL),
-    _spareSpool(NULL), _firstIndex(1),
-    _nextIndex(1) {}
-
-  bool noPromotions() const {
-    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
-    return _promoHead == NULL;
-  }
-  void startTrackingPromotions();
-  void stopTrackingPromotions();
-  bool tracking() const          { return _tracking;  }
-  void track(PromotedObject* trackOop);      // keep track of a promoted oop
-  // The following variant must be used when trackOop is not fully
-  // initialized and has a NULL klass:
-  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
-  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
-  CompactibleFreeListSpace* space() const     { return _space; }
-  markOop nextDisplacedHeader(); // get next header & forward spool pointer
-  void    saveDisplacedHeader(markOop hdr);
-                                 // save header and forward spool
-
-  inline size_t refillSize() const;
-
-  SpoolBlock* getSpoolBlock();   // return a free spooling block
-  inline bool has_spooling_space() {
-    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
-  }
-  // ensure that spooling space exists
-  bool ensure_spooling_space() {
-    return has_spooling_space() || ensure_spooling_space_work();
-  }
-  #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
-    void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
-  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
-  #undef PROMOTED_OOPS_ITERATE_DECL
-  void promoted_oops_iterate(OopsInGenClosure* cl) {
-    promoted_oops_iterate_v(cl);
-  }
-  void verify()  const;
-  void reset() {
-    _promoHead = NULL;
-    _promoTail = NULL;
-    _spoolHead = NULL; 
-    _spoolTail = NULL;
-    _spareSpool = NULL;
-    _firstIndex = 0;
-    _nextIndex = 0;
-
-  }
-};
-
-class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
- public:
-  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), 
-    _allocation_size_limit(0) {}
-  void set(HeapWord* ptr, size_t word_size, size_t refill_size, 
-    size_t allocation_size_limit) {
-    _ptr = ptr;
-    _word_size = word_size;
-    _refillSize = refill_size;
-    _allocation_size_limit = allocation_size_limit; 
-  }
-  HeapWord* _ptr;
-  size_t    _word_size;
-  size_t    _refillSize;
-  size_t    _allocation_size_limit;  // largest size that will be allocated
-};
-
-// Concrete subclass of CompactibleSpace that implements
-// a free list space, such as used in the concurrent mark sweep
-// generation.
-
-class CompactibleFreeListSpace: public CompactibleSpace {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepGeneration;
-  friend class ASConcurrentMarkSweepGeneration;
-  friend class CMSCollector;
-  friend class CMSPermGenGen;
-  // Local alloc buffer for promotion into this space.
-  friend class CFLS_LAB;   
-
-  // "Size" of chunks of work (executed during parallel remark phases
-  // of CMS collection); this probably belongs in CMSCollector, although
-  // it's cached here because it's used in
-  // initialize_sequential_subtasks_for_rescan() which modifies
-  // par_seq_tasks which also lives in Space. XXX
-  const size_t _rescan_task_size;
-  const size_t _marking_task_size;
-
-  // Yet another sequential tasks done structure. This supports
-  // CMS GC, where we have threads dynamically
-  // claiming sub-tasks from a larger parallel task.
-  SequentialSubTasksDone _conc_par_seq_tasks;
-
-  BlockOffsetArrayNonContigSpace _bt;
-
-  CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _gen;
-
-  // Data structures for free blocks (used during allocation/sweeping)
-
-  // Allocation is done linearly from two different blocks depending on
-  // whether the request is small or large, in an effort to reduce
-  // fragmentation. We assume that any locking for allocation is done
-  // by the containing generation. Thus, none of the methods in this
-  // space are re-entrant.
-  enum SomeConstants {
-    SmallForLinearAlloc = 16,        // size < this then use _sLAB
-    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
-    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
-    IndexSetStart       = MinObjAlignment,
-    IndexSetStride      = MinObjAlignment
-  };
-
- private:
-  enum FitStrategyOptions {
-    FreeBlockStrategyNone = 0,
-    FreeBlockBestFitFirst
-  };
-
-  PromotionInfo _promoInfo;
-
-  // helps to impose a global total order on freelistLock ranks;
-  // assumes that CFLSpace's are allocated in global total order
-  static int   _lockRank;
-
-  // a lock protecting the free lists and free blocks;
-  // mutable because of ubiquity of locking even for otherwise const methods
-  mutable Mutex _freelistLock; 
-  // locking verifier convenience function
-  void assert_locked() const PRODUCT_RETURN;
-
-  // Linear allocation blocks
-  LinearAllocBlock _smallLinearAllocBlock;
-
-  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
-  FreeBlockDictionary* _dictionary;    // ptr to dictionary for large size blocks
-
-  FreeList _indexedFreeList[IndexSetSize];
-                                       // indexed array for small size blocks
-  // allocation stategy
-  bool       _fitStrategy;      // Use best fit strategy.
-  bool	     _adaptive_freelists; // Use adaptive freelists
-
-  // This is an address close to the largest free chunk in the heap.
-  // It is currently assumed to be at the end of the heap.  Free
-  // chunks with addresses greater than nearLargestChunk are coalesced
-  // in an effort to maintain a large chunk at the end of the heap.
-  HeapWord*  _nearLargestChunk;
-
-  // Used to keep track of limit of sweep for the space
-  HeapWord* _sweep_limit;
-
-  // Support for compacting cms
-  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
-  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
-
-  // Initialization helpers.
-  void initializeIndexedFreeListArray();
-
-  // Extra stuff to manage promotion parallelism.
-
-  // a lock protecting the dictionary during par promotion allocation.
-  mutable Mutex _parDictionaryAllocLock;
-  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
-
-  // Locks protecting the exact lists during par promotion allocation.
-  Mutex* _indexedFreeListParLocks[IndexSetSize];
-
-#if CFLS_LAB_REFILL_STATS
-  // Some statistics.
-  jint  _par_get_chunk_from_small;
-  jint  _par_get_chunk_from_large;
-#endif
-
-
-  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
-  // required to be smaller than "IndexSetSize".)  If successful,
-  // adds them to "fl", which is required to be an empty free list.
-  // If the count of "fl" is negative, it's absolute value indicates a
-  // number of free chunks that had been previously "borrowed" from global
-  // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
-
-  // Allocation helper functions
-  // Allocate using a strategy that takes from the indexed free lists
-  // first.  This allocation strategy assumes a companion sweeping
-  // strategy that attempts to keep the needed number of chunks in each
-  // indexed free lists.
-  HeapWord* allocate_adaptive_freelists(size_t size);
-  // Allocate from the linear allocation buffers first.  This allocation
-  // strategy assumes maximal coalescing can maintain chunks large enough
-  // to be used as linear allocation buffers.
-  HeapWord* allocate_non_adaptive_freelists(size_t size);
-
-  // Gets a chunk from the linear allocation block (LinAB).  If there 
-  // is not enough space in the LinAB, refills it.
-  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
-  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
-  // Get a chunk from the space remaining in the linear allocation block.  Do
-  // not attempt to refill if the space is not available, return NULL.  Do the
-  // repairs on the linear allocation block as appropriate.
-  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
-  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);
-
-  // Helper function for getChunkFromIndexedFreeList.
-  // Replenish the indexed free list for this "size".  Do not take from an
-  // underpopulated size.
-  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size);
-
-  // Get a chunk from the indexed free list.  If the indexed free list
-  // does not have a free chunk, try to replenish the indexed free list
-  // then get the free chunk from the replenished indexed free list.
-  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);
-
-  // The returned chunk may be larger than requested (or null).
-  FreeChunk* getChunkFromDictionary(size_t size);
-  // The returned chunk is the exact size requested (or null).
-  FreeChunk* getChunkFromDictionaryExact(size_t size);
-
-  // Find a chunk in the indexed free list that is the best
-  // fit for size "numWords".
-  FreeChunk* bestFitSmall(size_t numWords);
-  // For free list "fl" of chunks of size > numWords, 
-  // remove a chunk, split off a chunk of size numWords
-  // and return it.  The split off remainder is returned to
-  // the free lists.  The old name for getFromListGreater
-  // was lookInListGreater.
-  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
-  // Get a chunk in the indexed free list or dictionary, 
-  // by considering a larger chunk and splitting it.
-  FreeChunk* getChunkFromGreater(size_t numWords);
-  //  Verify that the given chunk is in the indexed free lists.
-  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
-  // Remove the specified chunk from the indexed free lists.
-  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
-  // Remove the specified chunk from the dictionary.
-  void       removeChunkFromDictionary(FreeChunk* fc);
-  // Split a free chunk into a smaller free chunk of size "new_size".
-  // Return the smaller free chunk and return the remainder to the 
-  // free lists.
-  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
-  // Add a chunk to the free lists.
-  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
-  // Add a chunk to the free lists, preferring to suffix it
-  // to the last free chunk at end of space if possible, and
-  // updating the block census stats as well as block offset table.
-  // Take any locks as appropriate if we are multithreaded.
-  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
-  // Add a free chunk to the indexed free lists.
-  void       returnChunkToFreeList(FreeChunk* chunk);
-  // Add a free chunk to the dictionary.
-  void       returnChunkToDictionary(FreeChunk* chunk);
-
-  // Functions for maintaining the linear allocation buffers (LinAB).
-  // Repairing a linear allocation block refers to operations
-  // performed on the remainder of a LinAB after an allocation
-  // has been made from it.
-  void       repairLinearAllocationBlocks();
-  void       repairLinearAllocBlock(LinearAllocBlock* blk);
-  void       refillLinearAllocBlock(LinearAllocBlock* blk);
-  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
-  void       refillLinearAllocBlocksIfNeeded();
-
-  void       verify_objects_initialized() const;
-  
-  // Statistics reporting helper functions
-  void       reportFreeListStatistics() const;
-  void       reportIndexedFreeListStatistics() const;
-  size_t     maxChunkSizeInIndexedFreeLists() const;
-  size_t     numFreeBlocksInIndexedFreeLists() const;
-  // Accessor
-  HeapWord* unallocated_block() const {
-    HeapWord* ub = _bt.unallocated_block();
-    assert(ub >= bottom() &&
-           ub <= end(), "space invariant");
-    return ub;
-  }
-  void freed(HeapWord* start, size_t size) {
-    _bt.freed(start, size);
-  }
-
- protected:
-  // reset the indexed free list to its initial empty condition.
-  void resetIndexedFreeListArray();
-  // reset to an initial state with a single free block described
-  // by the MemRegion parameter.
-  void reset(MemRegion mr);
-  // Return the total number of words in the indexed free lists.
-  size_t     totalSizeInIndexedFreeLists() const;
-
- public:
-  // Constructor...
-  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
-			   bool use_adaptive_freelists,
-                           FreeBlockDictionary::DictionaryChoice);
-  // accessors
-  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
-  FreeBlockDictionary* dictionary() const { return _dictionary; }
-  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
-  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
-
-  // Return the free chunk at the end of the space.  If no such
-  // chunk exists, return NULL.
-  FreeChunk* find_chunk_at_end();
-
-  bool adaptive_freelists() { return _adaptive_freelists; }
-
-  void set_collector(CMSCollector* collector) { _collector = collector; }
-
-  // Support for parallelization of rescan and marking
-  const size_t rescan_task_size()  const { return _rescan_task_size;  }
-  const size_t marking_task_size() const { return _marking_task_size; }
-  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
-  void initialize_sequential_subtasks_for_rescan(int n_threads);
-  void initialize_sequential_subtasks_for_marking(int n_threads,
-         HeapWord* low = NULL);
-
-#if CFLS_LAB_REFILL_STATS
-  void print_par_alloc_stats();
-#endif
-
-  // Space enquiries
-  size_t used() const;
-  size_t free() const;
-  size_t max_alloc_in_words() const;
-  // XXX: should have a less conservative used_region() than that of
-  // Space; we could consider keeping track of highest allocated
-  // address and correcting that at each sweep, as the sweeper
-  // goes through the entire allocated part of the generation. We
-  // could also use that information to keep the sweeper from
-  // sweeping more than is necessary. The allocator and sweeper will
-  // of course need to synchronize on this, since the sweeper will
-  // try to bump down the address and the allocator will try to bump it up.
-  // For now, however, we'll just use the default used_region()
-  // which overestimates the region by returning the entire
-  // committed region (this is safe, but inefficient).
-
-  // Returns a subregion of the space containing all the objects in
-  // the space.
-  MemRegion used_region() const {
-    return MemRegion(bottom(),
-                     BlockOffsetArrayUseUnallocatedBlock ?
-                     unallocated_block() : end());
-  }
-
-  // This is needed because the default implementation uses block_start()
-  // which can;t be used at certain times (for example phase 3 of mark-sweep).
-  // A better fix is to change the assertions in phase 3 of mark-sweep to
-  // use is_in_reserved(), but that is deferred since the is_in() assertions
-  // are buried through several layers of callers and are used elsewhere
-  // as well.
-  bool is_in(const void* p) const {
-    return used_region().contains(p);
-  }
-    
-  virtual bool is_free_block(const HeapWord* p) const;
-
-  // Resizing support
-  void set_end(HeapWord* value);  // override
-
-  // mutual exclusion support
-  Mutex* freelistLock() const { return &_freelistLock; }
-
-  // Iteration support
-  void oop_iterate(MemRegion mr, OopClosure* cl);
-  void oop_iterate(OopClosure* cl);
-
-  void object_iterate(ObjectClosure* blk);
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
-  // Requires that "mr" be entirely within the space.
-  // Apply "cl->do_object" to all objects that intersect with "mr".
-  // If the iteration encounters an unparseable portion of the region,
-  // terminate the iteration and return the address of the start of the
-  // subregion that isn't done.  Return of "NULL" indicates that the
-  // interation completed.
-  virtual HeapWord*
-       object_iterate_careful_m(MemRegion mr,
-                                ObjectClosureCareful* cl);
-  virtual HeapWord*
-       object_iterate_careful(ObjectClosureCareful* cl);
-
-  // Override: provides a DCTO_CL specific to this kind of space.
-  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
-				     CardTableModRefBS::PrecisionStyle precision,
-				     HeapWord* boundary);
-
-  void blk_iterate(BlkClosure* cl);
-  void blk_iterate_careful(BlkClosureCareful* cl);
-  HeapWord* block_start(const void* p) const;
-  HeapWord* block_start_careful(const void* p) const;
-  size_t block_size(const HeapWord* p) const;
-  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
-  bool block_is_obj(const HeapWord* p) const;
-  bool obj_is_alive(const HeapWord* p) const;
-  size_t block_size_nopar(const HeapWord* p) const;
-  bool block_is_obj_nopar(const HeapWord* p) const;
-
-  // iteration support for promotion
-  void save_marks();
-  bool no_allocs_since_save_marks();
-  void object_iterate_since_last_GC(ObjectClosure* cl);
-
-  // iteration support for sweeping
-  void save_sweep_limit() {
-    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
-                   unallocated_block() : end();
-  }
-  NOT_PRODUCT(
-    void clear_sweep_limit() { _sweep_limit = NULL; }
-  )
-  HeapWord* sweep_limit() { return _sweep_limit; }
-
-  // Apply "blk->do_oop" to the addresses of all reference fields in objects
-  // promoted into this generation since the most recent save_marks() call.
-  // Fields in objects allocated by applications of the closure
-  // *are* included in the iteration. Thus, when the iteration completes
-  // there should be no further such objects remaining.
-  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
-    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
-  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
-  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL
-
-  // Allocation support
-  HeapWord* allocate(size_t size);
-  HeapWord* par_allocate(size_t size);
-
-  oop       promote(oop obj, size_t obj_size, oop* ref);
-  void      gc_prologue();
-  void      gc_epilogue();
-
-  // This call is used by a containing CMS generation / collector
-  // to inform the CFLS space that a sweep has been completed
-  // and that the space can do any related house-keeping functions.
-  void      sweep_completed();
-
-  // For an object in this space, the mark-word's two
-  // LSB's having the value [11] indicates that it has been
-  // promoted since the most recent call to save_marks() on
-  // this generation and has not subsequently been iterated
-  // over (using oop_since_save_marks_iterate() above).
-  bool obj_allocated_since_save_marks(const oop obj) const {
-    assert(is_in_reserved(obj), "Wrong space?");
-    return ((PromotedObject*)obj)->hasPromotedMark();
-  }
-
-  // A worst-case estimate of the space required (in HeapWords) to expand the
-  // heap when promoting an obj of size obj_size.
-  size_t expansionSpaceRequired(size_t obj_size) const;
-
-  FreeChunk* allocateScratch(size_t size);
-
-  // returns true if either the small or large linear allocation buffer is empty.
-  bool       linearAllocationWouldFail();
-
-  // Adjust the chunk for the minimum size.  This version is called in
-  // most cases in CompactibleFreeListSpace methods.
-  inline static size_t adjustObjectSize(size_t size) {
-    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
-  }
-  // This is a virtual version of adjustObjectSize() that is called
-  // only occasionally when the compaction space changes and the type
-  // of the new compaction space is is only known to be CompactibleSpace.
-  size_t adjust_object_size_v(size_t size) const {
-    return adjustObjectSize(size);
-  }
-  // Minimum size of a free block.
-  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
-  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
-  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
-              bool coalesced);
-
-  // Support for compaction
-  void prepare_for_compaction(CompactPoint* cp);
-  void adjust_pointers();
-  void compact();
-  // reset the space to reflect the fact that a compaction of the
-  // space has been done.
-  virtual void reset_after_compaction();
-
-  // Debugging support
-  void print()                            const;
-  void prepare_for_verify();
-  void verify(bool allow_dirty)           const;
-  void verifyFreeLists()                  const PRODUCT_RETURN;
-  void verifyIndexedFreeLists()           const;
-  void verifyIndexedFreeList(size_t size) const;
-  // verify that the given chunk is in the free lists.
-  bool verifyChunkInFreeLists(FreeChunk* fc) const;
-  // Do some basic checks on the the free lists.
-  void checkFreeListConsistency()	  const PRODUCT_RETURN;
-
-  NOT_PRODUCT (
-    void initializeIndexedFreeListArrayReturnedBytes();
-    size_t sumIndexedFreeListArrayReturnedBytes();
-    // Return the total number of chunks in the indexed free lists.
-    size_t totalCountInIndexedFreeLists() const;
-    // Return the total numberof chunks in the space.
-    size_t totalCount();
-  )
-
-  // The census consists of counts of the quantities such as
-  // the current count of the free chunks, number of chunks
-  // created as a result of the split of a larger chunk or
-  // coalescing of smaller chucks, etc.  The counts in the 
-  // census is used to make decisions on splitting and
-  // coalescing of chunks during the sweep of garbage.
-
-  // Print the statistics for the free lists.
-  void printFLCensus(int sweepCt)	  const;
-
-  // Statistics functions
-  // Initialize census for lists before the sweep.
-  void beginSweepFLCensus(float sweep_current,
-                          float sweep_estimate);
-  // Set the surplus for each of the free lists.
-  void setFLSurplus();
-  // Set the hint for each of the free lists.
-  void setFLHints();
-  // Clear the census for each of the free lists.
-  void clearFLCensus();
-  // Perform functions for the census after the end of the sweep.
-  void endSweepFLCensus(int sweepCt);
-  // Return true if the count of free chunks is greater
-  // than the desired number of free chunks.
-  bool coalOverPopulated(size_t size);
-
-
-// Record (for each size):
-// 
-//   split-births = #chunks added due to splits in (prev-sweep-end, 
-// 	this-sweep-start)
-//   split-deaths = #chunks removed for splits in (prev-sweep-end, 
-// 	this-sweep-start)
-//   num-curr     = #chunks at start of this sweep
-//   num-prev     = #chunks at end of previous sweep
-// 
-// The above are quantities that are measured. Now define:
-// 
-//   num-desired := num-prev + split-births - split-deaths - num-curr
-// 
-// Roughly, num-prev + split-births is the supply,
-// split-deaths is demand due to other sizes
-// and num-curr is what we have left.
-// 
-// Thus, num-desired is roughly speaking the "legitimate demand"
-// for blocks of this size and what we are striving to reach at the
-// end of the current sweep.
-// 
-// For a given list, let num-len be its current population.
-// Define, for a free list of a given size:
-// 
-//   coal-overpopulated := num-len >= num-desired * coal-surplus
-// (coal-surplus is set to 1.05, i.e. we allow a little slop when
-// coalescing -- we do not coalesce unless we think that the current
-// supply has exceeded the estimated demand by more than 5%).
-// 
-// For the set of sizes in the binary tree, which is neither dense nor
-// closed, it may be the case that for a particular size we have never
-// had, or do not now have, or did not have at the previous sweep,
-// chunks of that size. We need to extend the definition of
-// coal-overpopulated to such sizes as well:
-// 
-//   For a chunk in/not in the binary tree, extend coal-overpopulated
-//   defined above to include all sizes as follows:
-// 
-//   . a size that is non-existent is coal-overpopulated
-//   . a size that has a num-desired <= 0 as defined above is
-//     coal-overpopulated.  
-// 
-// Also define, for a chunk heap-offset C and mountain heap-offset M:
-// 
-//   close-to-mountain := C >= 0.99 * M
-// 
-// Now, the coalescing strategy is:
-// 
-//    Coalesce left-hand chunk with right-hand chunk if and
-//    only if:
-// 
-//      EITHER
-//        . left-hand chunk is of a size that is coal-overpopulated
-//      OR
-//        . right-hand chunk is close-to-mountain
-  void smallCoalBirth(size_t size);
-  void smallCoalDeath(size_t size);
-  void coalBirth(size_t size);
-  void coalDeath(size_t size);
-  void smallSplitBirth(size_t size);
-  void smallSplitDeath(size_t size);
-  void splitBirth(size_t size);
-  void splitDeath(size_t size);
-  void split(size_t from, size_t to1);
-
-  double flsFrag() const;
-};
-
-// A parallel-GC-thread-local allocation buffer for allocation into a
-// CompactibleFreeListSpace.
-class CFLS_LAB : public CHeapObj {
-  // The space that this buffer allocates into.
-  CompactibleFreeListSpace* _cfls;
-
-  // Our local free lists.
-  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
-
-  // Initialized from a command-line arg.
-  size_t _blocks_to_claim;
-
-#if CFLS_LAB_REFILL_STATS
-  // Some statistics.
-  int _refills;
-  int _blocksTaken;
-  static int _tot_refills;
-  static int _tot_blocksTaken;
-  static int _next_threshold;
-#endif
-
-public:
-  CFLS_LAB(CompactibleFreeListSpace* cfls);
-
-  // Allocate and return a block of the given size, or else return NULL.
-  HeapWord* alloc(size_t word_sz);
-
-  // Return any unused portions of the buffer to the global pool.
-  void retire();
-};
-
-size_t PromotionInfo::refillSize() const {
-  const size_t CMSSpoolBlockSize = 256;
-  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
-                                   * CMSSpoolBlockSize);
-  return CompactibleFreeListSpace::adjustObjectSize(sz);
-}
--- a/hotspot/src/share/vm/memory/concurrentMarkSweepGeneration.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8664 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)concurrentMarkSweepGeneration.cpp	1.285 07/05/05 17:05:48 JVM"
-#endif
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_concurrentMarkSweepGeneration.cpp.incl"
-
-// statics
-CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
-bool          CMSCollector::_full_gc_requested          = false;
-
-//////////////////////////////////////////////////////////////////
-// In support of CMS/VM thread synchronization
-//////////////////////////////////////////////////////////////////
-// We split use of the CGC_lock into 2 "levels".
-// The low-level locking is of the usual CGC_lock monitor. We introduce
-// a higher level "token" (hereafter "CMS token") built on top of the
-// low level monitor (hereafter "CGC lock").
-// The token-passing protocol gives priority to the VM thread. The
-// CMS-lock doesn't provide any fairness guarantees, but clients
-// should ensure that it is only held for very short, bounded
-// durations.
-// 
-// When either of the CMS thread or the VM thread is involved in
-// collection operations during which it does not want the other
-// thread to interfere, it obtains the CMS token.
-// 
-// If either thread tries to get the token while the other has
-// it, that thread waits. However, if the VM thread and CMS thread
-// both want the token, then the VM thread gets priority while the
-// CMS thread waits. This ensures, for instance, that the "concurrent"
-// phases of the CMS thread's work do not block out the VM thread
-// for long periods of time as the CMS thread continues to hog
-// the token. (See bug 4616232).
-// 
-// The baton-passing functions are, however, controlled by the
-// flags _foregroundGCShouldWait and _foregroundGCIsActive,
-// and here the low-level CMS lock, not the high level token,
-// ensures mutual exclusion.
-// 
-// Two important conditions that we have to satisfy:
-// 1. if a thread does a low-level wait on the CMS lock, then it
-//    relinquishes the CMS token if it were holding that token
-//    when it acquired the low-level CMS lock.
-// 2. any low-level notifications on the low-level lock
-//    should only be sent when a thread has relinquished the token.
-// 
-// In the absence of either property, we'd have potential deadlock.
-// 
-// We protect each of the CMS (concurrent and sequential) phases
-// with the CMS _token_, not the CMS _lock_.
-// 
-// The only code protected by CMS lock is the token acquisition code
-// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
-// baton-passing code.
-// 
-// Unfortunately, i couldn't come up with a good abstraction to factor and
-// hide the naked CGC_lock manipulation in the baton-passing code
-// further below. That's something we should try to do. Also, the proof
-// of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy supsicion, for instance,
-// that there may be a theoretical possibility of delay/starvation in the
-// low-level lock/wait/notify scheme used for the baton-passing because of
-// potential intereference with the priority scheme embodied in the
-// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
-// invocation further below and marked with "XXX 20011219YSR".
-// Indeed, as we note elsewhere, this may become yet more slippery
-// in the presence of multiple CMS and/or multiple VM threads. XXX
-
-class CMSTokenSync: public StackObj {
- private:
-  bool _is_cms_thread;
- public:
-  CMSTokenSync(bool is_cms_thread):
-    _is_cms_thread(is_cms_thread) {
-    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
-           "Incorrect argument to constructor");
-    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
-  }
-
-  ~CMSTokenSync() {
-    assert(_is_cms_thread ?
-             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
-             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-          "Incorrect state");
-    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
-  }
-};
-
-// Convenience class that does a CMSTokenSync, and then acquires
-// upto three locks.
-class CMSTokenSyncWithLocks: public CMSTokenSync {
- private:
-  // Note: locks are acquired in textual declaration order
-  // and released in the opposite order
-  MutexLockerEx _locker1, _locker2, _locker3;
- public:
-  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
-                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
-    CMSTokenSync(is_cms_thread),
-    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
-    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
-    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
-  { }
-};
-
-
-// Wrapper class to temporarily disable icms during a foreground cms collection.
-class ICMSDisabler: public StackObj {
- public:
-  // The ctor disables icms and wakes up the thread so it notices the change;
-  // the dtor re-enables icms.  Note that the CMSCollector methods will check
-  // CMSIncrementalMode.
-  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
-  ~ICMSDisabler() { CMSCollector::enable_icms(); }
-};
-
-//////////////////////////////////////////////////////////////////
-//  Concurrent Mark-Sweep Generation /////////////////////////////
-//////////////////////////////////////////////////////////////////
-
-NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
-
-// This struct contains per-thread things necessary to support parallel
-// young-gen collection.
-class CMSParGCThreadState: public CHeapObj {
- public:
-  CFLS_LAB lab;
-  PromotionInfo promo;
-
-  // Constructor.
-  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
-    promo.setSpace(cfls);
-  }
-};
-
-ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs, size_t initial_byte_size, int level,
-     CardTableRS* ct, bool use_adaptive_freelists,
-     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
-  CardGeneration(rs, initial_byte_size, level, ct),
-  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
-  _debug_collection_type(Concurrent_collection_type)
-{
-  HeapWord* bottom = (HeapWord*) _virtual_space.low();
-  HeapWord* end    = (HeapWord*) _virtual_space.high();
-
-  _direct_allocated_words = 0;
-  NOT_PRODUCT(
-    _numObjectsPromoted = 0;
-    _numWordsPromoted = 0;
-    _numObjectsAllocated = 0;
-    _numWordsAllocated = 0;
-  )
-
-#ifdef JVMPI_SUPPORT
-  /*
-   * if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
-   *   jvmpi::post_arena_new_event(Universe::heap()->addr_to_arena_id(bottom),
-   *                               name());
-   * }
-   */
-#endif // JVMPI_SUPPORT
-
-  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
-                                           use_adaptive_freelists,
-					   dictionaryChoice);
-  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  if (_cmsSpace == NULL) {
-    vm_exit_during_initialization(
-      "CompactibleFreeListSpace allocation failure");
-  }
-  _cmsSpace->_gen = this;
-
-  _gc_stats = new CMSGCStats();
-
-  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
-  // offsets match. The ability to tell free chunks from objects
-  // depends on this property.
-  debug_only(
-    FreeChunk* junk = NULL;
-    assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
-           "Offset of FreeChunk::_prev within FreeChunk must match"
-           "  that of OopDesc::_klass within OopDesc");
-  )
-  if (ParallelGCThreads > 0) {
-    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
-    _par_gc_thread_states =
-      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
-    if (_par_gc_thread_states == NULL) {
-      vm_exit_during_initialization("Could not allocate par gc structs");
-    }
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
-      if (_par_gc_thread_states[i] == NULL) { 
-        vm_exit_during_initialization("Could not allocate par gc structs");
-      }
-    }
-  } else {
-    _par_gc_thread_states = NULL;
-  }
-  _incremental_collection_failed = false;
-  // The "dilatation_factor" is the expansion that can occur on
-  // account of the fact that the minimum object size in the CMS
-  // generation may be larger than that in, say, a contiguous young
-  //  generation.
-  // Ideally, in the calculation below, we'd compute the dilatation
-  // factor as: MinChunkSize/(promoting_gen's min object size)
-  // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the mimimum
-  // object size (which today is a header's worth of space);
-  // note that all arithmetic is in units of HeapWords.
-  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
-  assert(_dilatation_factor >= 1.0, "from previous assert");
-}
-
-void ConcurrentMarkSweepGeneration::ref_processor_init() {
-  assert(collector() != NULL, "no collector");
-  collector()->ref_processor_init();
-}
-
-void CMSCollector::ref_processor_init() {
-  if (_ref_processor == NULL) {
-    // Allocate and initialize a reference processor
-    _ref_processor = ReferenceProcessor::create_ref_processor(
-        _span,                               // span
-        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
-        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
-        &_is_alive_closure,
-        ParallelGCThreads,
-        ParallelRefProcEnabled);
-    // Initialize the _ref_processor field of CMSGen
-    _cmsGen->set_ref_processor(_ref_processor);
-
-    // Allocate a dummy ref processor for perm gen.
-    ReferenceProcessor* rp2 = new ReferenceProcessor();
-    if (rp2 == NULL) {
-      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
-    }
-    _permGen->set_ref_processor(rp2);
-  }
-}
-
-CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
-}
-
-CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
-  CMSGCAdaptivePolicyCounters* results = 
-    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
-  assert(
-    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong gc policy counter kind");
-  return results;
-}
-
-
-void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
-
-  const char* gen_name = "old";
-
-  // Generation Counters - generation 1, 1 subspace
-  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
-
-  _space_counters = new GSpaceCounters(gen_name, 0,
-                                       _virtual_space.reserved_size(),
-                                       this, _gen_counters);
-}
-
-CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
-  _cms_gen(cms_gen)
-{
-  assert(alpha <= 100, "bad value");
-  _saved_alpha = alpha;
-
-  // Initialize the alphas to the bootstrap value of 100.
-  _gc0_alpha = _cms_alpha = 100;
-
-  _cms_begin_time.update();
-  _cms_end_time.update();
-
-  _gc0_duration = 0.0;
-  _gc0_period = 0.0;
-  _gc0_promoted = 0;
-
-  _cms_duration = 0.0;
-  _cms_period = 0.0;
-  _cms_allocated = 0;
-
-  _cms_used_at_gc0_begin = 0;
-  _cms_used_at_gc0_end = 0;
-  _allow_duty_cycle_reduction = false;
-  _valid_bits = 0;
-  _icms_duty_cycle = CMSIncrementalDutyCycle;
-}
-
-// If promotion failure handling is on use
-// the padded average size of the promotion for each
-// young generation collection.
-double CMSStats::time_until_cms_gen_full() const {
-  size_t cms_free = _cms_gen->cmsSpace()->free();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = gch->get_gen(0)->capacity();
-  if (HandlePromotionFailure) {
-    expected_promotion = MIN2(
-	(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
-	expected_promotion);
-  }
-  if (cms_free > expected_promotion) {
-    // Start a cms collection if there isn't enough space to promote
-    // for the next minor collection.  Use the padded average as
-    // a safety factor.
-    cms_free -= expected_promotion;
-
-    // Adjust by the safety factor.
-    double cms_free_dbl = (double)cms_free;
-    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
-
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
-	SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
-	cms_free, expected_promotion);
-      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
-	cms_free_dbl, cms_consumption_rate() + 1.0);
-    }
-    // Add 1 in case the consumption rate goes to zero.
-    return cms_free_dbl / (cms_consumption_rate() + 1.0);
-  }
-  return 0.0;
-}
-
-// Compare the duration of the cms collection to the
-// time remaining before the cms generation is empty.
-// Note that the time from the start of the cms collection
-// to the start of the cms sweep (less than the total
-// duration of the cms collection) can be used.  This
-// has been tried and some applications experienced
-// promotion failures early in execution.  This was
-// possibly because the averages were not accurate
-// enough at the beginning.
-double CMSStats::time_until_cms_start() const {
-  // We add "gc0_period" to the "work" calculation
-  // below because this query is done (mostly) at the
-  // end of a scavenge, so we need to conservatively
-  // account for that much possible delay
-  // in the query so as to avoid concurrent mode failures
-  // due to starting the collection just a wee bit too
-  // late.
-  double work = cms_duration() + gc0_period();
-  double deadline = time_until_cms_gen_full();
-  if (work > deadline) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(
-        " CMSCollector: collect because of anticipated promotion "
-        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
-        gc0_period(), time_until_cms_gen_full());
-    }
-    return 0.0;
-  }
-  return work - deadline;
-}
-
-// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
-// amount of change to prevent wild oscillation.
-unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
-					      unsigned int new_duty_cycle) {
-  assert(old_duty_cycle <= 100, "bad input value");
-  assert(new_duty_cycle <= 100, "bad input value");
-
-  // Note:  use subtraction with caution since it may underflow (values are
-  // unsigned).  Addition is safe since we're in the range 0-100.
-  unsigned int damped_duty_cycle = new_duty_cycle;
-  if (new_duty_cycle < old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
-    if (new_duty_cycle + largest_delta < old_duty_cycle) {
-      damped_duty_cycle = old_duty_cycle - largest_delta;
-    }
-  } else if (new_duty_cycle > old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
-    if (new_duty_cycle > old_duty_cycle + largest_delta) {
-      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
-    }
-  }
-  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
-
-  if (CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
-			   old_duty_cycle, new_duty_cycle, damped_duty_cycle);
-  }
-  return damped_duty_cycle;
-}
-
-unsigned int CMSStats::icms_update_duty_cycle_impl() {
-  assert(CMSIncrementalPacing && valid(),
-	 "should be handled in icms_update_duty_cycle()");
-
-  double cms_time_so_far = cms_timer().seconds();
-  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
-  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
-
-  // Avoid division by 0.
-  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
-  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
-
-  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
-  if (new_duty_cycle > _icms_duty_cycle) {
-    // Avoid very small duty cycles (1 or 2); 0 is allowed.
-    if (new_duty_cycle > 2) {
-      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
-						new_duty_cycle);
-    }
-  } else if (_allow_duty_cycle_reduction) {
-    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
-    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
-    // Respect the minimum duty cycle.
-    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
-    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
-  }
-
-  if (PrintGCDetails || CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
-  }
-
-  _allow_duty_cycle_reduction = false;
-  return _icms_duty_cycle;
-}
-
-#ifndef PRODUCT
-void CMSStats::print_on(outputStream *st) const {
-  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
-  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
-	       gc0_duration(), gc0_period(), gc0_promoted());
-  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
-	    cms_duration(), cms_duration_per_mb(),
-	    cms_period(), cms_allocated());
-  st->print(",cms_since_beg=%g,cms_since_end=%g",
-	    cms_time_since_begin(), cms_time_since_end());
-  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
-	    _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-  if (CMSIncrementalMode) {
-    st->print(",dc=%d", icms_duty_cycle());
-  }
-
-  if (valid()) {
-    st->print(",promo_rate=%g,cms_alloc_rate=%g",
-	      promotion_rate(), cms_allocation_rate());
-    st->print(",cms_consumption_rate=%g,time_until_full=%g",
-	      cms_consumption_rate(), time_until_cms_gen_full());
-  }
-  st->print(" ");
-}
-#endif // #ifndef PRODUCT
-
-CMSCollector::CollectorState CMSCollector::_collectorState =
-                             CMSCollector::Idling;
-bool CMSCollector::_foregroundGCIsActive = false;
-bool CMSCollector::_foregroundGCShouldWait = false;
-
-CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
-                           ConcurrentMarkSweepGeneration* permGen,
-                           CardTableRS*                   ct,
-			   ConcurrentMarkSweepPolicy*	  cp):
-  _cmsGen(cmsGen),
-  _permGen(permGen),
-  _ct(ct),
-  _ref_processor(NULL),    // will be set later
-  _conc_workers(NULL),     // may be set later
-  _abort_preclean(false),
-  _start_sampling(false),
-  _between_prologue_and_epilogue(false),
-  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
-  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
-  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
-                 -1 /* lock-free */, "No_lock" /* dummy */),
-  _modUnionClosure(&_modUnionTable),
-  _modUnionClosurePar(&_modUnionTable),
-  _is_alive_closure(&_markBitMap),
-  _restart_addr(NULL),
-  _overflow_list(NULL),
-  _preserved_oop_stack(NULL),
-  _preserved_mark_stack(NULL),
-  _stats(cmsGen),
-  _eden_chunk_array(NULL),     // may be set in ctor body
-  _eden_chunk_capacity(0),     // -- ditto --
-  _eden_chunk_index(0),        // -- ditto --
-  _survivor_plab_array(NULL),  // -- ditto --
-  _survivor_chunk_array(NULL), // -- ditto --
-  _survivor_chunk_capacity(0), // -- ditto --
-  _survivor_chunk_index(0),    // -- ditto --
-  _ser_pmc_preclean_ovflw(0),
-  _ser_pmc_remark_ovflw(0),
-  _par_pmc_remark_ovflw(0),
-  _ser_kac_ovflw(0),
-  _par_kac_ovflw(0),
-  _collection_count_start(0),
-  _verifying(false),
-  _icms_start_limit(NULL),
-  _icms_stop_limit(NULL),
-  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
-  _completed_initialization(false),
-  _collector_policy(cp),
-  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
-{
-  // Adjust any global flags for consistency:
-  // Perm Gen shouldn't be swept if class unloading is disabled
-  CMSPermGenSweepingEnabled = CMSClassUnloadingEnabled &&
-                              CMSPermGenSweepingEnabled;
-  CMSClassUnloadingEnabled  = CMSPermGenSweepingEnabled;
-
-  // Now expand the span and allocate the collection support structures
-  // (MUT, marking bit map etc.) to cover both generations subject to
-  // collection.
-
-  // First check that _permGen is adjacent to _cmsGen and above it.
-  assert(   _cmsGen->reserved().word_size()  > 0
-         && _permGen->reserved().word_size() > 0,
-         "generations should not be of zero size");
-  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
-         "_cmsGen and _permGen should not overlap");
-  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
-         "_cmsGen->end() different from _permGen->start()");
-
-  // For use by dirty card to oop closures.
-  _cmsGen->cmsSpace()->set_collector(this);
-  _permGen->cmsSpace()->set_collector(this);
-
-  // Adjust my span to cover old (cms) gen and perm gen
-  _span = _cmsGen->reserved()._union(_permGen->reserved());
-  // Initialize the span of is_alive_closure
-  _is_alive_closure.set_span(_span);
-
-  // Allocate MUT and marking bit map
-  {
-    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
-    if (!_markBitMap.allocate(_span)) {
-      warning("Failed to allocate CMS Bit Map");
-      return;
-    }
-    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
-  }
-  {
-    _modUnionTable.allocate(_span);
-    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
-  }
-
-  if (!_markStack.allocate(CMSMarkStackSize)) {
-    warning("Failed to allocate CMS Marking Stack");
-    return;
-  }
-  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
-    warning("Failed to allocate CMS Revisit Stack");
-    return;
-  }
-
-  // Support for multi-threaded concurrent phases
-  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
-    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
-      // just for now
-      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
-    }
-    if (ParallelCMSThreads > 1) {
-      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
-                                 ParallelCMSThreads, true);
-      if (_conc_workers == NULL) {
-        warning("GC/CMS: _conc_workers allocation failure: "
-              "forcing -CMSConcurrentMTEnabled");
-        CMSConcurrentMTEnabled = false;
-      }
-    } else {
-      CMSConcurrentMTEnabled = false;
-    }
-  }
-  if (!CMSConcurrentMTEnabled) {
-    ParallelCMSThreads = 0;
-  } else {
-    // Turn off CMSCleanOnEnter optimization temporarily for
-    // the MT case where it's not fixed yet; see 6178663.
-    CMSCleanOnEnter = false;
-  }
-  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1), 
-         "Inconsistency");
-
-  // Parallel task queues; these are shared for the
-  // concurrent and stop-world phases of CMS, but
-  // are not shared with parallel scavenge (ParNew).
-  {
-    uint i;
-    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
-  
-    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
-         || ParallelRefProcEnabled)
-        && num_queues > 0) {
-      _task_queues = new OopTaskQueueSet(num_queues);
-      if (_task_queues == NULL) {
-        warning("task_queues allocation failure.");
-        return;
-      }
-      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
-      if (_hash_seed == NULL) {
-        warning("_hash_seed array allocation failure");
-        return;
-      }
-
-      // XXX use a global constant instead of 64!
-      typedef struct OopTaskQueuePadded {
-        OopTaskQueue work_queue;
-        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
-      } OopTaskQueuePadded;
-    
-      for (i = 0; i < num_queues; i++) {
-        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
-        if (q_padded == NULL) {
-          warning("work_queue allocation failure.");
-          return;
-        }
-        _task_queues->register_queue(i, &q_padded->work_queue);
-      }
-      for (i = 0; i < num_queues; i++) {
-        _task_queues->queue(i)->initialize();
-        _hash_seed[i] = 17;  // copied from ParNew
-      }
-    }
-  }
-
-  // "initiatingOccupancy" is the occupancy ratio at which we trigger
-  // a new collection cycle.  Unless explicitly specified via
-  // CMSTriggerRatio, it is calculated by:
-  //   Let "f" be MinHeapFreeRatio in
-  //
-  //    intiatingOccupancy = 100-f +
-  //                         f * (CMSTriggerRatio/100)
-  // That is, if we assume the heap is at its desired maximum occupancy at the
-  // end of a collection, we let CMSTriggerRatio of the (purported) free
-  // space be allocated before initiating a new collection cycle.
-  if (CMSInitiatingOccupancyFraction > 0) {
-    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
-  } else {
-    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
-                           (double)(CMSTriggerRatio *
-                                    MinHeapFreeRatio) / 100.0)
-			   / 100.0;
-  }
-  // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
-                         /(double)100;
-
-  _full_gcs_since_conc_gc = 0;
-
-  // Now tell CMS generations the identity of their collector
-  ConcurrentMarkSweepGeneration::set_collector(this);
-
-  // Create & start a CMS thread for this CMS collector
-  _cmsThread = ConcurrentMarkSweepThread::start(this);
-  assert(cmsThread() != NULL, "CMS Thread should have been created");
-  assert(cmsThread()->collector() == this,
-         "CMS Thread should refer to this gen");
-  assert(CGC_lock != NULL, "Where's the CGC_lock?");
-
-  // Support for parallelizing young gen rescan
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  _young_gen = gch->prev_gen(_cmsGen);
-  if (gch->supports_inline_contig_alloc()) {
-    _top_addr = gch->top_addr();
-    _end_addr = gch->end_addr();
-    assert(_young_gen != NULL, "no _young_gen");
-    _eden_chunk_index = 0;
-    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
-    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
-    if (_eden_chunk_array == NULL) {
-      _eden_chunk_capacity = 0;
-      warning("GC/CMS: _eden_chunk_array allocation failure");
-    }
-  }
-  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
-
-  // Support for parallelizing survivor space rescan
-  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
-    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
-    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
-    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
-    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
-    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
-        || _cursor == NULL) {
-      warning("Failed to allocate survivor plab/chunk array");
-      if (_survivor_plab_array  != NULL) {
-        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
-        _survivor_plab_array = NULL;
-      }
-      if (_survivor_chunk_array != NULL) {
-        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
-        _survivor_chunk_array = NULL;
-      }
-      if (_cursor != NULL) {
-        FREE_C_HEAP_ARRAY(size_t, _cursor);
-        _cursor = NULL;
-      }
-    } else {
-      _survivor_chunk_capacity = 2*max_plab_samples;
-      for (uint i = 0; i < ParallelGCThreads; i++) {
-        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
-        if (vec == NULL) {
-          warning("Failed to allocate survivor plab array");
-          for (int j = i; j > 0; j--) {
-            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
-          }
-          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
-          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
-          _survivor_plab_array = NULL;
-          _survivor_chunk_array = NULL;
-          _survivor_chunk_capacity = 0;
-          break;
-        } else {
-          ChunkArray* cur =
-            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
-                                                        max_plab_samples);
-          assert(cur->end() == 0, "Should be 0");
-          assert(cur->array() == vec, "Should be vec");
-          assert(cur->capacity() == max_plab_samples, "Error");
-        }
-      }
-    }
-  }
-  assert(   (   _survivor_plab_array  != NULL
-             && _survivor_chunk_array != NULL)
-         || (   _survivor_chunk_capacity == 0
-             && _survivor_chunk_index == 0),
-         "Error");
-
-  // Choose what strong roots should be scanned depending on verification options
-  // and perm gen collection mode.
-  if (!CMSClassUnloadingEnabled) {
-    // If class unloading is disabled we want to include all classes into the root set.
-    add_root_scanning_option(SharedHeap::SO_AllClasses);
-  } else {
-    add_root_scanning_option(SharedHeap::SO_SystemClasses);
-  }
-
-  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
-  _gc_counters = new CollectorCounters("CMS", 1);
-  _completed_initialization = true;
-  _sweep_timer.start();  // start of time
-}
-
-const char* ConcurrentMarkSweepGeneration::name() const {
-  return "concurrent mark-sweep generation";
-}
-void ConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-  }
-}
-
-// this is an optimized version of update_counters(). it takes the
-// used value as a parameter rather than computing it. 
-//
-void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::print() const {
-  Generation::print();
-  cmsSpace()->print();
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepGeneration::print_statistics() {
-  cmsSpace()->printFLCensus(0);
-}
-#endif
-
-void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (PrintGCDetails) {
-    if (Verbose) {
-      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]", 
-	level(), short_name(), s, used(), capacity());
-    } else {
-      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]", 
-	level(), short_name(), s, used() / K, capacity() / K);
-    }
-  }
-  if (Verbose) {
-    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
-              gch->used(), gch->capacity());
-  } else {
-    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
-              gch->used() / K, gch->capacity() / K);
-  }
-}
-
-size_t
-ConcurrentMarkSweepGeneration::contiguous_available() const {
-  // dld proposes an improvement in precision here. If the committed
-  // part of the space ends in a free block we should add that to
-  // uncommitted size in the calculation below. Will make this
-  // change later, staying with the approximation below for the
-  // time being. -- ysr.
-  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
-}
-
-size_t
-ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
-  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
-}
-
-size_t ConcurrentMarkSweepGeneration::max_available() const {
-  return free() + _virtual_space.uncommitted_size();
-}
-
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  // This is the most conservative test.  Full promotion is 
-  // guaranteed if this is used. The multiplicative factor is to
-  // account for the worst case "dilatation".
-  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
-  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
-    adjusted_max_promo_bytes = (double)max_uintx;
-  }
-  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
-
-  if (younger_handles_promotion_failure && !result) {
-    // Full promotion is not guaranteed because fragmentation
-    // of the cms generation can prevent the full promotion.
-    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
-
-    if (!result) {
-      // With promotion failure handling the test for the ability
-      // to support the promotion does not have to be guaranteed.
-      // Use an average of the amount promoted.
-      result = max_available() >= (size_t) 
-	gc_stats()->avg_promoted()->padded_average();
-      if (PrintGC && Verbose && result) {
-        gclog_or_tty->print_cr(
-	  "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_available(), (size_t)
-          gc_stats()->avg_promoted()->padded_average());
-      }
-    } else {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " adj_max_promo_bytes: " SIZE_FORMAT,
-          max_available(), (size_t)adjusted_max_promo_bytes);
-      }
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(
-        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " adj_max_promo_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
-    }
-  }
-  return result;
-}
-
-CompactibleSpace*
-ConcurrentMarkSweepGeneration::first_compaction_space() const {
-  return _cmsSpace;
-}
-
-void ConcurrentMarkSweepGeneration::reset_after_compaction() {
-  // Clear the promotion information.  These pointers can be adjusted
-  // along with all the other pointers into the heap but
-  // compaction is expected to be a rare event with 
-  // a heap using cms so don't do it without seeing the need.
-  if (ParallelGCThreads > 0) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _par_gc_thread_states[i]->promo.reset();
-    }
-  }
-}
-
-void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
-  blk->do_space(_cmsSpace);
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size() {
-  assert_locked_or_safepoint(Heap_lock);
-
-  // If incremental collection failed, we just want to expand
-  // to the limit.
-  if (incremental_collection_failed()) {
-    clear_incremental_collection_failed();
-    grow_to_reserved();
-    return;
-  }
-
-  size_t expand_bytes = 0;
-  double free_percentage = ((double) free()) / capacity();
-  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
-  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
-
-  // compute expansion delta needed for reaching desired free percentage
-  if (free_percentage < desired_free_percentage) {
-    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-    assert(desired_capacity >= capacity(), "invalid expansion size");
-    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
-  }
-  if (expand_bytes > 0) {
-    if (PrintGCDetails && Verbose) {
-      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
-      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
-      gclog_or_tty->print_cr("  Desired free fraction %f", 
-        desired_free_percentage);
-      gclog_or_tty->print_cr("  Maximum free fraction %f", 
-        maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capactiy "SIZE_FORMAT, capacity()/1000);
-      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT, 
-        desired_capacity/1000);
-      int prev_level = level() - 1;
-      if (prev_level >= 0) {
-        size_t prev_size = 0;
-        GenCollectedHeap* gch = GenCollectedHeap::heap();
-        Generation* prev_gen = gch->_gens[prev_level];
-        prev_size = prev_gen->capacity();
-          gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
-                                 prev_size/1000);
-      }
-      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
-	unsafe_max_alloc_nogc()/1000);
-      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT, 
-	contiguous_available()/1000);
-      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
-        expand_bytes);
-    }
-    // safe if expansion fails
-    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); 
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("  Expanded free fraction %f", 
-	((double) free()) / capacity());
-    }
-  }
-}
-
-Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
-  return cmsSpace()->freelistLock();
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
-                                                  bool   tlab) {
-  CMSSynchronousYieldRequest yr;
-  MutexLockerEx x(freelistLock(),
-                  Mutex::_no_safepoint_check_flag);
-  return have_lock_and_allocate(size, tlab);
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
-                                                  bool   tlab) {
-  assert_lock_strong(freelistLock());
-  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
-  HeapWord* res = cmsSpace()->allocate(adjustedSize);
-  // Allocate the object live (grey) if the background collector has
-  // started marking. This is necessary because the marker may
-  // have passed this address and consequently this object will
-  // not otherwise be greyed and would be incorrectly swept up.
-  // Note that if this object contains references, the writing
-  // of those references will dirty the card containing this object
-  // allowing the object to be blackened (and its references scanned)
-  // either during a preclean phase or at the final checkpoint.
-  if (res != NULL) {
-    collector()->direct_allocated(res, adjustedSize);
-    _direct_allocated_words += adjustedSize;
-    // allocation counters
-    NOT_PRODUCT(
-      _numObjectsAllocated++;
-      _numWordsAllocated += (int)adjustedSize;
-    )
-  }
-  return res;
-}
-
-// In the case of direct allocation by mutators in a generation that
-// is being concurrently collected, the object must be allocated
-// live (grey) if the background collector has started marking.
-// This is necessary because the marker may
-// have passed this address and consequently this object will
-// not otherwise be greyed and would be incorrectly swept up.
-// Note that if this object contains references, the writing
-// of those references will dirty the card containing this object
-// allowing the object to be blackened (and its references scanned)
-// either during a preclean phase or at the final checkpoint.
-void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
-  assert(_markBitMap.covers(start, size), "Out of bounds");
-  if (_collectorState >= Marking) {
-    MutexLockerEx y(_markBitMap.lock(),
-                    Mutex::_no_safepoint_check_flag);
-    // [see comments preceding SweepClosure::do_blk() below for details]
-    // 1. need to mark the object as live so it isn't collected
-    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
-    // 3. need to mark the end of the object so sweeper can skip over it
-    //    if it's uninitialized when the sweeper reaches it.
-    _markBitMap.mark(start);          // object is live
-    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
-    _markBitMap.mark(start + size - 1);
-                                      // mark end of object
-  }
-  // check that oop looks uninitialized
-  assert(oop(start)->klass() == NULL, "_klass should be NULL");
-}
-
-void CMSCollector::promoted(bool par, HeapWord* start,
-                            bool is_obj_array, size_t obj_size) {
-  assert(_markBitMap.covers(start), "Out of bounds");
-  // See comment in direct_allocated() about when objects should
-  // be allocated live.
-  if (_collectorState >= Marking) {
-    // we already hold the marking bit map lock, taken in
-    // the prologue
-    if (par) {
-      _markBitMap.par_mark(start);
-    } else {
-      _markBitMap.mark(start);
-    }
-    // We don't need to mark the object as uninitialized (as
-    // in direct_allocated above) because this is being done with the
-    // world stopped and the object will be initialized by the
-    // time the sweeper gets to look at it.
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "expect promotion only at safepoints");
-
-    if (_collectorState < Sweeping) {
-      // Mark the appropriate cards in the modUnionTable, so that
-      // this object gets scanned before the sweep. If this is
-      // not done, CMS generation references in the object might
-      // not get marked.
-      // For the case of arrays, which are otherwise precisely
-      // marked, we need to dirty the entire array, not just its head.
-      if (is_obj_array) {
-        // The [par_]mark_range() method expects mr.end() below to
-        // be aligned to the granularity of a bit's representation
-        // in the heap. In the case of the MUT below, that's a
-        // card size.
-        MemRegion mr(start,
-                     (HeapWord*)round_to((intptr_t)(start + obj_size),
-                        CardTableModRefBS::card_size /* bytes */));
-        if (par) {
-          _modUnionTable.par_mark_range(mr);
-        } else {
-	  _modUnionTable.mark_range(mr);
-        }
-      } else {  // not an obj array; we can just mark the head
-        if (par) {
-	  _modUnionTable.par_mark(start);
-        } else {
-	  _modUnionTable.mark(start);
-        }
-      }
-    }
-  }
-}
-
-static inline size_t percent_of_space(Space* space, HeapWord* addr)
-{
-  size_t delta = pointer_delta(addr, space->bottom());
-  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
-}
-
-void CMSCollector::icms_update_allocation_limits()
-{
-  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
-  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
-
-  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
-  if (CMSTraceIncrementalPacing) {
-    stats().print();
-  }
-
-  assert(duty_cycle <= 100, "invalid duty cycle");
-  if (duty_cycle != 0) {
-    // The duty_cycle is a percentage between 0 and 100; convert to words and
-    // then compute the offset from the endpoints of the space.
-    size_t free_words = eden->free() / HeapWordSize;
-    double free_words_dbl = (double)free_words;
-    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
-    size_t offset_words = (free_words - duty_cycle_words) / 2;
-
-    _icms_start_limit = eden->top() + offset_words;
-    _icms_stop_limit = eden->end() - offset_words;
-
-    // The limits may be adjusted (shifted to the right) by
-    // CMSIncrementalOffset, to allow the application more mutator time after a
-    // young gen gc (when all mutators were stopped) and before CMS starts and
-    // takes away one or more cpus.
-    if (CMSIncrementalOffset != 0) {
-      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
-      size_t adjustment = (size_t)adjustment_dbl;
-      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
-      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
-	_icms_start_limit += adjustment;
-	_icms_stop_limit = tmp_stop;
-      }
-    }
-  }
-  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
-    _icms_start_limit = _icms_stop_limit = eden->end();
-  }
-
-  // Install the new start limit.
-  eden->set_soft_end(_icms_start_limit);
-
-  if (CMSTraceIncrementalMode) {
-    gclog_or_tty->print(" icms alloc limits:  "
-			   PTR_FORMAT "," PTR_FORMAT
-			   " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
-			   _icms_start_limit, _icms_stop_limit,
-			   percent_of_space(eden, _icms_start_limit),
-			   percent_of_space(eden, _icms_stop_limit));
-    if (Verbose) {
-      gclog_or_tty->print("eden:  ");
-      eden->print_on(gclog_or_tty);
-    }
-  }
-}
-
-// Any changes here should try to maintain the invariant
-// that if this method is called with _icms_start_limit
-// and _icms_stop_limit both NULL, then it should return NULL
-// and not notify the icms thread.
-HeapWord* 
-CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
-				       size_t word_size)
-{
-  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
-  // nop.
-  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
-    if (top <= _icms_start_limit) {
-      if (CMSTraceIncrementalMode) {
-	space->print_on(gclog_or_tty);
-	gclog_or_tty->stamp();
-	gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
-			       ", new limit=" PTR_FORMAT
-			       " (" SIZE_FORMAT "%%)",
-			       top, _icms_stop_limit,
-			       percent_of_space(space, _icms_stop_limit));
-      }
-      ConcurrentMarkSweepThread::start_icms();
-      assert(top < _icms_stop_limit, "Tautology"); 
-      if (word_size < pointer_delta(_icms_stop_limit, top)) { 
-	return _icms_stop_limit;
-      }
-
-      // The allocation will cross both the _start and _stop limits, so do the
-      // stop notification also and return end().
-      if (CMSTraceIncrementalMode) {
-	space->print_on(gclog_or_tty);
-	gclog_or_tty->stamp();
-	gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
-			       ", new limit=" PTR_FORMAT
-			       " (" SIZE_FORMAT "%%)",
-			       top, space->end(),
-			       percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (top <= _icms_stop_limit) {
-      if (CMSTraceIncrementalMode) {
-	space->print_on(gclog_or_tty);
-	gclog_or_tty->stamp();
-	gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
-			       ", new limit=" PTR_FORMAT
-			       " (" SIZE_FORMAT "%%)",
-			       top, space->end(),
-			       percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (CMSTraceIncrementalMode) {
-      space->print_on(gclog_or_tty);
-      gclog_or_tty->stamp();
-      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
-			     ", new limit=" PTR_FORMAT,
-			     top, NULL);
-    }
-  }
-
-  return NULL;
-}
-
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
-  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
-  // allocate, copy and if necessary update promoinfo --
-  // delegate to underlying space.
-  assert_lock_strong(freelistLock());
-
-#ifndef	PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
-    return NULL;
-  }
-#endif	// #ifndef PRODUCT
-
-  oop res = _cmsSpace->promote(obj, obj_size, ref);
-  if (res == NULL) {
-    // expand and retry
-    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
-    expand(s*HeapWordSize, MinHeapDeltaBytes, 
-      CMSExpansionCause::_satisfy_promotion);
-    // Since there's currently no next generation, we don't try to promote
-    // into a more senior generation.
-    assert(next_gen() == NULL, "assumption, based upon which no attempt "
-                               "is made to pass on a possibly failing "
-                               "promotion to next generation");
-    res = _cmsSpace->promote(obj, obj_size, ref);
-  }
-  if (res != NULL) {
-    // See comment in allocate() about when objects should
-    // be allocated live.
-    assert(obj->is_oop(), "Will dereference klass pointer below");
-    collector()->promoted(false,           // Not parallel
-                          (HeapWord*)res, obj->is_objArray(), obj_size);
-    // promotion counters
-    NOT_PRODUCT(
-      _numObjectsPromoted++;
-      _numWordsPromoted +=
-        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
-    )
-  }
-  return res;
-}
-
-
-HeapWord*
-ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
-					     HeapWord* top,
-					     size_t word_sz)
-{
-  return collector()->allocation_limit_reached(space, top, word_sz);
-}
-
-// Things to support parallel young-gen collection.
-oop
-ConcurrentMarkSweepGeneration::par_promote(int thread_num,
-					   oop old, markOop m,
-					   size_t word_sz) {
-#ifndef	PRODUCT
-  if (Universe::heap()->promotion_should_fail()) {
-    return NULL;
-  }
-#endif	// #ifndef PRODUCT
-
-  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  PromotionInfo* promoInfo = &ps->promo;
-  // if we are tracking promotions, then first ensure space for
-  // promotion (including spooling space for saving header if necessary).
-  // then allocate and copy, then track promoted info if needed.
-  // When tracking (see PromotionInfo::track()), the mark word may
-  // be displaced and in this case restoration of the mark word
-  // occurs in the (oop_since_save_marks_)iterate phase.
-  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
-    // Out of space for allocating spooling buffers;
-    // try expanding and allocating spooling buffers.
-    if (!expand_and_ensure_spooling_space(promoInfo)) {
-      return NULL;
-    }
-  }
-  assert(promoInfo->has_spooling_space(), "Control point invariant");
-  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
-  if (obj_ptr == NULL) {
-     obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
-     if (obj_ptr == NULL) {
-       return NULL;
-     }
-  }
-  oop obj = oop(obj_ptr);
-  assert(obj->klass() == NULL, "Object should be uninitialized here.");
-  // Otherwise, copy the object.  Here we must be careful to insert the
-  // klass pointer last, since this marks the block as an allocated object.
-  HeapWord* old_ptr = (HeapWord*)old;
-  if (word_sz > (size_t)oopDesc::header_size()) {
-    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
-				 obj_ptr + oopDesc::header_size(),
-				 word_sz - oopDesc::header_size());
-  }
-  // Restore the mark word copied above.
-  obj->set_mark(m);
-  // Now we can track the promoted object, if necessary.  We take care 
-  // To delay the transition from uninitialized to full object
-  // (i.e., insertion of klass pointer) until after, so that it
-  // atomically becomes a promoted object.
-  if (promoInfo->tracking()) {
-    promoInfo->track((PromotedObject*)obj, old->klass());
-  }
-  // Finally, install the klass pointer.
-  obj->set_klass(old->klass());
-
-  assert(old->is_oop(), "Will dereference klass ptr below");
-  collector()->promoted(true,          // parallel
-                        obj_ptr, old->is_objArray(), word_sz);
-  
-  NOT_PRODUCT(
-    Atomic::inc(&_numObjectsPromoted);
-    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
-                &_numWordsPromoted);
-  )
-
-  return obj; 
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_promote_alloc_undo(int thread_num,
-		       HeapWord* obj, size_t word_sz) {
-  // CMS does not support promotion undo.
-  ShouldNotReachHere();
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_promote_alloc_done(int thread_num) {
-  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  ps->lab.retire();
-#if CFLS_LAB_REFILL_STATS
-  if (thread_num == 0) {
-    _cmsSpace->print_par_alloc_stats();
-  }
-#endif
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_oop_since_save_marks_iterate_done(int thread_num) {
-  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  ParScanWithoutBarrierClosure* dummy_cl = NULL;
-  ps->promo.promoted_oops_iterate_nv(dummy_cl);
-}
-
-// XXXPERM
-bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
-                                                   size_t size,
-                                                   bool   tlab)
-{
-  // We allow a STW collection only if a full
-  // collection was requested.
-  return full || should_allocate(size, tlab); // FIX ME !!!
-  // This and promotion failure handling are connected at the
-  // hip and should be fixed by untying them.
-}
-
-bool CMSCollector::shouldConcurrentCollect() {
-  if (_full_gc_requested) {
-    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
-                             " gc request");
-    }
-    return true;
-  }
-
-  // For debugging purposes, change the type of collection.
-  // If the rotation is not on the concurrent collection
-  // type, don't start a concurrent collection.
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes && 
-	(_cmsGen->debug_collection_type() != 
-	  ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
-      assert(_cmsGen->debug_collection_type() != 
-	ConcurrentMarkSweepGeneration::Unknown_collection_type,
-	"Bad cms collection type");
-      return false;
-    }
-  )
-
-  FreelistLocker x(this);
-  // ------------------------------------------------------------------
-  // Print out lots of information which affects the initiation of
-  // a collection.
-  if (PrintCMSInitiationStatistics && stats().valid()) {
-    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("");
-    stats().print_on(gclog_or_tty);
-    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
-      stats().time_until_cms_gen_full());
-    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
-    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
-                           _cmsGen->contiguous_available());
-    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
-    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
-    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
-    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
-  }
-  // ------------------------------------------------------------------
-
-  // If the estimated time to complete a cms collection (cms_duration())
-  // is less than the estimated time remaining until the cms generation
-  // is full, start a collection.
-  if (!UseCMSInitiatingOccupancyOnly) {
-    if (stats().valid()) {
-      if (stats().time_until_cms_start() == 0.0) {
-        return true;
-      }
-    } else {
-      // We want to conservatively collect somewhat early in order
-      // to try and "bootstrap" our CMS/promotion statistics;
-      // this branch will not fire after the first successful CMS
-      // collection because the stats should then be valid.
-      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
-        if (Verbose && PrintGCDetails) {
-          gclog_or_tty->print_cr(
-            " CMSCollector: collect for bootstrapping statistics:"
-            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
-            _bootstrap_occupancy);
-        }
-        return true;
-      }
-    }
-  }
-
-  // Otherwise, we start a collection cycle if either the perm gen or
-  // old gen want a collection cycle started. Each may use
-  // an appropriate criterion for making this decision.
-  // XXX We need to make sure that the gen expansion
-  // criterion dovetails well with this.
-  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print_cr("CMS old gen initiated");
-    }
-    return true;
-  }
-
-  if (CMSClassUnloadingEnabled && CMSPermGenSweepingEnabled &&
-      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
-    if (Verbose && PrintGCDetails) {
-     gclog_or_tty->print_cr("CMS perm gen initiated");
-    }
-    return true;
-  }
-
-  return false;
-}
-
-// Clear _expansion_cause fields of constituent generations
-void CMSCollector::clear_expansion_cause() {
-  _cmsGen->clear_expansion_cause();
-  _permGen->clear_expansion_cause();
-}
-
-bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
-  double initiatingOccupancy) {
-  // We should be conservative in starting a collection cycle.  To
-  // start too eagerly runs the risk of collecting too often in the
-  // extreme.  To collect too rarely falls back on full collections,
-  // which works, even if not optimum in terms of concurrent work.
-  // As a work around for too eagerly collecting, use the flag
-  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
-  // giving the user an easily understandable way of controlling the
-  // collections.
-  // We want to start a new collection cycle if any of the following
-  // conditions hold:
-  // . our current occupancy exceeds the initiating occupancy, or
-  // . we recently needed to expand and have not since that expansion,
-  //   collected, or
-  // . we are not using adaptive free lists and linear allocation is
-  //   going to fail, or
-  // . (for old gen) incremental collection has already failed or
-  //   may soon fail in the near future as we may not be able to absorb
-  //   promotions.
-  assert_lock_strong(freelistLock());
-
-  if (occupancy() > initiatingOccupancy) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
-	short_name(), occupancy(), initiatingOccupancy);
-    }
-    return true;
-  }
-  if (UseCMSInitiatingOccupancyOnly) {
-    return false;
-  }
-  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because expanded for allocation ",
-	short_name());
-    }
-    return true;
-  }
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
-	short_name());
-    }
-    return true;
-  }
-  if (!_cmsSpace->adaptive_freelists() && 
-      _cmsSpace->linearAllocationWouldFail()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because of linAB ",
-	short_name());
-    }
-    return true;
-  }
-  return false;
-}
-
-void ConcurrentMarkSweepGeneration::collect(bool   full,
-                                            bool   clear_all_soft_refs,
-                                            size_t size,
-                                            bool   tlab)
-{
-  collector()->collect(full, clear_all_soft_refs, size, tlab);
-}
-
-void CMSCollector::collect(bool   full,
-                           bool   clear_all_soft_refs,
-                           size_t size,
-                           bool   tlab)
-{
-  if (!UseCMSCollectionPassing && _collectorState > Idling) {
-    // For debugging purposes skip the collection if the state
-    // is not currently idle
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", 
-	Thread::current(), full, _collectorState);
-    }
-    return;
-  }
-
-  // The following "if" branch is present for defensive reasons.
-  // In the current uses of this interface, it can be replaced with:
-  // assert(!GC_locker.is_active(), "Can't be called otherwise");
-  // But I am not placing that assert here to allow future
-  // generality in invoking this interface.
-  if (GC_locker::is_active()) {
-    // A consistency test for GC_locker
-    assert(GC_locker::needs_gc(), "Should have been set already");
-    // Skip this foreground collection, instead
-    // expanding the heap if necessary.
-    // Need the free list locks for the call to free() in compute_new_size()
-    compute_new_size();
-    return;
-  }
-  acquire_control_and_collect(full, clear_all_soft_refs);
-  _full_gcs_since_conc_gc++;
-
-}
-
-void CMSCollector::request_full_gc(unsigned int full_gc_count) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  unsigned int gc_count = gch->total_full_collections();
-  if (gc_count == full_gc_count) {
-    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
-    _full_gc_requested = true;
-    CGC_lock->notify();   // nudge CMS thread
-  }
-}
-  
-
-// The foreground and background collectors need to coordinate in order
-// to make sure that they do not mutually interfere with CMS collections.
-// When a background collection is active,
-// the foreground collector may need to take over (preempt) and
-// synchronously complete an ongoing collection. Depending on the 
-// frequency of the background collections and the heap usage
-// of the application, this preemption can be seldom or frequent.
-// There are only certain
-// points in the background collection that the "collection-baton"
-// can be passed to the foreground collector.
-//
-// The foreground collector will wait for the baton before
-// starting any part of the collection.  The foreground collector
-// will only wait at one location.
-//
-// The background collector will yield the baton before starting a new
-// phase of the collection (e.g., before initial marking, marking from roots,
-// precleaning, final re-mark, sweep etc.)  This is normally done at the head
-// of the loop which switches the phases. The background collector does some
-// of the phases (initial mark, final re-mark) with the world stopped.
-// Because of locking involved in stopping the world,
-// the foreground collector should not block waiting for the background
-// collector when it is doing a stop-the-world phase.  The background
-// collector will yield the baton at an additional point just before
-// it enters a stop-the-world phase.  Once the world is stopped, the
-// background collector checks the phase of the collection.  If the
-// phase has not changed, it proceeds with the collection.  If the
-// phase has changed, it skips that phase of the collection.  See
-// the comments on the use of the Heap_lock in collect_in_background().
-//
-// Variable used in baton passing.
-//   _foregroundGCIsActive - Set to true by the foreground collector when
-//	it wants the baton.  The foreground clears it when it has finished
-//	the collection.
-//   _foregroundGCShouldWait - Set to true by the background collector
-//        when it is running.  The foreground collector waits while
-//	_foregroundGCShouldWait is true.
-//  CGC_lock - monitor used to protect access to the above variables
-//	and to notify the foreground and background collectors.
-//  _collectorState - current state of the CMS collection.
-// 
-// The foreground collector 
-//   acquires the CGC_lock
-//   sets _foregroundGCIsActive
-//   waits on the CGC_lock for _foregroundGCShouldWait to be false
-//     various locks acquired in preparation for the collection
-//     are released so as not to block the background collector
-//     that is in the midst of a collection
-//   proceeds with the collection
-//   clears _foregroundGCIsActive
-//   returns
-//
-// The background collector in a loop iterating on the phases of the
-//	collection
-//   acquires the CGC_lock
-//   sets _foregroundGCShouldWait
-//   if _foregroundGCIsActive is set
-//     clears _foregroundGCShouldWait, notifies _CGC_lock
-//     waits on _CGC_lock for _foregroundGCIsActive to become false
-//     and exits the loop.
-//   otherwise
-//     proceed with that phase of the collection
-//     if the phase is a stop-the-world phase,
-//	 yield the baton once more just before enqueueing
-//	 the stop-world CMS operation (executed by the VM thread).
-//   returns after all phases of the collection are done
-//   
-
-void CMSCollector::acquire_control_and_collect(bool full,
-	bool clear_all_soft_refs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(!Thread::current()->is_ConcurrentGC_thread(),
-         "shouldn't try to acquire control from self!");
-
-  // Start the protocol for acquiring control of the
-  // collection from the background collector (aka CMS thread).
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-  // Remember the possibly interrupted state of an ongoing
-  // concurrent collection
-  CollectorState first_state = _collectorState;
-
-  // Signal to a possibly ongoing concurrent collection that
-  // we want to do a foreground collection.
-  _foregroundGCIsActive = true;
-
-  // Disable incremental mode during a foreground collection.
-  ICMSDisabler icms_disabler;
-
-  // release locks and wait for a notify from the background collector
-  // releasing the locks in only necessary for phases which
-  // do yields to improve the granularity of the collection.
-  assert_lock_strong(bitMapLock());
-  // We need to lock the Free list lock for the space that we are
-  // currently collecting.
-  assert(haveFreelistLocks(), "Must be holding free list locks");
-  bitMapLock()->unlock();
-  releaseFreelistLocks();
-  {
-    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    if (_foregroundGCShouldWait) {
-      // We are going to be waiting for action for the CMS thread;
-      // it had better not be gone (for instance at shutdown)!
-      assert(ConcurrentMarkSweepThread::cmst() != NULL,
-             "CMS thread must be running");
-      // Wait here until the background collector gives us the go-ahead
-      ConcurrentMarkSweepThread::clear_CMS_flag(
-        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
-      // Get a possibly blocked CMS thread going:
-      //   Note that we set _foregroundGCIsActive true above,
-      //   without protection of the CGC_lock.
-      CGC_lock->notify();
-      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
-             "Possible deadlock");
-      while (_foregroundGCShouldWait) {
-        // wait for notification
-        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
-        // Possibility of delay/starvation here, since CMS token does
-        // not know to give priority to VM thread? Actually, i think
-        // there wouldn't be any delay/starvation, but the proof of
-        // that "fact" (?) appears non-trivial. XXX 20011219YSR
-      }
-      ConcurrentMarkSweepThread::set_CMS_flag(
-        ConcurrentMarkSweepThread::CMS_vm_has_token);
-    }
-  }
-  // The CMS_token is already held.  Get back the other locks.
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-  getFreelistLocks();
-  bitMapLock()->lock_without_safepoint_check();
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
-      INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
-    gclog_or_tty->print_cr("	gets control with state %d", _collectorState);
-  }
-
-  // Check if we need to do a compaction, or if not, whether
-  // we need to start the mark-sweep from scratch.
-  bool should_compact    = false;
-  bool should_start_over = false;
-  decide_foreground_collection_type(clear_all_soft_refs,
-    &should_compact, &should_start_over);
-
-NOT_PRODUCT(
-  if (RotateCMSCollectionTypes) {
-    if (_cmsGen->debug_collection_type() == 
-	ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
-      should_compact = true;
-    } else if (_cmsGen->debug_collection_type() == 
-	       ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
-      should_compact = false;
-    }
-  }
-)
-
-  if (PrintGCDetails && first_state > Idling) {
-    GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
-    if (GCCause::is_user_requested_gc(cause) ||
-	GCCause::is_serviceability_requested_gc(cause)) {
-      gclog_or_tty->print(" (concurrent mode interrupted)");
-    } else {
-      gclog_or_tty->print(" (concurrent mode failure)");
-    }
-  }
-
-  if (should_compact) {
-    // If the collection is being acquired from the background
-    // collector, there may be references on the discovered
-    // references lists that have NULL referents (being those
-    // that were concurrently cleared by a mutator) or
-    // that are no longer active (having been enqueued concurrently
-    // by the mutator).
-    // Scrub the list of those references because Mark-Sweep-Compact
-    // code assumes referents are not NULL and that all discovered
-    // Reference objects are active.
-    ref_processor()->clean_up_discovered_references();
-
-    do_compaction_work(clear_all_soft_refs);
-
-    // Has the GC time limit been exceeded?
-    check_gc_time_limit();
-
-  } else {
-    do_mark_sweep_work(clear_all_soft_refs, first_state,
-      should_start_over);
-  }
-  // Reset the expansion cause, now that we just completed
-  // a collection cycle.
-  clear_expansion_cause();
-  _foregroundGCIsActive = false;
-  return;
-}
-
-void CMSCollector::check_gc_time_limit() {
-
-  // Ignore explicit GC's.  Exiting here does not set the flag and
-  // does not reset the count.  Updating of the averages for system
-  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
-  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
-  if (GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    return;
-  }
-
-  // Calculate the fraction of the CMS generation was freed during
-  // the last collection. 
-  // Only consider the STW compacting cost for now.
-  //
-  // Note that the gc time limit test only works for the collections
-  // of the young gen + tenured gen and not for collections of the
-  // permanent gen.  That is because the calculation of the space
-  // freed by the collection is the free space in the young gen +
-  // tenured gen.
-
-  double fraction_free = 
-    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
-  if ((100.0 * size_policy()->compacting_gc_cost()) > 
-	 ((double) GCTimeLimit) &&
-	((fraction_free * 100) < GCHeapFreeLimit)) {
-    size_policy()->inc_gc_time_limit_count();
-    if (UseGCOverheadLimit && 
-	(size_policy()->gc_time_limit_count() > 
-	 AdaptiveSizePolicyGCTimeLimitThreshold)) {
-      size_policy()->set_gc_time_limit_exceeded(true);
-      // Avoid consecutive OOM due to the gc time limit by resetting
-      // the counter.
-      size_policy()->reset_gc_time_limit_count();
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    } else {
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC would exceed overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    }
-  } else {
-    size_policy()->reset_gc_time_limit_count();
-  }
-}
-
-// Resize the perm generation and the tenured generation
-// after obtaining the free list locks for the
-// two generations.
-void CMSCollector::compute_new_size() {
-  assert_locked_or_safepoint(Heap_lock);
-  FreelistLocker z(this);
-  _permGen->compute_new_size();
-  _cmsGen->compute_new_size();
-}
-
-// A work method used by foreground collection to determine
-// what type of collection (compacting or not, continuing or fresh)
-// it should do.
-// NOTE: the intent is to make UseCMSCompactAtFullCollection
-// and CMSCompactWhenClearAllSoftRefs the default in the future
-// and do away with the flags after a suitable period.
-void CMSCollector::decide_foreground_collection_type(
-  bool clear_all_soft_refs, bool* should_compact,
-  bool* should_start_over) {
-  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-  // flag is set, and we have either requested a System.gc() or
-  // the number of full gc's since the last concurrent cycle
-  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-  // or if an incremental collection has failed
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "You may want to check the correctness of the following");
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail()) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-  *should_compact =
-    UseCMSCompactAtFullCollection &&
-    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-     GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail());
-  *should_start_over = false;
-  if (clear_all_soft_refs && !*should_compact) {
-    // We are about to do a last ditch collection attempt
-    // so it would normally make sense to do a compaction
-    // to reclaim as much space as possible.
-    if (CMSCompactWhenClearAllSoftRefs) {
-      // Default: The rationale is that in this case either
-      // we are past the final marking phase, in which case
-      // we'd have to start over, or so little has been done
-      // that there's little point in saving that work. Compaction
-      // appears to be the sensible choice in either case.
-      *should_compact = true;
-    } else {
-      // We have been asked to clear all soft refs, but not to
-      // compact. Make sure that we aren't past the final checkpoint
-      // phase, for that is where we process soft refs. If we are already
-      // past that phase, we'll need to redo the refs discovery phase and
-      // if necessary clear soft refs that weren't previously
-      // cleared. We do so by remembering the phase in which
-      // we came in, and if we are past the refs processing
-      // phase, we'll choose to just redo the mark-sweep
-      // collection from scratch.
-      if (_collectorState > FinalMarking) {
-        // We are past the refs processing phase;
-        // start over and do a fresh synchronous CMS cycle
-        _collectorState = Resetting; // skip to reset to start new cycle
-        reset(false /* == !asynch */);
-        *should_start_over = true;
-      } // else we can continue a possibly ongoing current cycle
-    }
-  }
-}
-
-// A work method used by the foreground collector to do
-// a mark-sweep-compact.
-void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
-  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
-    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
-      "collections passed to foreground collector", _full_gcs_since_conc_gc);
-  }
-
-  // Sample collection interval time and reset for collection pause.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_begin();
-  }
-
-  // Temporarily widen the span of the weak reference processing to
-  // the entire heap.
-  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
-  ReferenceProcessorSpanMutator x(ref_processor(), new_span);
-
-  // Temporarily, clear the "is_alive_non_header" field of the
-  // reference processor.
-  ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
-
-  // Temporarily make reference _processing_ single threaded (non-MT).
-  ReferenceProcessorMTProcMutator z(ref_processor(), false);
-
-  // Temporarily make refs discovery atomic
-  ReferenceProcessorAtomicMutator w(ref_processor(), true);
-
-  ref_processor()->set_enqueuing_is_done(false);
-  ref_processor()->enable_discovery();
-  // If an asynchronous collection finishes, the _modUnionTable is
-  // all clear.  If we are assuming the collection from an asynchronous
-  // collection, clear the _modUnionTable.
-  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
-    "_modUnionTable should be clear if the baton was not passed");
-  _modUnionTable.clear_all();
-
-  // We must adjust the allocation statistics being maintained
-  // in the free list space. We do so by reading and clearing
-  // the sweep timer and updating the block flux rate estimates below.
-  assert(_sweep_timer.is_active(), "We should never see the timer inactive");
-  _sweep_timer.stop();
-  // Note that we do not use this sample to update the _sweep_estimate.
-  _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
-                                          _sweep_estimate.padded_average());
-  
-  GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
-    ref_processor(), clear_all_soft_refs);
-  #ifdef ASSERT
-    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
-    size_t free_size = cms_space->free();
-    assert(free_size ==
-           pointer_delta(cms_space->end(), cms_space->compaction_top())
-           * HeapWordSize,
-      "All the free space should be compacted into one chunk at top");
-    assert(cms_space->dictionary()->totalChunkSize(
-                                      debug_only(cms_space->freelistLock())) == 0 ||
-           cms_space->totalSizeInIndexedFreeLists() == 0,
-      "All the free space should be in a single chunk");
-    size_t num = cms_space->totalCount();
-    assert((free_size == 0 && num == 0) ||
-           (free_size > 0  && (num == 1 || num == 2)),
-         "There should be at most 2 free chunks after compaction");
-  #endif // ASSERT
-  _collectorState = Resetting;
-  assert(_restart_addr == NULL,
-         "Should have been NULL'd before baton was passed");
-  reset(false /* == !asynch */);
-  _cmsGen->reset_after_compaction();
-
-  if (verifying() && !CMSPermGenSweepingEnabled) {
-    perm_gen_verify_bit_map()->clear_all();
-  }
-
-  // Clear any data recorded in the PLAB chunk arrays.
-  if (_survivor_plab_array != NULL) {
-    reset_survivor_plab_arrays();
-  }
-
-  // Adjust the per-size allocation stats for the next epoch.
-  _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
-  // Restart the "sweep timer" for next epoch.
-  _sweep_timer.reset();
-  _sweep_timer.start();
-  
-  // Sample collection pause time and reset for collection interval.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_end(gch->gc_cause());
-  }
-
-  // For a mark-sweep-compact, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-// A work method used by the foreground collector to do
-// a mark-sweep, after taking over from a possibly on-going
-// concurrent mark-sweep collection.
-void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-  CollectorState first_state, bool should_start_over) {
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-      "collector with count %d",
-      _full_gcs_since_conc_gc);
-  }
-  switch (_collectorState) {
-    case Idling:
-      if (first_state == Idling || should_start_over) {
-        // The background GC was not active, or should
-        // restarted from scratch;  start the cycle.
-        _collectorState = InitialMarking;
-      }
-      // If first_state was not Idling, then a background GC
-      // was in progress and has now finished.  No need to do it
-      // again.  Leave the state as Idling.
-      break;
-    case Precleaning:
-      // In the foreground case don't do the precleaning since
-      // it is not done concurrently and there is extra work
-      // required.
-      _collectorState = FinalMarking;
-  }
-  if (PrintGCDetails &&
-      (_collectorState > Idling ||
-       !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
-    gclog_or_tty->print(" (concurrent mode failure)");
-  }
-  collect_in_foreground(clear_all_soft_refs);
-
-  // For a mark-sweep, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-
-void CMSCollector::getFreelistLocks() const {
-  // Get locks for all free lists in all generations that this
-  // collector is responsible for
-  _cmsGen->freelistLock()->lock_without_safepoint_check();
-  _permGen->freelistLock()->lock_without_safepoint_check();
-}
-
-void CMSCollector::releaseFreelistLocks() const {
-  // Release locks for all free lists in all generations that this
-  // collector is responsible for
-  _cmsGen->freelistLock()->unlock();
-  _permGen->freelistLock()->unlock();
-}
-
-bool CMSCollector::haveFreelistLocks() const {
-  // Check locks for all free lists in all generations that this
-  // collector is responsible for
-  assert_lock_strong(_cmsGen->freelistLock());
-  assert_lock_strong(_permGen->freelistLock());
-  PRODUCT_ONLY(ShouldNotReachHere());
-  return true;
-}
-
-// A utility class that is used by the CMS collector to
-// temporarily "release" the foreground collector from its
-// usual obligation to wait for the background collector to
-// complete an ongoing phase before proceeding.
-class ReleaseForegroundGC: public StackObj {
- private:
-  CMSCollector* _c;
- public:
-  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
-    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
-    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    // allow a potentially blocked foreground collector to proceed
-    _c->_foregroundGCShouldWait = false;
-    if (_c->_foregroundGCIsActive) {
-      CGC_lock->notify();
-    }
-    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-           "Possible deadlock");
-  }
-
-  ~ReleaseForegroundGC() {
-    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
-    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    _c->_foregroundGCShouldWait = true;
-  }
-};
-
-// There are separate collect_in_background and collect_in_foreground because of
-// the different locking requirements of the background collector and the
-// foreground collector.  There was originally an attempt to share
-// one "collect" method between the background collector and the foreground
-// collector but the if-then-else required made it cleaner to have
-// separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
-  assert(Thread::current()->is_ConcurrentGC_thread(),
-    "A CMS asynchronous collection is only allowed on a CMS thread.");
-    
-  GenCollectedHeap* gch = GenCollectedHeap::heap(); 
-  {
-    bool safepoint_check = Mutex::_no_safepoint_check_flag;
-    MutexLockerEx hl(Heap_lock, safepoint_check);
-    MutexLockerEx x(CGC_lock, safepoint_check);
-    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
-      // The foreground collector is active or we're
-      // not using asynchronous collections.  Skip this
-      // background collection.
-      assert(!_foregroundGCShouldWait, "Should be clear");
-      return;
-    } else {
-      assert(_collectorState == Idling, "Should be idling before start.");
-      _collectorState = InitialMarking;
-      // Reset the expansion cause, now that we are about to begin
-      // a new cycle.
-      clear_expansion_cause();
-    }
-    _full_gc_requested = false;   // clear all outstanding requests
-    // Signal that we are about to start a collection
-    gch->increment_total_full_collections();  // ... starting a collection cycle
-    _collection_count_start = gch->total_full_collections();
-  }
-
-  // Used for PrintGC
-  size_t prev_used;
-  if (PrintGC && Verbose) {
-    prev_used = _cmsGen->used(); // XXXPERM
-  }
-
-  // The change of the collection state is normally done at this level;
-  // the exceptions are phases that are executed while the world is
-  // stopped.  For those phases the change of state is done while the
-  // world is stopped.  For baton passing purposes this allows the 
-  // background collector to finish the phase and change state atomically.
-  // The foreground collector cannot wait on a phase that is done
-  // while the world is stopped because the foreground collector already
-  // has the world stopped and would deadlock.
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 
-	Thread::current(), _collectorState);
-    }
-    // The foreground collector 
-    //   holds the Heap_lock throughout its collection.
-    //	 holds the CMS token (but not the lock)
-    //     except while it is waiting for the background collector to yield.
-    //
-    // The foreground collector should be blocked (not for long)
-    //   if the background collector is about to start a phase
-    //   executed with world stopped.  If the background
-    //   collector has already started such a phase, the
-    //   foreground collector is blocked waiting for the
-    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
-    //   are executed in the VM thread.
-    //
-    // The locking order is
-    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
-    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
-    //   CMS token  (claimed in
-    //                stop_world_and_do() -->
-    //                  safepoint_synchronize() -->
-    //                    CMSThread::synchronize())
-
-    {
-      // Check if the FG collector wants us to yield.
-      CMSTokenSync x(true); // is cms thread
-      if (waitForForegroundGC()) {
-        // We yielded to a foreground GC, nothing more to be
-        // done this round.
-        assert(_foregroundGCShouldWait == false, "We set it to false in "
-               "waitForForegroundGC()");
-        if (TraceCMSState) {
-          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
-            " exiting collection CMS state %d", 
-            Thread::current(), _collectorState);
-        }
-        return;
-      } else {
-        // The background collector can run but check to see if the
-        // foreground collector has done a collection while the
-        // background collector was waiting to get the CGC_lock
-        // above.  If yes, break so that _foregroundGCShouldWait
-        // is cleared before returning.
-        if (_collectorState == Idling) {
-          break;
-        }
-      }
-    }
-
-    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
-      "should be waiting");
-
-    switch (_collectorState) {
-      case InitialMarking:
-        {
-          ReleaseForegroundGC x(this);
-	  stats().record_cms_begin();
-
-          VM_CMS_Initial_Mark_Operation initial_mark_op(this);
-	  VMThread::execute(&initial_mark_op);
-        }
-	// The collector state may be any legal state at this point
-	// since the background collector may have yielded to the
-	// foreground collector.
-	break;
-      case Marking:
-	// initial marking in checkpointRootsInitialWork has been completed
-        if (markFromRoots(true)) { // we were successful
-	  assert(_collectorState == Precleaning, "Collector state should "
-	    "have changed");
-        } else {
-          assert(_foregroundGCIsActive, "Internal state inconsistency");
-        }
-	break;
-      case Precleaning:
-	if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_begin();
-	}
-	// marking from roots in markFromRoots has been completed
-	preclean();
-	if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-	}
-	assert(_collectorState == AbortablePreclean ||
-               _collectorState == FinalMarking,
-               "Collector state should have changed");
-	break;
-      case AbortablePreclean:
-	if (UseAdaptiveSizePolicy) {
-        size_policy()->concurrent_phases_resume();
-	}
-        abortable_preclean();
-	if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-	}
-        assert(_collectorState == FinalMarking, "Collector state should "
-          "have changed");
-        break;
-      case FinalMarking:
-        {
-          ReleaseForegroundGC x(this);
-
-          VM_CMS_Final_Remark_Operation final_remark_op(this);
-          VMThread::execute(&final_remark_op);
-	  }
-        assert(_foregroundGCShouldWait, "block post-condition");
-	break;
-      case Sweeping:
-	if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_begin();
-	}
-	// final marking in checkpointRootsFinal has been completed
-        sweep(true);
-	assert(_collectorState == Resizing, "Collector state change "
-	  "to Resizing must be done under the free_list_lock");
-        _full_gcs_since_conc_gc = 0;
-
-        // Stop the timers for adaptive size policy for the concurrent phases
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_end();
-          size_policy()->concurrent_phases_end(gch->gc_cause(),
-					     gch->prev_gen(_cmsGen)->capacity(),
-                                             _cmsGen->free());
-	}
-
-      case Resizing: {
-        // Sweeping has been completed...
-        // At this point the background collection has completed.
-        // Don't move the call to compute_new_size() down
-        // into code that might be executed if the background
-        // collection was preempted.
-        {
-          ReleaseForegroundGC x(this);   // unblock FG collection
-          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
-          CMSTokenSync        z(true);   // not strictly needed.
-          if (_collectorState == Resizing) {
-            compute_new_size();
-            _collectorState = Resetting;
-          } else {
-            assert(_collectorState == Idling, "The state should only change"
-                   " because the foreground collector has finished the collection");
-          }
-        }
-        break;
-      }
-      case Resetting:
-	// CMS heap resizing has been completed
-        reset(true);
-	assert(_collectorState == Idling, "Collector state should "
-	  "have changed");
-	stats().record_cms_end();
-	// Don't move the concurrent_phases_end() and compute_new_size()
-	// calls to here because a preempted background collection
-	// has it's state set to "Resetting".
-	break;
-      case Idling:
-      default:
-	ShouldNotReachHere();
-	break;
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("	Thread " INTPTR_FORMAT " done - next CMS state %d", 
-	Thread::current(), _collectorState);
-    }
-    assert(_foregroundGCShouldWait, "block post-condition");
-  }
-
-  // Should this be in gc_epilogue? 
-  collector_policy()->counters()->update_counters();
-
-  {
-    // Clear _foregroundGCShouldWait and, in the event that the
-    // foreground collector is waiting, notify it, before
-    // returning.
-    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    _foregroundGCShouldWait = false;
-    if (_foregroundGCIsActive) {
-      CGC_lock->notify();
-    }
-    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-           "Possible deadlock");
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
-      " exiting collection CMS state %d", 
-      Thread::current(), _collectorState);
-  }
-  if (PrintGC && Verbose) {
-    _cmsGen->print_heap_change(prev_used);
-  }
-}
-
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
-  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-         "Foreground collector should be waiting, not executing");
-  assert(Thread::current()->is_VM_thread(), "A foreground collection" 
-    "may only be done by the VM Thread with the world stopped");
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-
-  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, 
-    true, gclog_or_tty);)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->ms_collection_begin();
-  }
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-
-  HandleMark hm;  // Discard invalid handles created during verification
-
-  if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify(true);
-  }
-
-  bool init_mark_was_synchronous = false; // until proven otherwise
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 
-	Thread::current(), _collectorState);
-    }
-    switch (_collectorState) {
-      case InitialMarking:
-        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
-        checkpointRootsInitial(false);
-	assert(_collectorState == Marking, "Collector state should have changed"
-	  " within checkpointRootsInitial()");
-	break;
-      case Marking:
-	// initial marking in checkpointRootsInitialWork has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before initial mark: ");
-          Universe::verify(true);
-        }
-        { 
-          bool res = markFromRoots(false);
-	  assert(res && _collectorState == FinalMarking, "Collector state should "
-	    "have changed");
-	  break;
-        }
-      case FinalMarking:
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before re-mark: ");
-          Universe::verify(true);
-        }
-        checkpointRootsFinal(false, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-	assert(_collectorState == Sweeping, "Collector state should not "
-	  "have changed within checkpointRootsFinal()");
-	break;
-      case Sweeping:
-	// final marking in checkpointRootsFinal has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before sweep: ");
-          Universe::verify(true);
-        }
-        sweep(false);
-	assert(_collectorState == Resizing, "Incorrect state");
-	break;
-      case Resizing: {
-        // Sweeping has been completed; the actual resize in this case
-        // is done separately; nothing to be done in this state.
-        _collectorState = Resetting;
-        break;
-      }
-      case Resetting:
-	// The heap has been resized.
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before reset: ");
-          Universe::verify(true);
-        }
-        reset(false);
-	assert(_collectorState == Idling, "Collector state should "
-	  "have changed");
-	break;
-      case Precleaning:
-      case AbortablePreclean:
-        // Elide the preclean phase
-        _collectorState = FinalMarking;
-        break;
-      default:
-	ShouldNotReachHere();
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("	Thread " INTPTR_FORMAT " done - next CMS state %d", 
-	Thread::current(), _collectorState);
-    }
-  }
-
-  if (UseAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    size_policy()->ms_collection_end(gch->gc_cause());
-  }
-
-  if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify(true);
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 
-      " exiting collection CMS state %d", 
-      Thread::current(), _collectorState);
-  }
-}
-
-bool CMSCollector::waitForForegroundGC() {
-  bool res = false;
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should have CMS token");
-  // Block the foreground collector until the
-  // background collectors decides whether to
-  // yield.
-  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  _foregroundGCShouldWait = true;
-  if (_foregroundGCIsActive) {
-    // The background collector yields to the
-    // foreground collector and returns a value
-    // indicating that it has yielded.  The foreground
-    // collector can proceed.
-    res = true;
-    _foregroundGCShouldWait = false;
-    ConcurrentMarkSweepThread::clear_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_has_token);
-    ConcurrentMarkSweepThread::set_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_wants_token);
-    // Get a possibly blocked foreground thread going
-    CGC_lock->notify();
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
-        Thread::current(), _collectorState);
-    }
-    while (_foregroundGCIsActive) {
-      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-    ConcurrentMarkSweepThread::set_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_has_token);
-    ConcurrentMarkSweepThread::clear_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_wants_token);
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
-      Thread::current(), _collectorState);
-  }
-  return res;
-}
-
-// Because of the need to lock the free lists and other structures in
-// the collector, common to all the generations that the collector is
-// collecting, we need the gc_prologues of individual CMS generations
-// delegate to their collector. It may have been simpler had the
-// current infrastructure allowed one to call a prologue on a
-// collector. In the absence of that we have the generation's
-// prologue delegate to the collector, which delegates back
-// some "local" work to a worker method in the individual generations
-// that it's responsible for collecting, while itself doing any
-// work common to all generations it's responsible for. A similar
-// comment applies to the  gc_epilogue()'s.
-// The role of the varaible _between_prologue_and_epilogue is to
-// enforce the invocation protocol.
-void CMSCollector::gc_prologue(bool full) {
-  // Call gc_prologue_work() for each CMSGen and PermGen that
-  // we are responsible for.
-
-  // The following locking discipline assumes that we are only called
-  // when the world is stopped.
-  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
-
-  // The CMSCollector prologue must call the gc_prologues for the
-  // "generations" (including PermGen if any) that it's responsible
-  // for.
-
-  assert(   Thread::current()->is_VM_thread()
-         || (   CMSScavengeBeforeRemark
-             && Thread::current()->is_ConcurrentGC_thread()),
-         "Incorrect thread type for prologue execution");
-
-  if (_between_prologue_and_epilogue) {
-    // We have already been invoked; this is a gc_prologue delegation
-    // from yet another CMS generation that we are responsible for, just
-    // ignore it since all relevant work has already been done.
-    return;
-  }
-  
-  // set a bit saying prologue has been called; cleared in epilogue
-  _between_prologue_and_epilogue = true;
-  // Claim locks for common data structures, then call gc_prologue_work()
-  // for each CMSGen and PermGen that we are responsible for.
-
-  getFreelistLocks();   // gets free list locks on constituent spaces
-  bitMapLock()->lock_without_safepoint_check();
-
-  // Should call gc_prologue_work() for all cms gens we are responsible for
-  bool registerClosure =    _collectorState >= Marking
-                         && _collectorState < Sweeping;
-  ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
-                                               : &_modUnionClosure;
-  _cmsGen->gc_prologue_work(full, registerClosure, muc);
-  _permGen->gc_prologue_work(full, registerClosure, muc);
-
-  if (!full) {
-    stats().record_gc0_begin();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
-  // Delegate to CMScollector which knows how to coordinate between
-  // this and any other CMS generations that it is responsible for
-  // collecting.
-  collector()->gc_prologue(full);
-}
-
-// This is a "private" interface for use by this generation's CMSCollector.
-// Not to be called directly by any other entity (for instance,
-// GenCollectedHeap, which calls the "public" gc_prologue method above).
-void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
-  bool registerClosure, ModUnionClosure* modUnionClosure) {
-  assert(!incremental_collection_failed(), "Shouldn't be set yet");
-  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
-    "Should be NULL");
-  if (registerClosure) {
-    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
-  }
-  cmsSpace()->gc_prologue();
-  // Clear stat counters
-  NOT_PRODUCT(
-    assert(_numObjectsPromoted == 0, "check");
-    assert(_numWordsPromoted   == 0, "check");
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT" bytes concurrently",
-      _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
-    }
-    _numObjectsAllocated = 0;
-    _numWordsAllocated   = 0;
-  )
-}
-
-void CMSCollector::gc_epilogue(bool full) {
-  // The following locking discipline assumes that we are only called
-  // when the world is stopped.
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world is stopped assumption");
-
-  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
-  // if linear allocation blocks need to be appropriately marked to allow the
-  // the blocks to be parsable. We also check here whether we need to nudge the
-  // CMS collector thread to start a new cycle (if it's not already active).
-  assert(   Thread::current()->is_VM_thread()
-         || (   CMSScavengeBeforeRemark
-             && Thread::current()->is_ConcurrentGC_thread()),
-         "Incorrect thread type for epilogue execution");
-  
-  if (!_between_prologue_and_epilogue) {
-    // We have already been invoked; this is a gc_epilogue delegation
-    // from yet another CMS generation that we are responsible for, just
-    // ignore it since all relevant work has already been done.
-    return;
-  }
-  assert(haveFreelistLocks(), "must have freelist locks");
-  assert_lock_strong(bitMapLock());
-
-  _cmsGen->gc_epilogue_work(full);
-  _permGen->gc_epilogue_work(full);
-
-  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
-    // in case sampling was not already enabled, enable it
-    _start_sampling = true;
-  }
-  // reset _eden_chunk_array so sampling starts afresh
-  _eden_chunk_index = 0;
-
-  size_t cms_used   = _cmsGen->cmsSpace()->used();
-  size_t perm_used  = _permGen->cmsSpace()->used();
-
-  // update performance counters - this uses a special version of
-  // update_counters() that allows the utilization to be passed as a
-  // parameter, avoiding multiple calls to used().
-  //
-  _cmsGen->update_counters(cms_used);
-  _permGen->update_counters(perm_used);
-
-  if (CMSIncrementalMode) {
-    icms_update_allocation_limits();
-  }
-
-  bitMapLock()->unlock();
-  releaseFreelistLocks();
-
-  _between_prologue_and_epilogue = false;  // ready for next cycle
-}
-
-void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
-  collector()->gc_epilogue(full);
-
-  // Also reset promotion tracking in par gc thread states.
-  if (ParallelGCThreads > 0) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _par_gc_thread_states[i]->promo.stopTrackingPromotions();
-    }
-  }
-}
-
-void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
-  assert(!incremental_collection_failed(), "Should have been cleared");
-  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
-  cmsSpace()->gc_epilogue();
-    // Print stat counters
-  NOT_PRODUCT(
-    assert(_numObjectsAllocated == 0, "check");
-    assert(_numWordsAllocated == 0, "check");
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT" bytes",
-                 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
-    }
-    _numObjectsPromoted = 0;
-    _numWordsPromoted   = 0;
-  )
-
-  if (PrintGC && Verbose) {
-    // Call down the chain in contiguous_available needs the freelistLock
-    // so print this out before releasing the freeListLock.
-    gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
-                        contiguous_available());
-  }
-}
-
-#ifndef PRODUCT
-bool CMSCollector::have_cms_token() {
-  Thread* thr = Thread::current();
-  if (thr->is_VM_thread()) {
-    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
-  } else if (thr->is_ConcurrentGC_thread()) {
-    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
-  } else if (thr->is_GC_task_thread()) {
-    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
-           ParGCRareEvent_lock->owned_by_self();
-  }
-  return false;
-}
-#endif
-
-// Check reachability of the given heap address in CMS generation,
-// treating all other generations as roots.
-bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-  // We could "guarantee" below, rather than assert, but i'll
-  // leave these as "asserts" so that an adventurous debugger
-  // could try this in the product build provided some subset of
-  // the conditions were met, provided they were intersted in the
-  // results and knew that the computation below wouldn't interfere
-  // with other concurrent computations mutating the structures
-  // being read or written.
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "Else mutations in object graph will make answer suspect");
-  assert(have_cms_token(), "Should hold cms token");
-  assert(haveFreelistLocks(), "must hold free list locks");
-  assert_lock_strong(bitMapLock());
-
-  // Clear the marking bit map array before starting, but, just
-  // for kicks, first report if the given address is already marked
-  gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
-                _markBitMap.isMarked(addr) ? "" : " not");
-
-  if (verify_after_remark()) {
-    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
-    bool result = verification_mark_bm()->isMarked(addr);
-    gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
-                           result ? "IS" : "is NOT");
-    return result;
-  } else {
-    gclog_or_tty->print_cr("Could not compute result");
-    return false;
-  }
-}
-
-////////////////////////////////////////////////////////
-// CMS Verification Support
-////////////////////////////////////////////////////////
-// Following the remark phase, the following invariant
-// should hold -- each object in the CMS heap which is
-// marked in markBitMap() should be marked in the verification_mark_bm().
-
-class VerifyMarkedClosure: public BitMapClosure {
-  CMSBitMap* _marks;
-  bool       _failed;
-
- public:
-  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
-
-  void do_bit(size_t offset) {
-    HeapWord* addr = _marks->offsetToHeapWord(offset);
-    if (!_marks->isMarked(addr)) {
-      oop(addr)->print();
-      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
-      _failed = true;
-    }
-  }
-
-  bool failed() { return _failed; }
-};
-
-bool CMSCollector::verify_after_remark() {
-  gclog_or_tty->print(" [Verifying CMS Marking... ");
-  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
-  static bool init = false;
-
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "Else mutations in object graph will make answer suspect");
-  assert(have_cms_token(),
-         "Else there may be mutual interference in use of "
-         " verification data structures");
-  assert(_collectorState > Marking && _collectorState <= Sweeping,
-         "Else marking info checked here may be obsolete");
-  assert(haveFreelistLocks(), "must hold free list locks");
-  assert_lock_strong(bitMapLock());
-
-
-  // Allocate marking bit map if not already allocated
-  if (!init) { // first time
-    if (!verification_mark_bm()->allocate(_span)) {
-      return false;
-    }
-    init = true;
-  }
-
-  assert(verification_mark_stack()->isEmpty(), "Should be empty");
-
-  // Turn off refs discovery -- so we will be tracing through refs.
-  // This is as intended, because by this time
-  // GC must already have cleared any refs that need to be cleared,
-  // and traced those that need to be marked; moreover,
-  // the marking done here is not going to intefere in any
-  // way with the marking information used by GC.
-  NoRefDiscovery no_discovery(ref_processor());
-
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
-
-  // Clear any marks from a previous round
-  verification_mark_bm()->clear_all();
-  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
-  assert(overflow_list_is_empty(), "overflow list should be empty");
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
-  // Update the saved marks which may affect the root scans.
-  gch->save_marks();
-  
-  if (CMSRemarkVerifyVariant == 1) {
-    // In this first variant of verification, we complete
-    // all marking, then check if the new marks-verctor is
-    // a subset of the CMS marks-vector.
-    verify_after_remark_work_1();
-  } else if (CMSRemarkVerifyVariant == 2) {
-    // In this second variant of verification, we flag an error
-    // (i.e. an object reachable in the new marks-vector not reachable
-    // in the CMS marks-vector) immediately, also indicating the
-    // identify of an object (A) that references the unmarked object (B) --
-    // presumably, a mutation to A failed to be picked up by preclean/remark?
-    verify_after_remark_work_2();
-  } else {
-    warning("Unrecognized value %d for CMSRemarkVerifyVariant",
-            CMSRemarkVerifyVariant);
-  }
-  gclog_or_tty->print(" done] ");
-  return true;
-}
-
-void CMSCollector::verify_after_remark_work_1() {
-  ResourceMark rm;
-  HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-
-  // Mark from roots one level into CMS
-  MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-  
-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // collecting perm gen
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                NULL, &notOlder);
-
-  // Now mark from the roots
-  assert(_revisitStack.isEmpty(), "Should be empty");
-  MarkFromRootsClosure markFromRootsClosure(this, _span,
-    verification_mark_bm(), verification_mark_stack(), &_revisitStack,
-    false /* don't yield */, true /* verifying */);
-  assert(_restart_addr == NULL, "Expected pre-condition");
-  verification_mark_bm()->iterate(&markFromRootsClosure);
-  while (_restart_addr != NULL) {
-    // Deal with stack overflow: by restarting at the indicated
-    // address.
-    HeapWord* ra = _restart_addr;
-    markFromRootsClosure.reset(ra);
-    _restart_addr = NULL;
-    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
-  }
-  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
-  // Should reset the revisit stack above, since no class tree
-  // surgery is forthcoming.
-  _revisitStack.reset(); // throwing away all contents
-
-  // Marking completed -- now verify that each bit marked in
-  // verification_mark_bm() is also marked in markBitMap(); flag all
-  // errors by printing corresponding objects.
-  VerifyMarkedClosure vcl(markBitMap());
-  verification_mark_bm()->iterate(&vcl);
-  if (vcl.failed()) {
-    gclog_or_tty->print("Verification failed");
-    Universe::heap()->print();
-    fatal(" ... aborting");
-  }
-}
-
-void CMSCollector::verify_after_remark_work_2() {
-  ResourceMark rm;
-  HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-
-  // Mark from roots one level into CMS
-  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
-                                     markBitMap(), true /* nmethods */);
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-  gch->gen_process_strong_roots(_cmsGen->level(),
-				true,   // younger gens are roots
-				true,   // collecting perm gen
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-				NULL, &notOlder);
-
-  // Now mark from the roots
-  assert(_revisitStack.isEmpty(), "Should be empty");
-  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
-    verification_mark_bm(), markBitMap(), verification_mark_stack());
-  assert(_restart_addr == NULL, "Expected pre-condition");
-  verification_mark_bm()->iterate(&markFromRootsClosure);
-  while (_restart_addr != NULL) {
-    // Deal with stack overflow: by restarting at the indicated
-    // address.
-    HeapWord* ra = _restart_addr;
-    markFromRootsClosure.reset(ra);
-    _restart_addr = NULL;
-    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
-  }
-  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
-  // Should reset the revisit stack above, since no class tree
-  // surgery is forthcoming.
-  _revisitStack.reset(); // throwing away all contents
-
-  // Marking completed -- now verify that each bit marked in
-  // verification_mark_bm() is also marked in markBitMap(); flag all
-  // errors by printing corresponding objects.
-  VerifyMarkedClosure vcl(markBitMap());
-  verification_mark_bm()->iterate(&vcl);
-  assert(!vcl.failed(), "Else verification above should not have succeeded");
-}
-
-void ConcurrentMarkSweepGeneration::save_marks() {
-  // delegate to CMS space
-  cmsSpace()->save_marks();
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i]->promo.startTrackingPromotions();
-  }
-}
-
-bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
-  return cmsSpace()->no_allocs_since_save_marks();
-}
-
-#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
-                                                                \
-void ConcurrentMarkSweepGeneration::                            \
-oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
-  cl->set_generation(this);                                     \
-  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
-  cl->reset_generation();                                       \
-  save_marks();                                                 \
-}
-
-ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
-
-void
-ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
-{
-  // Not currently implemented; need to do the following. -- ysr.
-  // dld -- I think that is used for some sort of allocation profiler.  So it
-  // really means the objects allocated by the mutator since the last
-  // GC.  We could potentially implement this cheaply by recording only
-  // the direct allocations in a side data structure.
-  //
-  // I think we probably ought not to be required to support these
-  // iterations at any arbitrary point; I think there ought to be some
-  // call to enable/disable allocation profiling in a generation/space,
-  // and the iterator ought to return the objects allocated in the
-  // gen/space since the enable call, or the last iterator call (which
-  // will probably be at a GC.)  That way, for gens like CM&S that would
-  // require some extra data structure to support this, we only pay the
-  // cost when it's in use...
-  cmsSpace()->object_iterate_since_last_GC(blk);
-}
-
-void
-ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
-  cl->set_generation(this);
-  younger_refs_in_space_iterate(_cmsSpace, cl);
-  cl->reset_generation();
-}
-
-void
-ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::oop_iterate(mr, cl);
-  } else {
-    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::oop_iterate(mr, cl);
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::oop_iterate(cl);
-  } else {
-    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::oop_iterate(cl);
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::object_iterate(cl);
-  } else {
-    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::object_iterate(cl);
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
-}
-
-void
-ConcurrentMarkSweepGeneration::post_compact() {
-}
-
-void
-ConcurrentMarkSweepGeneration::prepare_for_verify() {
-  // Fix the linear allocation blocks to look like free blocks.
-
-  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
-  // are not called when the heap is verified during universe initialization and
-  // at vm shutdown.
-  if (freelistLock()->owned_by_self()) {
-    cmsSpace()->prepare_for_verify();
-  } else {
-    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
-    cmsSpace()->prepare_for_verify();
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
-  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
-  // are not called when the heap is verified during universe initialization and
-  // at vm shutdown.
-  if (freelistLock()->owned_by_self()) {
-    cmsSpace()->verify(false /* ignored */);
-  } else {
-    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
-    cmsSpace()->verify(false /* ignored */);
-  }
-}
-
-void CMSCollector::verify(bool allow_dirty /* ignored */) {
-  _cmsGen->verify(allow_dirty);
-  _permGen->verify(allow_dirty);
-}
-
-void CMSCollector::reset_cms_verification_state() {
-  const bool flags = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit;
-  const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
-
-  // We were not verifying, but some verification options got enabled.
-  if (!verifying() && flags) {
-    if (!CMSPermGenSweepingEnabled) {
-      // perm gen verification bitmap could have been previously allocated. 
-      // CMSBitMap::sizeInBits() is used to determine if it's allocated.
-      if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
-        if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
-          warning("Failed to allocate permanent generation verification CMS Bit Map,\n"
-                  "permanent generation verification disabled");
-          return;
-        }
-        assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
-                "_perm_gen_ver_bit_map inconsistency?");
-      } else {
-        perm_gen_verify_bit_map()->clear_all();
-      }
-      // Include symbols, strings and code cache elements to prevent their resurrection.
-      add_root_scanning_option(rso);
-    }
-    set_verifying(true);
-  } else {
-      if (verifying() && !flags) { // We were verifying, but some verification flags got disabled.
-        set_verifying(false);
-        if (!CMSPermGenSweepingEnabled) {
-          // Exclude symbols, strings and code cache elements from root scanning to reduce
-          // IM and RM pauses.
-          remove_root_scanning_option(rso);
-        }
-      }
-  }
-}
-
-
-#ifndef PRODUCT
-HeapWord* CMSCollector::block_start(const void* p) const {
-  const HeapWord* addr = (HeapWord*)p;
-  if (_span.contains(p)) {
-    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
-      return _cmsGen->cmsSpace()->block_start(p);
-    } else {
-      assert(_permGen->cmsSpace()->is_in_reserved(addr),
-	     "Inconsistent _span?");
-      return _permGen->cmsSpace()->block_start(p);
-    }
-  }
-  return NULL;
-}
-#endif
-
-HeapWord*
-ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
-                                                   bool   tlab,
-						   bool   parallel) {
-  assert(!tlab, "Can't deal with TLAB allocation");
-  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
-    CMSExpansionCause::_satisfy_allocation);
-  if (GCExpandToAllocateDelayMillis > 0) {
-    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
-  }
-  size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size);
-  if (parallel) {
-    return cmsSpace()->par_allocate(adj_word_sz);
-  } else {
-    return cmsSpace()->allocate(adj_word_sz);
-  }
-}
-
-// YSR: All of this generation expansion/shrinking stuff is an exact copy of
-// OneContigSpaceCardGeneration, which makes me wonder if we should move this
-// to CardGeneration and share it...
-void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
-  CMSExpansionCause::Cause cause)
-{
-  assert_locked_or_safepoint(Heap_lock);
-
-  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
-  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
-  bool success = false;
-  if (aligned_expand_bytes > aligned_bytes) {
-    success = grow_by(aligned_expand_bytes);
-  }
-  if (!success) {
-    success = grow_by(aligned_bytes);
-  }
-  if (!success) {
-    size_t remaining_bytes = _virtual_space.uncommitted_size();
-    if (remaining_bytes > 0) {
-      success = grow_by(remaining_bytes);
-    }
-  }
-  if (GC_locker::is_active()) {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
-    }
-  }
-  // remember why we expanded; this information is used
-  // by shouldConcurrentCollect() when making decisions on whether to start
-  // a new CMS cycle.
-  if (success) {
-    set_expansion_cause(cause);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("Expanded CMS gen for %s", 
-	CMSExpansionCause::to_string(cause));
-    }
-  }
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
-  HeapWord* res = NULL;
-  MutexLocker x(ParGCRareEvent_lock);
-  while (true) {
-    // Expansion by some other thread might make alloc OK now:
-    res = ps->lab.alloc(word_sz);
-    if (res != NULL) return res;
-    // If there's not enough expansion space available, give up.
-    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
-      return NULL;
-    }
-    // Otherwise, we try expansion.
-    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
-      CMSExpansionCause::_allocate_par_lab);
-    // Now go around the loop and try alloc again;
-    // A competing par_promote might beat us to the expansion space,
-    // so we may go around the loop again if promotion fails agaion.
-    if (GCExpandToAllocateDelayMillis > 0) {
-      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
-    }
-  }
-}
-
-
-bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
-  PromotionInfo* promo) {
-  MutexLocker x(ParGCRareEvent_lock);
-  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
-  while (true) {
-    // Expansion by some other thread might make alloc OK now:
-    if (promo->ensure_spooling_space()) {
-      assert(promo->has_spooling_space(),
-             "Post-condition of successful ensure_spooling_space()");
-      return true;
-    }
-    // If there's not enough expansion space available, give up.
-    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
-      return false;
-    }
-    // Otherwise, we try expansion.
-    expand(refill_size_bytes, MinHeapDeltaBytes,
-      CMSExpansionCause::_allocate_par_spooling_space);
-    // Now go around the loop and try alloc again;
-    // A competing allocation might beat us to the expansion space,
-    // so we may go around the loop again if allocation fails again.
-    if (GCExpandToAllocateDelayMillis > 0) {
-      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
-    }
-  }
-}
-
-
-
-void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
-    shrink_by(size);
-  }
-}
-
-bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  bool result = _virtual_space.expand_by(bytes);
-  if (result) {
-    HeapWord* old_end = _cmsSpace->end();
-    size_t new_word_size = 
-      heap_word_size(_virtual_space.committed_size());
-    MemRegion mr(_cmsSpace->bottom(), new_word_size);
-    _bts->resize(new_word_size);  // resize the block offset shared array
-    Universe::heap()->barrier_set()->resize_covered_region(mr);
-    // Hmmmm... why doesn't CFLS::set_end verify locking?
-    // This is quite ugly; FIX ME XXX
-    _cmsSpace->assert_locked();
-    _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-
-    // update the space and generation capacity counters
-    if (UsePerfData) {
-      _space_counters->update_capacity();
-      _gen_counters->update_all();
-    }
-
-    if (Verbose && PrintGC) {
-      size_t new_mem_size = _virtual_space.committed_size();
-      size_t old_mem_size = new_mem_size - bytes;
-      gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
-                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
-    }
-  }
-  return result;
-}
-
-bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
-  assert_locked_or_safepoint(Heap_lock);
-  bool success = true;
-  const size_t remaining_bytes = _virtual_space.uncommitted_size();
-  if (remaining_bytes > 0) {
-    success = grow_by(remaining_bytes);
-    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
-  }
-  return success;
-}
-
-void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  assert_lock_strong(freelistLock());
-  // XXX Fix when compaction is implemented.
-  warning("Shrinking of CMS not yet implemented");
-  return;
-}
-
-
-// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
-// phases.
-class CMSPhaseAccounting: public StackObj {
- public:
-  CMSPhaseAccounting(CMSCollector *collector, 
-		     const char *phase, 
-		     bool print_cr = true);
-  ~CMSPhaseAccounting();
-
- private:
-  CMSCollector *_collector;
-  const char *_phase;
-  elapsedTimer _wallclock;
-  bool _print_cr;
-
- public:
-  // Not MT-safe; so do not pass around these StackObj's
-  // where they may be accessed by other threads.
-  jlong wallclock_millis() {
-    assert(_wallclock.is_active(), "Wall clock should not stop");
-    _wallclock.stop();  // to record time
-    jlong ret = _wallclock.milliseconds();
-    _wallclock.start(); // restart
-    return ret;
-  }
-};
-
-CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
-				       const char *phase,
-				       bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr) {
-
-  if (PrintCMSStatistics != 0) {
-    _collector->resetYields();
-  }
-  if (PrintGCDetails && PrintGCTimeStamps) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]", 
-      _collector->cmsGen()->short_name(), _phase);
-  }
-  _collector->resetTimer();
-  _wallclock.start();
-  _collector->startTimer();
-}
-
-CMSPhaseAccounting::~CMSPhaseAccounting() {
-  assert(_wallclock.is_active(), "Wall clock should not have stopped");
-  _collector->stopTimer();
-  _wallclock.stop();
-  if (PrintGCDetails) {
-    if (PrintGCTimeStamps) {
-      gclog_or_tty->stamp();
-      gclog_or_tty->print(": ");
-    }
-    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", 
-		 _collector->cmsGen()->short_name(),
-		 _phase, _collector->timerValue(), _wallclock.seconds());
-    if (_print_cr) {
-      gclog_or_tty->print_cr("");
-    }
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
-		    _collector->yields());
-    }
-  }
-}
-
-// CMS work
-
-// Checkpoint the roots into this generation from outside
-// this generation. [Note this initial checkpoint need only
-// be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial(bool asynch) {
-  assert(_collectorState == InitialMarking, "Wrong collector state");
-  check_correct_thread_executing();
-  ReferenceProcessor* rp = ref_processor();
-  SpecializationStats::clear();
-  assert(_restart_addr == NULL, "Control point invariant");
-  if (asynch) {
-    // acquire locks for subsequent manipulations
-    MutexLockerEx x(bitMapLock(),
-                    Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork(asynch);
-    rp->verify_no_references_recorded();
-    rp->enable_discovery(); // enable ("weak") refs discovery
-    _collectorState = Marking;
-  } else {
-    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-    // which recognizes if we are a CMS generation, and doesn't try to turn on
-    // discovery; verify that they aren't meddling.
-    assert(!rp->discovery_is_atomic(),
-           "incorrect setting of discovery predicate");
-    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-           "ref discovery for this generation kind");
-    // already have locks
-    checkpointRootsInitialWork(asynch);
-    rp->enable_discovery(); // now enable ("weak") refs discovery
-    _collectorState = Marking;
-  }
-  SpecializationStats::print();
-}
-
-void CMSCollector::checkpointRootsInitialWork(bool asynch) {
-  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
-  assert(_collectorState == InitialMarking, "just checking");
-
-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // XXX: we won't do this for now -- it's an optimization to be done later.
-
-  // already have locks
-  assert_lock_strong(bitMapLock());
-  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
-
-  // Fix the verification state.
-  reset_cms_verification_state();
-
-  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", 
-    PrintGCDetails && Verbose, true, gclog_or_tty);)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_begin();
-  }
-
-  // Reset all the PLAB chunk arrays if necessary.
-  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
-    reset_survivor_plab_arrays();
-  }
-
-  ResourceMark rm;
-  HandleMark  hm;
-
-  FalseClosure falseClosure;
-  // In the case of a synchronous collection, we will elide the
-  // remark step, so it's important to catch all the nmethod oops
-  // in this step; hence the last argument to the constrcutor below.
-  MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-
-  assert(_markStack.isEmpty(), "markStack should be empty");
-  assert(overflow_list_is_empty(), "overflow list should be empty");
-  assert(no_preserved_marks(), "no preserved marks");
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
-  // Update the saved marks which may affect the root scans.
-  gch->save_marks();
-
-  // weak reference processing has not started yet.
-  ref_processor()->set_enqueuing_is_done(false);
-
-  {
-    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
-    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-    gch->gen_process_strong_roots(_cmsGen->level(),
-				  true,   // younger gens are roots
-				  true,   // collecting perm gen
-                                  SharedHeap::ScanningOption(roots_scanning_options()),
-				  NULL, &notOlder);
-  }
-
-  // Clear mod-union table; it will be dirtied in the prologue of
-  // CMS generation per each younger generation collection.
-
-  assert(_modUnionTable.isAllClear(),
-       "Was cleared in most recent final checkpoint phase"
-       " or no bits are set in the gc_prologue before the start of the next "
-       "subsequent marking phase.");
-
-  // Temporarily disabled, since pre/post-consumption closures don't
-  // care about precleaned cards
-  #if 0
-  {
-    MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
-			     (HeapWord*)_virtual_space.high());
-    _ct->ct_bs()->preclean_dirty_cards(mr);
-  }
-  #endif
-
-  // Save the end of the used_region of the constituent generations
-  // to be used to limit the extent of sweep in each generation.
-  save_sweep_limits();
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
-  }
-}
-
-bool CMSCollector::markFromRoots(bool asynch) {
-  // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
-  //        "inconsistent argument?");
-  // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
-  // stop-the-world GC happens even as we mark in this generation.
-  assert(_collectorState == Marking, "inconsistent state?");
-  check_correct_thread_executing();
-
-  bool res;
-  if (asynch) {
-
-    // Start the timers for adaptive size policy for the concurrent phases
-    // Do it here so that the foreground MS can use the concurrent
-    // timer since a foreground MS might has the sweep done concurrently
-    // or STW.
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_begin();
-    }
-
-    // Weak ref discovery note: We may be discovering weak
-    // refs in this generation concurrent (but interleaved) with
-    // weak ref discovery by a younger generation collector.
-
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
-    res = markFromRootsWork(asynch);
-    if (res) {
-      _collectorState = Precleaning;
-    } else { // We failed and a foreground collection wants to take over
-      assert(_foregroundGCIsActive, "internal state inconsistency");
-      assert(_restart_addr == NULL,  "foreground will restart from scratch");
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("bailing out to foreground collection");
-      }
-    }
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_end();
-    }
-  } else {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "inconsistent with asynch == false");
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->ms_collection_marking_begin();
-    }
-    // already have locks
-    res = markFromRootsWork(asynch);
-    _collectorState = FinalMarking;
-    if (UseAdaptiveSizePolicy) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      size_policy()->ms_collection_marking_end(gch->gc_cause());
-    }
-  }
-  return res;
-}
-
-bool CMSCollector::markFromRootsWork(bool asynch) {
-  // iterate over marked bits in bit map, doing a full scan and mark
-  // from these roots using the following algorithm:
-  // . if oop is to the right of the current scan pointer,
-  //   mark corresponding bit (we'll process it later)
-  // . else (oop is to left of current scan pointer)
-  //   push oop on marking stack
-  // . drain the marking stack
-
-  // Note that when we do a marking step we need to hold the
-  // bit map lock -- recall that direct allocation (by mutators)
-  // and promotion (by younger generation collectors) is also
-  // marking the bit map. [the so-called allocate live policy.]
-  // Because the implementation of bit map marking is not
-  // robust wrt simultaneous marking of bits in the same word,
-  // we need to make sure that there is no such interference
-  // between concurrent such updates.
-
-  // already have locks
-  assert_lock_strong(bitMapLock());
-
-  // Clear the revisit stack, just in case there are any
-  // obsolete contents from a short-circuited previous CMS cycle.
-  _revisitStack.reset();
-  assert(_revisitStack.isEmpty(), "tabula rasa");
-  assert(_markStack.isEmpty(),    "tabula rasa");
-  assert(overflow_list_is_empty(), "tabula rasa");
-  assert(no_preserved_marks(), "no preserved marks");
-
-  bool result = false;
-  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
-    result = do_marking_mt(asynch);
-  } else {
-    result = do_marking_st(asynch);
-  }
-  return result;
-}
-
-// Forward decl
-class CMSConcMarkingTask;
-
-class CMSConcMarkingTerminator: public ParallelTaskTerminator {
-  CMSCollector*       _collector;
-  CMSConcMarkingTask* _task;
-  bool _yield;
- protected:
-  virtual void yield();
- public:
-  // "n_threads" is the number of threads to be terminated.
-  // "queue_set" is a set of work queues of other threads.
-  // "collector" is the CMS collector associated with this task terminator.
-  // "yield" indicates whether we need the gang as a whole to yield.
-  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
-                           CMSCollector* collector, bool yield) :
-    ParallelTaskTerminator(n_threads, queue_set),
-    _collector(collector),
-    _yield(yield) { }
- 
-  void set_task(CMSConcMarkingTask* task) {
-    _task = task;
-  }
-};
-
-// MT Concurrent Marking Task
-class CMSConcMarkingTask: public YieldingFlexibleGangTask {
-  CMSCollector* _collector;
-  YieldingFlexibleWorkGang* _workers;        // the whole gang
-  int           _n_workers;                  // requested/desired # workers
-  bool          _asynch;
-  bool          _result;
-  CompactibleFreeListSpace*  _cms_space;
-  CompactibleFreeListSpace* _perm_space;
-  HeapWord*     _global_finger;
-
-  //  Exposed here for yielding support
-  Mutex* const _bit_map_lock;
-
-  // The per thread work queues, available here for stealing
-  OopTaskQueueSet*  _task_queues;
-  CMSConcMarkingTerminator _term;
-
- public:
-  CMSConcMarkingTask(CMSCollector* collector, 
-                 CompactibleFreeListSpace* cms_space,
-                 CompactibleFreeListSpace* perm_space,
-                 bool asynch, int n_workers,
-                 YieldingFlexibleWorkGang* workers,
-                 OopTaskQueueSet* task_queues):
-    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
-    _collector(collector),
-    _cms_space(cms_space),
-    _perm_space(perm_space),
-    _asynch(asynch), _n_workers(n_workers), _result(true),
-    _workers(workers), _task_queues(task_queues),
-    _term(n_workers, task_queues, _collector, asynch),
-    _bit_map_lock(collector->bitMapLock())
-  {
-    assert(n_workers <= workers->total_workers(),
-           "Else termination won't work correctly today"); // XXX FIX ME!
-    _requested_size = n_workers;
-    _term.set_task(this);
-    assert(_cms_space->bottom() < _perm_space->bottom(),
-           "Finger incorrectly initialized below");
-    _global_finger = _cms_space->bottom();
-  }
-
-
-  OopTaskQueueSet* task_queues()  { return _task_queues; }
-
-  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
-  HeapWord** global_finger_addr() { return &_global_finger; }
-
-  CMSConcMarkingTerminator* terminator() { return &_term; }
-
-  void work(int i);
-    
-  virtual void coordinator_yield();  // stuff done by coordinator
-  bool result() { return _result; }
-
-  void reset(HeapWord* ra) {
-    _term.reset_for_reuse();
-  }
-
-  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
-                                           OopTaskQueue* work_q);
-
- private:
-  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
-  void do_work_steal(int i);
-  void bump_global_finger(HeapWord* f);
-};
-
-void CMSConcMarkingTerminator::yield() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    _task->yield();
-  } else {
-    ParallelTaskTerminator::yield();
-  }
-}
-
-////////////////////////////////////////////////////////////////
-// Concurrent Marking Algorithm Sketch
-////////////////////////////////////////////////////////////////
-// Until all tasks exhausted (both spaces):
-// -- claim next available chunk
-// -- bump global finger via CAS
-// -- find first object that starts in this chunk
-//    and start scanning bitmap from that position
-// -- scan marked objects for oops
-// -- CAS-mark target, and if successful:
-//    . if target oop is above global finger (volatile read)
-//      nothing to do
-//    . if target oop is in chunk and above local finger
-//        then nothing to do
-//    . else push on work-queue
-// -- Deal with possible overflow issues:
-//    . local work-queue overflow causes stuff to be pushed on
-//      global (common) overflow queue
-//    . always first empty local work queue
-//    . then get a batch of oops from global work queue if any
-//    . then do work stealing
-// -- When all tasks claimed (both spaces)
-//    and local work queue empty, 
-//    then in a loop do:
-//    . check global overflow stack; steal a batch of oops and trace
-//    . try to steal from other threads oif GOS is empty
-//    . if neither is available, offer termination
-// -- Terminate and return result
-//
-void CMSConcMarkingTask::work(int i) {
-  elapsedTimer _timer;
-  ResourceMark rm;
-  HandleMark hm;
-
-  // Before we begin work, our work queue should be empty
-  assert(work_queue(i)->size() == 0, "Expected to be empty");
-  // Scan the bitmap covering _cms_space, tracing through grey objects.
-  _timer.start();
-  do_scan_and_mark(i, _cms_space);
-  _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
-  }
-
-  // ... do the same for the _perm_space
-  _timer.reset();
-  _timer.start();
-  do_scan_and_mark(i, _perm_space);
-  _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
-  }
-
-  // ... do work stealing
-  _timer.reset();
-  _timer.start();
-  do_work_steal(i);
-  _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
-  }
-  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
-  assert(work_queue(i)->size() == 0, "Should have been emptied");
-  // Note that under the current task protocol, the
-  // following assertion is true even of the spaces
-  // expanded since the completion of the concurrent
-  // marking. XXX This will likely change under a strict
-  // ABORT semantics.
-  assert(_global_finger >  _cms_space->end() &&
-         _global_finger >= _perm_space->end(),
-         "All tasks have been completed");
-}
-
-void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
-  HeapWord* read = _global_finger;
-  HeapWord* cur  = read;
-  while (f > read) {
-    cur = read;
-    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
-    if (cur == read) {
-      // our cas succeeded
-      assert(_global_finger >= f, "protocol consistency");
-      break;
-    }
-  }
-}
-
-// This is really inefficient, and should be redone by
-// using (not yet available) block-read and -write interfaces to the
-// stack and the work_queue. XXX FIX ME !!!
-bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
-                                                      OopTaskQueue* work_q) {
-  // Fast lock-free check
-  if (ovflw_stk->length() == 0) {
-    return false;
-  }
-  assert(work_q->size() == 0, "Shouldn't steal");
-  MutexLockerEx ml(ovflw_stk->par_lock(),
-                   Mutex::_no_safepoint_check_flag);
-  // Grab up to 1/4 the size of the work queue
-  size_t num = MIN2((size_t)work_q->max_elems()/4,
-                    (size_t)ParGCDesiredObjsFromOverflowList);
-  num = MIN2(num, ovflw_stk->length());
-  for (int i = (int) num; i > 0; i--) {
-    oop cur = ovflw_stk->pop();
-    assert(cur != NULL, "Counted wrong?");
-    work_q->push(cur);
-  }
-  return num > 0;
-}
-
-void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
-  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
-  int n_tasks = pst->n_tasks();
-  // We allow that there may be no tasks to do here because
-  // we are restarting after a stack overflow.
-  assert(pst->valid() || n_tasks == 0, "Uninitializd use?");
-  int nth_task = 0;
-
-  HeapWord* start = sp->bottom();
-  size_t chunk_size = sp->marking_task_size();
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
-    // Having claimed the nth task in this space,
-    // compute the chunk that it corresponds to:
-    MemRegion span = MemRegion(start + nth_task*chunk_size,
-                               start + (nth_task+1)*chunk_size);
-    // Try and bump the global finger via a CAS;
-    // note that we need to do the global finger bump
-    // _before_ taking the intersection below, because
-    // the task corresponding to that region will be
-    // deemed done even if the used_region() expands
-    // because of allocation -- as it almost certainly will
-    // during start-up while the threads yield in the
-    // closure below.
-    HeapWord* finger = span.end();
-    bump_global_finger(finger);   // atomically
-    // There are null tasks here corresponding to chunks
-    // beyond the "top" address of the space.
-    span = span.intersection(sp->used_region());
-    if (!span.is_empty()) {  // Non-null task
-      // We want to skip the first object because
-      // the protocol is to scan any object in its entirety
-      // that _starts_ in this span; a fortiori, any
-      // object starting in an earlier span is scanned
-      // as part of an earlier claimed task.
-      // Below we use the "careful" version of block_start
-      // so we do not try to navigate uninitialized objects.
-      HeapWord* prev_obj = sp->block_start_careful(span.start());
-      // Below we use a variant of block_size that uses the
-      // Printezis bits to avoid waiting for allocated
-      // objects to become initialized/parsable.
-      while (prev_obj < span.start()) {
-        size_t sz = sp->block_size_no_stall(prev_obj, _collector);
-        if (sz > 0) {
-          prev_obj += sz;
-        } else {
-          // In this case we may end up doing a bit of redundant
-          // scanning, but that appears unavoidable, short of
-          // locking the free list locks; see bug 6324141.
-          break;
-        }
-      }
-      if (prev_obj < span.end()) {
-        MemRegion my_span = MemRegion(prev_obj, span.end());
-        // Do the marking work within a non-empty span --
-        // the last argument to the constructor indicates whether the
-        // iteration should be incremental with periodic yields.
-        Par_MarkFromRootsClosure cl(this, _collector, my_span,
-                                    &_collector->_markBitMap,
-                                    work_queue(i),
-                                    &_collector->_markStack,
-                                    &_collector->_revisitStack,
-                                    _asynch);
-        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
-      } // else nothing to do for this task
-    }   // else nothing to do for this task
-  }
-  // We'd be tempted to assert here that since there are no
-  // more tasks left to claim in this space, the global_finger
-  // must exceed space->top() and a fortiori space->end(). However,
-  // that would not quite be correct because the bumping of
-  // global_finger occurs strictly after the claiming of a task,
-  // so by the time we reach here the global finger may not yet
-  // have been bumped up by the thread that claimed the last
-  // task.
-  pst->all_tasks_completed();
-}
-
-class Par_ConcMarkingClosure: public OopClosure {
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  CMSMarkStack* _overflow_stack;
-  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
-  OopTaskQueue* _work_queue;
-
- public:
-  Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
-                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    _collector(collector),
-    _span(_collector->_span),
-    _work_queue(work_queue),
-    _bit_map(bit_map),
-    _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
-
-  void do_oop(oop* p);
-  void trim_queue(size_t max);
-  void handle_stack_overflow(HeapWord* lost);
-};
-
-// Grey object rescan during work stealing phase --
-// the salient assumption here is that stolen oops must
-// always be initialized, so we do not need to check for
-// uninitialized objects before scanning here.
-void Par_ConcMarkingClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  assert(this_oop->is_oop_or_null(),
-         "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
-  // Check if oop points into the CMS generation
-  // and is not marked
-  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // a white object ...
-    // If we manage to "claim" the object, by being the
-    // first thread to mark it, then we push it on our
-    // marking stack
-    if (_bit_map->par_mark(addr)) {     // ... now grey
-      // push on work queue (grey set)
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow ||
-          !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
-        // stack overflow
-        if (PrintCMSStatistics != 0) {
-          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                                 SIZE_FORMAT, _overflow_stack->capacity());
-        }
-        // We cannot assert that the overflow stack is full because
-        // it may have been emptied since.
-        assert(simulate_overflow ||
-               _work_queue->size() == _work_queue->max_elems(),
-              "Else push should have succeeded");
-        handle_stack_overflow(addr);
-      }
-    } // Else, some other thread got there first
-  }
-}
-
-void Par_ConcMarkingClosure::trim_queue(size_t max) {
-  while (_work_queue->size() > max) {
-    oop new_oop;
-    if (_work_queue->pop_local(new_oop)) {
-      assert(new_oop->is_oop(), "Should be an oop");
-      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
-      assert(_span.contains((HeapWord*)new_oop), "Not in span");
-      assert(new_oop->is_parsable(), "Should be parsable");
-      new_oop->oop_iterate(this);  // do_oop() above
-    }
-  }
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
-  // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
-  MutexLockerEx ml(_overflow_stack->par_lock(),
-                   Mutex::_no_safepoint_check_flag);
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _overflow_stack->reset();  // discard stack contents
-  _overflow_stack->expand(); // expand the stack if possible
-}
-
-
-void CMSConcMarkingTask::do_work_steal(int i) {
-  OopTaskQueue* work_q = work_queue(i);
-  oop obj_to_scan;
-  CMSBitMap* bm = &(_collector->_markBitMap);
-  CMSMarkStack* ovflw = &(_collector->_markStack);
-  int* seed = _collector->hash_seed(i);
-  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
-  while (true) {
-    cl.trim_queue(0);
-    assert(work_q->size() == 0, "Should have been emptied above");
-    if (get_work_from_overflow_stack(ovflw, work_q)) {
-      // Can't assert below because the work obtained from the
-      // overflow stack may already have been stolen from us.
-      // assert(work_q->size() > 0, "Work from overflow stack");
-      continue;
-    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
-      assert(obj_to_scan->is_oop(), "Should be an oop");
-      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
-      obj_to_scan->oop_iterate(&cl);
-    } else if (terminator()->offer_termination()) {
-      assert(work_q->size() == 0, "Impossible!");
-      break;
-    }
-  }
-}
-
-// This is run by the CMS (coordinator) thread.
-void CMSConcMarkingTask::coordinator_yield() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  
-  // First give up the locks, then yield, then re-lock
-  // We should probably use a constructor/destructor idiom to
-  // do this unlock/lock or modify the MutexUnlocker class to
-  // serve our purpose. XXX
-  assert_lock_strong(_bit_map_lock);
-  _bit_map_lock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-  _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // It is possible for whichever thread initiated the yield request
-  // not to get a chance to wake up and take the bitmap lock between
-  // this thread releasing it and reacquiring it. So, while the
-  // should_yield() flag is on, let's sleep for a bit to give the
-  // other thread a chance to wake up. The limit imposed on the number
-  // of iterations is defensive, to avoid any unforseen circumstances
-  // putting us into an infinite loop. Since it's always been this
-  // (coordinator_yield()) method that was observed to cause the
-  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
-  // which is by default non-zero. For the other seven methods that
-  // also perform the yield operation, as are using a different
-  // parameter (CMSYieldSleepCount) which is by default zero. This way we
-  // can enable the sleeping for those methods too, if necessary.
-  // See 6442774.
-  //
-  // We really need to reconsider the synchronization between the GC
-  // thread and the yield-requesting threads in the future and we
-  // should really use wait/notify, which is the recommended
-  // way of doing this type of interaction. Additionally, we should
-  // consolidate the eight methods that do the yield operation and they
-  // are almost identical into one for better maintenability and
-  // readability. See 6445193.
-  //
-  // Tony 2006.06.29
-  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _bit_map_lock->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-bool CMSCollector::do_marking_mt(bool asynch) {
-  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
-  // In the future this would be determined ergonomically, based
-  // on #cpu's, # active mutator threads (and load), and mutation rate.
-  int num_workers = ParallelCMSThreads;
-
-  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
-  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
-  
-  CMSConcMarkingTask tsk(this, cms_space, perm_space,
-                         asynch, num_workers /* number requested XXX */,
-                         conc_workers(), task_queues());
-
-  // Since the actual number of workers we get may be different
-  // from the number we requested above, do we need to do anything different
-  // below? In particular, may be we need to subclass the SequantialSubTasksDone
-  // class?? XXX
-  cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
-  perm_space->initialize_sequential_subtasks_for_marking(num_workers);
-  
-  // Refs discovery is already non-atomic.
-  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
-  // Mutate the Refs discovery so it is MT during the
-  // multi-threaded marking phase.
-  ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
-  
-  conc_workers()->start_task(&tsk);
-  while (tsk.yielded()) {
-    tsk.coordinator_yield();
-    conc_workers()->continue_task(&tsk);
-  }
-  // If the task was aborted, _restart_addr will be non-NULL
-  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
-  while (_restart_addr != NULL) {
-    // XXX For now we do not make use of ABORTED state and have not
-    // yet implemented the right abort semantics (even in the original
-    // single-threaded CMS case. That needs some more investigation
-    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
-    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
-    // If _restart_addr is non-NULL, a marking stack overflow
-    // occured; we need to do a fresh marking iteration from the
-    // indicated restart address.
-    if (_foregroundGCIsActive && asynch) {
-      // We may be running into repeated stack overflows, having
-      // reached the limit of the stack size, while making very
-      // slow forward progress. It may be best to bail out and
-      // let the foreground collector do its job.
-      // Clear _restart_addr, so that foreground GC
-      // works from scratch. This avoids the headache of
-      // a "rescan" which would otherwise be needed because
-      // of the dirty mod union table & card table.
-      _restart_addr = NULL;
-      return false;
-    }
-    // Adjust the task to restart from _restart_addr
-    tsk.reset(_restart_addr);
-    cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
-                  _restart_addr);
-    perm_space->initialize_sequential_subtasks_for_marking(num_workers,
-                  _restart_addr);
-    _restart_addr = NULL;
-    // Get the workers going again
-    conc_workers()->start_task(&tsk);
-    while (tsk.yielded()) {
-      tsk.coordinator_yield();
-      conc_workers()->continue_task(&tsk);
-    }
-  }
-  assert(tsk.completed(), "Inconsistency");
-  assert(tsk.result() == true, "Inconsistency");
-  return true;
-}
-
-bool CMSCollector::do_marking_st(bool asynch) {
-  ResourceMark rm;
-  HandleMark   hm;
-
-  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
-    &_markStack, &_revisitStack, CMSYield && asynch);
-  // the last argument to iterate indicates whether the iteration
-  // should be incremental with periodic yields.
-  _markBitMap.iterate(&markFromRootsClosure);
-  // If _restart_addr is non-NULL, a marking stack overflow
-  // occured; we need to do a fresh iteration from the
-  // indicated restart address.
-  while (_restart_addr != NULL) {
-    if (_foregroundGCIsActive && asynch) {
-      // We may be running into repeated stack overflows, having
-      // reached the limit of the stack size, while making very
-      // slow forward progress. It may be best to bail out and
-      // let the foreground collector do its job.
-      // Clear _restart_addr, so that foreground GC
-      // works from scratch. This avoids the headache of
-      // a "rescan" which would otherwise be needed because
-      // of the dirty mod union table & card table.
-      _restart_addr = NULL;
-      return false;  // indicating failure to complete marking
-    }
-    // Deal with stack overflow:
-    // we restart marking from _restart_addr
-    HeapWord* ra = _restart_addr;
-    markFromRootsClosure.reset(ra);
-    _restart_addr = NULL;
-    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
-  }
-  return true;
-}
-
-void CMSCollector::preclean() {
-  check_correct_thread_executing();
-  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
-  _abort_preclean = false;
-  if (CMSPrecleaningEnabled) {
-    _eden_chunk_index = 0;
-    size_t used = get_eden_used();
-    size_t capacity = get_eden_capacity();
-    // Don't start sampling unless we will get sufficiently
-    // many samples.
-    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
-                * CMSScheduleRemarkEdenPenetration)) {
-      _start_sampling = true;
-    } else {
-      _start_sampling = false;
-    }
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
-    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
-  }
-  CMSTokenSync x(true); // is cms thread
-  if (CMSPrecleaningEnabled) {
-    sample_eden();
-    _collectorState = AbortablePreclean;
-  } else {
-    _collectorState = FinalMarking;
-  }
-}
-
-// Try and schedule the remark such that young gen
-// occupancy is CMSScheduleRemarkEdenPenetration %.
-void CMSCollector::abortable_preclean() {
-  check_correct_thread_executing();
-  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
-  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
-
-  // If Eden's current occupancy is below this threshold,
-  // immediately schedule the remark; else preclean
-  // past the next scavenge in an effort to
-  // schedule the pause as described avove. By choosing
-  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
-  // we will never do an actual abortable preclean cycle.
-  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
-    // We need more smarts in the abortable preclean
-    // loop below to deal with cases where allocation
-    // in young gen is very very slow, and our precleaning
-    // is running a losing race against a horde of
-    // mutators intent on flooding us with CMS updates
-    // (dirty cards).
-    // One, admittedly dumb, strategy is to give up
-    // after a certain number of abortable precleaning loops
-    // or after a certain maximum time. We want to make
-    // this smarter in the next iteration.
-    // XXX FIX ME!!! YSR
-    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
-    while (!(should_abort_preclean() ||
-             ConcurrentMarkSweepThread::should_terminate())) {
-      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); 
-      cumworkdone += workdone;
-      loops++;
-      // Voluntarily terminate abortable preclean phase if we have
-      // been at it for too long.
-      if ((CMSMaxAbortablePrecleanLoops != 0) &&
-          loops >= CMSMaxAbortablePrecleanLoops) {
-        if (PrintGCDetails) {
-          gclog_or_tty->print(" CMS: abort preclean due to loops ");
-        }
-        break;
-      }
-      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
-        if (PrintGCDetails) {
-          gclog_or_tty->print(" CMS: abort preclean due to time ");
-        }
-        break;
-      }
-      // If we are doing little work each iteration, we should
-      // take a short break.
-      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
-        // Sleep for some time, waiting for work to accumulate
-        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
-        waited++;
-      }
-    }
-    if (PrintCMSStatistics > 0) {
-      gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
-                          loops, waited, cumworkdone);
-    }
-  }
-  CMSTokenSync x(true); // is cms thread
-  if (_collectorState != Idling) {
-    assert(_collectorState == AbortablePreclean,
-           "Spontaneous state transition?");
-    _collectorState = FinalMarking;
-  } // Else, a foreground collection completed this CMS cycle.
-  return;
-}
-
-// Respond to an Eden sampling opportunity
-void CMSCollector::sample_eden() {
-  // Make sure a young gc cannot sneak in between our
-  // reading and recording of a sample.
-  assert(Thread::current()->is_ConcurrentGC_thread(),
-         "Only the cms thread may collect Eden samples");
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "Should collect samples while holding CMS token");
-  if (!_start_sampling) {
-    return;
-  }
-  if (_eden_chunk_array) {
-    if (_eden_chunk_index < _eden_chunk_capacity) {
-      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
-      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
-             "Unexpected state of Eden");
-      // We'd like to check that what we just sampled is an oop-start address;
-      // however, we cannot do that here since the object may not yet have been
-      // initialized. So we'll instead do the check when we _use_ this sample
-      // later.
-      if (_eden_chunk_index == 0 ||
-          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
-                         _eden_chunk_array[_eden_chunk_index-1])
-           >= CMSSamplingGrain)) {
-        _eden_chunk_index++;  // commit sample
-      }
-    }
-  }
-  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
-    size_t used = get_eden_used();
-    size_t capacity = get_eden_capacity();
-    assert(used <= capacity, "Unexpected state of Eden");
-    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
-      _abort_preclean = true;
-    }
-  }
-}
-
-
-size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
-  assert(_collectorState == Precleaning ||
-         _collectorState == AbortablePreclean, "incorrect state");
-  ResourceMark rm;
-  HandleMark   hm;
-  // Do one pass of scrubbing the discovered reference lists
-  // to remove any reference objects with strongly-reachable
-  // referents.
-  if (clean_refs) {
-    ReferenceProcessor* rp = ref_processor();
-    CMSPrecleanRefsYieldClosure yield_cl(this);
-    assert(rp->span().equals(_span), "Spans should be equal");
-    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                   &_markStack);
-    CMSDrainMarkingStackClosure complete_trace(this,
-                                  _span, &_markBitMap, &_markStack,
-                                  &keep_alive);
-
-    // We don't want this step to interfere with a young
-    // collection because we don't want to take CPU
-    // or memory bandwidth away from the young GC threads
-    // (which may be as many as there are CPUs).
-    // Note that we don't need to protect ourselves from
-    // interference with mutators because they can't
-    // manipulate the discovered reference lists nor affect
-    // the computed reachability of the referents, the
-    // only properties manipulated by the precleaning
-    // of these reference lists.
-    CMSTokenSyncWithLocks x(true /* is cms thread */,
-                            bitMapLock());
-    sample_eden();
-    // The following will yield to allow foreground
-    // collection to proceed promptly. XXX YSR:
-    // The code in this method may need further
-    // tweaking for better performance and some restructuring
-    // for cleaner interfaces.
-    rp->preclean_discovered_references(
-          rp->is_alive_non_header(), &keep_alive, &complete_trace,
-          &yield_cl);
-  }
-
-  if (clean_survivor) {  // preclean the active survivor space(s)
-    assert(_young_gen->kind() == Generation::DefNew ||
-           _young_gen->kind() == Generation::ParNew ||
-           _young_gen->kind() == Generation::ASParNew,
-         "incorrect type for cast");
-    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
-    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
-                             &_markBitMap, &_modUnionTable,
-                             &_markStack, &_revisitStack,
-                             true /* precleaning phase */);
-    CMSTokenSyncWithLocks ts(true /* is cms thread */,
-                             bitMapLock());
-    unsigned int before_count =
-      GenCollectedHeap::heap()->total_collections();
-    SurvivorSpacePrecleanClosure
-      sss_cl(this, _span, &_markBitMap, &_markStack,
-             &pam_cl, before_count, CMSYield);
-    dng->from()->object_iterate_careful(&sss_cl);
-    dng->to()->object_iterate_careful(&sss_cl);
-  }
-  MarkRefsIntoAndScanClosure
-    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
-             &_markStack, &_revisitStack, this, CMSYield,
-             true /* precleaning phase */);
-  // CAUTION: The following closure has persistent state that may need to
-  // be reset upon a decrease in the sequence of addresses it
-  // processes.
-  ScanMarkedObjectsAgainCarefullyClosure
-    smoac_cl(this, _span,
-      &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
-
-  // Preclean dirty cards in ModUnionTable and CardTable using
-  // appropriate convergence criterion;
-  // repeat CMSPrecleanIter times unless we find that
-  // we are losing.
-  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
-  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
-         "Bad convergence multiplier");
-  assert(CMSPrecleanThreshold >= 100,
-         "Unreasonably low CMSPrecleanThreshold");
-
-  size_t numIter, cumNumCards, lastNumCards, curNumCards;
-  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
-       numIter < CMSPrecleanIter;
-       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
-    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
-    if (CMSPermGenPrecleaningEnabled) {
-      curNumCards  += preclean_mod_union_table(_permGen, &smoac_cl);
-    }
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
-    }
-    // Either there are very few dirty cards, so re-mark
-    // pause will be small anyway, or our pre-cleaning isn't
-    // that much faster than the rate at which cards are being
-    // dirtied, so we might as well stop and re-mark since
-    // precleaning won't improve our re-mark time by much.
-    if (curNumCards <= CMSPrecleanThreshold ||
-        (numIter > 0 &&
-         (curNumCards * CMSPrecleanDenominator >
-         lastNumCards * CMSPrecleanNumerator))) {
-      numIter++;
-      cumNumCards += curNumCards;
-      break;
-    }
-  }
-  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
-  if (CMSPermGenPrecleaningEnabled) {
-    curNumCards += preclean_card_table(_permGen, &smoac_cl);
-  }
-  cumNumCards += curNumCards;
-  if (PrintGCDetails && PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
-		  curNumCards, cumNumCards, numIter);
-  }
-  return cumNumCards;   // as a measure of useful work done
-}
-
-// PRECLEANING NOTES:
-// Precleaning involves:
-// . reading the bits of the modUnionTable and clearing the set bits.
-// . For the cards corresponding to the set bits, we scan the
-//   objects on those cards. This means we need the free_list_lock
-//   so that we can safely iterate over the CMS space when scanning
-//   for oops.
-// . When we scan the objects, we'll be both reading and setting
-//   marks in the marking bit map, so we'll need the marking bit map.
-// . For protecting _collector_state transitions, we take the CGC_lock.
-//   Note that any races in the reading of of card table entries by the
-//   CMS thread on the one hand and the clearing of those entries by the
-//   VM thread or the setting of those entries by the mutator threads on the
-//   other are quite benign. However, for efficiency it makes sense to keep
-//   the VM thread from racing with the CMS thread while the latter is
-//   dirty card info to the modUnionTable. We therefore also use the
-//   CGC_lock to protect the reading of the card table and the mod union
-//   table by the CM thread.
-// . We run concurrently with mutator updates, so scanning
-//   needs to be done carefully  -- we should not try to scan
-//   potentially uninitialized objects.
-//
-// Locking strategy: While holding the CGC_lock, we scan over and
-// reset a maximal dirty range of the mod union / card tables, then lock
-// the free_list_lock and bitmap lock to do a full marking, then
-// release these locks; and repeat the cycle. This allows for a
-// certain amount of fairness in the sharing of these locks between
-// the CMS collector on the one hand, and the VM thread and the
-// mutators on the other.
-
-// NOTE: preclean_mod_union_table() and preclean_card_table()
-// further below are largely identical; if you need to modify
-// one of these methods, please check the other method too.
-
// Preclean dirty cards recorded in the mod union table for generation
// "gen": repeatedly extract a maximal contiguous dirty range from the
// MUT (simultaneously clearing it, under CMSTokenSync), then carefully
// rescan the objects covered by those cards with closure "cl" while
// holding the freelist and bitmap locks. If the careful iteration stops
// early (unparsable object, or an abortable preclean that should stop),
// the unscanned cards are redirtied in the MUT before continuing or
// breaking out. Returns the cumulative number of dirty cards processed,
// as a measure of useful work done.
size_t CMSCollector::preclean_mod_union_table(
  ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  assert(_markStack.isEmpty(), "should be empty");
  assert(overflow_list_is_empty(), "should be empty");
  assert(no_preserved_marks(), "no preserved marks");
  // strategy: starting with the first card, accumulate contiguous
  // ranges of dirty cards; clear these cards, then scan the region
  // covered by these cards.

  // Since all of the MUT is committed ahead, we can just use
  // that, in case the generations expand while we are precleaning.
  // It might also be fine to just use the committed part of the
  // generation, but we might potentially miss cards when the
  // generation is rapidly expanding while we are in the midst
  // of precleaning.
  HeapWord* startAddr = gen->reserved().start();
  HeapWord* endAddr   = gen->reserved().end();

  cl->setFreelistLock(gen->freelistLock());   // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *nextAddr, *lastAddr;
  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark   hm;

    MemRegion dirtyRegion;
    {
      // Hold the CGC token while reading and clearing MUT bits so we
      // do not race with the VM thread (see "PRECLEANING NOTES" above).
      CMSTokenSync ts(true);
      sample_eden();

      if (PrintGCDetails) {
        startTimer();
      }

      // Get dirty region starting at nextOffset (inclusive),
      // simultaneously clearing it.
      dirtyRegion = 
        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    // Remember where the next search should begin.
    // The returned region (if non-empty) is a right open interval,
    // so lastOffset is obtained from the right end of that
    // interval.
    lastAddr = dirtyRegion.end();
    // Should do something more transparent and less hacky XXX
    numDirtyCards =
      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());

    // We'll scan the cards in the dirty region (with periodic
    // yields for foreground GC as needed).
    if (!dirtyRegion.is_empty()) {
      if (PrintGCDetails) {
        stopTimer();
      }
      assert(numDirtyCards > 0, "consistency check");
      HeapWord* stop_point = NULL;
      {
        // Scanning requires both the freelist lock (to iterate the CMS
        // space safely) and the bitmap lock (marks are read and set).
        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
                                 bitMapLock());
        assert(_markStack.isEmpty(), "should be empty");
        assert(overflow_list_is_empty(), "should be empty");
        assert(no_preserved_marks(), "no preserved marks");
        sample_eden();
        if (PrintGCDetails) {
          startTimer();
        }
        stop_point =
          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      }
      if (stop_point != NULL) {
        // The careful iteration stopped early either because it found an
        // uninitialized object, or because we were in the midst of an
        // "abortable preclean", which should now be aborted. Redirty
        // the bits corresponding to the partially-scanned or unscanned
        // cards. We'll either restart at the next block boundary or
        // abort the preclean.
        assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");

        CMSTokenSyncWithLocks ts(true, bitMapLock());
        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up;
          // might need bitMapLock in order to read P-bits.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
      if (PrintGCDetails) {
        stopTimer();
      }
    } else {
      // Empty dirty region: the MUT had no set bits from nextAddr on.
      assert(lastAddr == endAddr, "consistency check");
      assert(numDirtyCards == 0, "consistency check");
      break;
    }
  }
  if (PrintGCDetails) {
    stopTimer();
  }
  assert(_markStack.isEmpty(), "should be empty");
  assert(overflow_list_is_empty(), "should be empty");
  assert(no_preserved_marks(), "no preserved marks");
  return cumNumDirtyCards;
}
-
-// NOTE: preclean_mod_union_table() above and preclean_card_table()
-// below are largely identical; if you need to modify
-// one of these methods, please check the other method too.
-
// Preclean dirty cards recorded in the card table for generation "gen".
// Analogous to preclean_mod_union_table() above, but driven by the card
// table itself: dirty ranges are obtained (and marked precleaned) via
// dirty_card_range_after_preclean(), and on early termination the
// unscanned cards are redirtied with ct_bs()->invalidate(). Returns the
// cumulative number of dirty cards processed.
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table above, in that
  // we accumulate contiguous ranges of dirty cards, mark these cards
  // precleaned, then scan the region covered by these cards.
  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());

  cl->setFreelistLock(gen->freelistLock());   // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *lastAddr, *nextAddr;

  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark   hm;

    MemRegion dirtyRegion;
    {
      // See comments in "Precleaning notes" above on why we
      // do this locking. XXX Could the locking overheads be
      // too high when dirty cards are sparse? [I don't think so.]
      CMSTokenSync x(true); // is cms thread
      sample_eden();

      if (PrintGCDetails) {
        startTimer();
      }

      // Get and clear dirty region from card table
      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
                                    MemRegion(nextAddr, endAddr));
      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    // Next search resumes at the right (open) end of the returned range.
    lastAddr = dirtyRegion.end();
    numDirtyCards =
      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;

    if (!dirtyRegion.is_empty()) {
      if (PrintGCDetails) {
        stopTimer();
      }
      // Scanning needs the freelist lock (safe CMS-space iteration)
      // and the bitmap lock (marks are read and set).
      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
      sample_eden();
      assert(_markStack.isEmpty(), "should be empty");
      assert(overflow_list_is_empty(), "should be empty");
      assert(no_preserved_marks(), "no preserved marks");
      if (PrintGCDetails) {
        startTimer();
      }
      HeapWord* stop_point =
        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      if (stop_point != NULL) {
        // The careful iteration stopped early because it found an
        // uninitialized object.  Redirty the bits corresponding to the
        // partially-scanned or unscanned cards, and start again at the
        // next block boundary.
        assert(CMSPermGenPrecleaningEnabled ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");
        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
      if (PrintGCDetails) {
        stopTimer();
      }
    } else {
      break;
    }
  }
  if (PrintGCDetails) {
    stopTimer();
  }
  assert(_markStack.isEmpty(), "should be empty");
  assert(overflow_list_is_empty(), "should be empty");
  assert(no_preserved_marks(), "no preserved marks");
  return cumNumDirtyCards;
}
-
// The final (second) checkpoint of a CMS cycle. Runs at a safepoint
// with the world stopped, in state FinalMarking. In the asynchronous
// case it may first scavenge the young generation
// (CMSScavengeBeforeRemark) to shrink the remark pause, then performs
// the remark work under the freelist and bitmap locks; in the
// synchronous case the caller already holds all the needed locks.
void CMSCollector::checkpointRootsFinal(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  assert(_collectorState == FinalMarking, "incorrect state transition?");
  check_correct_thread_executing();
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  SpecializationStats::clear();
  if (PrintGCDetails) {
    gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                        _young_gen->used() / K,
                        _young_gen->capacity() / K);
  }
  if (asynch) {
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set flag to false, GCH->do_collection will
      // expect it to be false and set to true
      FlagSetting fl(gch->_is_gc_active, false);
      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", 
	PrintGCDetails && Verbose, true, gclog_or_tty);)
      // Collect only the generations below the CMS generation.
      int level = _cmsGen->level() - 1;
      if (level >= 0) {
        gch->do_collection(true,        // full (i.e. force, see below)
                           false,       // !clear_all_soft_refs
                           0,           // size
                           false,       // is_tlab
                           level        // max_level
                          );
      }
    }
    FreelistLocker x(this);
    MutexLockerEx y(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    assert(!init_mark_was_synchronous, "but that's impossible!");
    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
  } else {
    // already have all the locks
    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
                             init_mark_was_synchronous);
  }
  SpecializationStats::print();
}
-
// Body of the final checkpoint. With the freelist and bitmap locks
// held: rescans roots and the objects on cards dirtied since the
// initial checkpoint (via the mod union table), either in parallel or
// serially; then does reference processing; then takes remedial action
// for any marking-stack / work-queue overflow events seen this cycle;
// finally transitions _collectorState to Sweeping.
void CMSCollector::checkpointRootsFinalWork(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {

  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_begin();
  }

  ResourceMark rm;
  HandleMark   hm;

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  if (CMSClassUnloadingEnabled) {
    CodeCache::gc_prologue();
  }
  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (!init_mark_was_synchronous) {
    if (CMSScavengeBeforeRemark) {
      // Heap already made parsable as a result of scavenge
    } else {
      gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
    }
    // Update the saved marks which may affect the root scans.
    gch->save_marks();
  
    {
      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  
      // Note on the role of the mod union table:
      // Since the marker in "markFromRoots" marks concurrently with
      // mutators, it is possible for some reachable objects not to have been
      // scanned. For instance, an only reference to an object A was
      // placed in object B after the marker scanned B. Unless B is rescanned,
      // A would be collected. Such updates to references in marked objects
      // are detected via the mod union table which is the set of all cards
      // dirtied since the first checkpoint in this GC cycle and prior to
      // the most recent young generation GC, minus those cleaned up by the
      // concurrent precleaning.
      if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
        do_remark_parallel();
      } else {
        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
                    gclog_or_tty);
        do_remark_non_parallel();
      }
    }
  } else {
    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
    // The initial mark was stop-world, so there's no rescanning to
    // do; go straight on to the next step below.
  }
  assert(_markStack.isEmpty(), "should be empty");
  assert(overflow_list_is_empty(), "should be empty");
  assert(no_preserved_marks(), "no preserved marks");

  {
    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
    refProcessingWork(asynch, clear_all_soft_refs);
  }
  assert(_markStack.isEmpty(), "should be empty");
  assert(overflow_list_is_empty(), "should be empty");
  assert(no_preserved_marks(), "no preserved marks");

  if (CMSClassUnloadingEnabled) {
    CodeCache::gc_epilogue();
  }

  // If we encountered any (marking stack / work queue) overflow
  // events during the current CMS cycle, take appropriate
  // remedial measures, where possible, so as to try and avoid
  // recurrence of that condition.
  assert(_markStack.isEmpty(), "No grey objects");
  // Serial-phase overflows: expand the mark stack and reset counters.
  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
                     _ser_kac_ovflw;
  if (ser_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Marking stack overflow (benign) "
        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
        _ser_kac_ovflw);
    }
    _markStack.expand();
    _ser_pmc_remark_ovflw = 0;
    _ser_pmc_preclean_ovflw = 0;
    _ser_kac_ovflw = 0;
  }
  // Parallel-phase overflows: counters are reset; no expansion done here.
  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Work queue overflow (benign) "
        "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _par_pmc_remark_ovflw, _par_kac_ovflw);
    }
    _par_pmc_remark_ovflw = 0;
    _par_kac_ovflw = 0;
  }
  if (PrintCMSStatistics != 0) {
     if (_markStack._hit_limit > 0) {
       gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
                              _markStack._hit_limit);
     }
     if (_markStack._failed_double > 0) {
       gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
                              " current capacity "SIZE_FORMAT,
                              _markStack._failed_double,
                              _markStack.capacity());
     }
  }
  _markStack._hit_limit = 0;
  _markStack._failed_double = 0;

  if (VerifyAfterGC || VerifyDuringGC) {
    verify_after_remark();
  }

  // Change under the freelistLocks.
  _collectorState = Sweeping;
  // Call isAllClear() under bitMapLock
  assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
    " final marking");
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
  }
}
-
-// Parallel remark task
// Gang task performing the remark phase in parallel: each worker (see
// work(i) below) rescans young gen chunks, the remaining strong roots,
// and dirty cards in the CMS and perm spaces, then steals work from
// sibling queues and drains the overflow list until global termination.
class CMSParRemarkTask: public AbstractGangTask {
  CMSCollector* _collector;   // owning collector (shared state, stats)
  WorkGang*     _workers;
  int           _n_workers;
  CompactibleFreeListSpace* _cms_space;
  CompactibleFreeListSpace* _perm_space;

  // The per-thread work queues, available here for stealing.
  OopTaskQueueSet*       _task_queues;
  ParallelTaskTerminator _term;   // coordinates termination across workers

 public:
  CMSParRemarkTask(CMSCollector* collector,
                   CompactibleFreeListSpace* cms_space,
                   CompactibleFreeListSpace* perm_space,
                   int n_workers, WorkGang* workers,
                   OopTaskQueueSet* task_queues):
    AbstractGangTask("Rescan roots and grey objects in parallel"),
    _collector(collector),
    _cms_space(cms_space), _perm_space(perm_space),
    _n_workers(n_workers),
    _workers(workers),
    _task_queues(task_queues),
    _term(workers->total_workers(), task_queues) { }

  OopTaskQueueSet* task_queues() { return _task_queues; }

  // Work queue belonging to worker i.
  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }

  // Entry point executed by each gang worker; i is the worker id.
  void work(int i);

 private:
  // Work method in support of parallel rescan ... of young gen spaces
  void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);

  // ... of  dirty cards in old space
  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
                                  Par_MarkRefsIntoAndScanClosure* cl);

  // ... work stealing for the above
  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
};
-
// Per-worker driver for the parallel remark: young gen roots first
// (likely the critical path), then remaining strong roots, then
// dirty-card rescan of the CMS and perm spaces, and finally work
// stealing plus overflow-list draining. Each phase is timed separately
// for PrintCMSStatistics reporting.
void CMSParRemarkTask::work(int i) {
  elapsedTimer _timer;
  ResourceMark rm;
  HandleMark   hm;

  // ---------- rescan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
    _collector->_span, _collector->ref_processor(),
    &(_collector->_markBitMap),
    work_queue(i), &(_collector->_revisitStack));

  // Rescan young gen roots first since these are likely
  // coarsely partitioned and may, on that account, constitute
  // the critical path; thus, it's best to start off that
  // work first.
  // ---------- young gen roots --------------
  {
    DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
    EdenSpace* eden_space = dng->eden();
    ContiguousSpace* from_space = dng->from();
    ContiguousSpace* to_space   = dng->to();

    // Sampled chunk boundaries used to partition eden and the
    // from-space for parallel scanning.
    HeapWord** eca = _collector->_eden_chunk_array;
    size_t     ect = _collector->_eden_chunk_index;
    HeapWord** sca = _collector->_survivor_chunk_array;
    size_t     sct = _collector->_survivor_chunk_index;

    assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
    assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");

    // To-space has no samples (single task); from-space and eden use
    // the sampled boundaries above.
    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);

    _timer.stop();
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(
        "Finished young gen rescan work in %dth thread: %3.3f sec",
        i, _timer.seconds());
    }
  }

  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
				false,     // yg was scanned above
				true,      // collecting perm gen
                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
				NULL, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished remaining root rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- rescan dirty cards ------------
  _timer.reset();
  _timer.start();

  // Do the rescan tasks for each of the two spaces
  // (cms_space and perm_space) in turn.
  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished dirty card rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- steal work from other threads ...
  // ---------- ... and drain overflow list.
  _timer.reset();
  _timer.start();
  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished work stealing in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }
}
-
void
// Claim and scan chunks of a young-gen space in parallel. chunk_array
// holds chunk_top sampled boundary addresses that partition the space
// into chunk_top+1 scan regions; chunk_top == 0 means no samples were
// taken and the whole space is a single task.
// NOTE(review): parameter i (worker id) is not used in this body.
CMSParRemarkTask::do_young_space_rescan(int i,
  Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
  HeapWord** chunk_array, size_t chunk_top) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  //   using chunk_array
  // . par_oop_iterate(cl) over that region

  ResourceMark rm;
  HandleMark   hm;

  SequentialSubTasksDone* pst = space->par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");

  int nth_task = 0;
  int n_tasks  = pst->n_tasks();

  HeapWord *start, *end;
  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // We claimed task # nth_task; compute its boundaries.
    if (chunk_top == 0) {  // no samples were taken
      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
      start = space->bottom();
      end   = space->top();
    } else if (nth_task == 0) {
      // First region: from the bottom to the first sampled boundary.
      start = space->bottom();
      end   = chunk_array[nth_task];
    } else if (nth_task < (jint)chunk_top) {
      // Interior region: between two consecutive sampled boundaries.
      assert(nth_task >= 1, "Control point invariant");
      start = chunk_array[nth_task - 1];
      end   = chunk_array[nth_task];
    } else {
      // Last region: from the last sampled boundary to the top.
      assert(nth_task == (jint)chunk_top, "Control point invariant");
      start = chunk_array[chunk_top - 1];
      end   = space->top();
    }
    MemRegion mr(start, end);
    // Verify that mr is in space
    assert(mr.is_empty() || space->used_region().contains(mr),
           "Should be in space");
    // Verify that "start" is an object boundary
    assert(mr.is_empty() || oop(mr.start())->is_oop(),
           "Should be an oop");
    space->par_oop_iterate(mr, cl);
  }
  pst->all_tasks_completed();
}
-
void
// Claim aligned chunks of space "sp" and, for each chunk, transfer its
// dirty card-table bits into the mod union table, then rescan (and
// clear) the marked objects on those dirty MUT bits.
CMSParRemarkTask::do_dirty_card_rescan_tasks(
  CompactibleFreeListSpace* sp, int i,
  Par_MarkRefsIntoAndScanClosure* cl) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  // . transfer dirty bits ct->mut for that region
  // . apply rescanclosure to dirty mut bits for that region

  ResourceMark rm;
  HandleMark   hm;

  OopTaskQueue* work_q = work_queue(i);
  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
  // CAUTION: This closure has state that persists across calls to
  // the work method dirty_range_iterate_clear() in that it has
  // embedded in it a (subtype of) UpwardsObjectClosure. The
  // use of that state in the embedded UpwardsObjectClosure instance
  // assumes that the cards are always iterated (even if in parallel
  // by several threads) in monotonically increasing order per each
  // thread. This is true of the implementation below which picks
  // card ranges (chunks) in monotonically increasing order globally
  // and, a-fortiori, in monotonically increasing order per thread
  // (the latter order being a subsequence of the former).
  // If the work code below is ever reorganized into a more chaotic
  // work-partitioning form than the current "sequential tasks"
  // paradigm, the use of that persistent state will have to be
  // revisited and modified appropriately. See also related
  // bug 4756801 work on which should examine this code to make
  // sure that the changes there do not run counter to the
  // assumptions made here and necessary for correctness and
  // efficiency. Note also that this code might yield inefficient
  // behaviour in the case of very large objects that span one or
  // more work chunks. Such objects would potentially be scanned 
  // several times redundantly. Work on 4756801 should try and
  // address that performance anomaly if at all possible. XXX
  MemRegion  full_span  = _collector->_span;
  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
  CMSMarkStack* rs = &(_collector->_revisitStack);   // shared
  MarkFromDirtyCardsClosure
    greyRescanClosure(_collector, full_span, // entire span of interest
                      sp, bm, work_q, rs, cl);

  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");
  int nth_task = 0;
  // Chunks are aligned so each covers a whole number of MUT words.
  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
  MemRegion span = sp->used_region();
  HeapWord* start_addr = span.start();
  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
                                           alignment);
  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
         start_addr, "Check alignment");
  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
         chunk_size, "Check alignment");

  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // Having claimed the nth_task, compute corresponding mem-region,
    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
    // The alignment restriction ensures that we do not need any
    // synchronization with other gang-workers while setting or
    // clearing bits in this chunk of the MUT.
    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
                                    start_addr + (nth_task+1)*chunk_size);
    // The last chunk's end might be way beyond end of the
    // used region. In that case pull back appropriately.
    if (this_span.end() > end_addr) {
      this_span.set_end(end_addr);
      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
    }
    // Iterate over the dirty cards covering this chunk, marking them
    // precleaned, and setting the corresponding bits in the mod union
    // table. Since we have been careful to partition at Card and MUT-word
    // boundaries no synchronization is needed between parallel threads.
    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
                                                 &modUnionClosure);

    // Having transferred these marks into the modUnionTable,
    // rescan the marked objects on the dirty cards in the modUnionTable.
    // Even if this is at a synchronous collection, the initial marking
    // may have been done during an asynchronous collection so there
    // may be dirty bits in the mod-union table.
    _collector->_modUnionTable.dirty_range_iterate_clear(
                  this_span, &greyRescanClosure);
    _collector->_modUnionTable.verifyNoOneBitsInRange(
                                 this_span.start(),
                                 this_span.end());
  }
  pst->all_tasks_completed();  // declare that i am done
}
-
// . see if we can share work_queues with ParNew? XXX
// Drain this worker's queue, then repeatedly: take work from the
// global overflow list, or steal from other workers' queues, until the
// terminator reports that all workers are done.
void
CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
                                int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  // Cap on how much of the overflow list to take per round, bounded by
  // a quarter of this queue's capacity.
  size_t num_from_overflow_list =
           MIN2((size_t)work_q->max_elems()/4,
                (size_t)ParGCDesiredObjsFromOverflowList);

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    cl->trim_queue(0);
    // Now check if there's any work in the overflow list
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q)) {
      // found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't since all of that could have
      // been already stolen from us.
      // "He giveth and He taketh away."
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(cl);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
        break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
         "Else our work is not yet done");
}
-
-// Return a thread-local PLAB recording array, as appropriate.
-void* CMSCollector::get_data_recorder(int thr_num) {
-  if (_survivor_plab_array != NULL &&
-      (CMSPLABRecordAlways ||
-       (_collectorState > Marking && _collectorState < FinalMarking))) {
-    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
-    ChunkArray* ca = &_survivor_plab_array[thr_num];
-    ca->reset();   // clear it so that fresh data is recorded
-    return (void*) ca;
-  } else {
-    return NULL;
-  }
-}
-
-// Reset all the thread-local PLAB recording arrays
-void CMSCollector::reset_survivor_plab_arrays() {
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _survivor_plab_array[i].reset();
-  }
-}
-
// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
// for CMS rescan.
// This is a k-way merge over the ParallelGCThreads sorted per-thread
// arrays, using _cursor[j] as the read position into thread j's array;
// the merged, sorted result is written to _survivor_chunk_array and its
// length recorded in _survivor_chunk_index.
void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
  assert(_survivor_plab_array  != NULL, "Error");
  assert(_survivor_chunk_array != NULL, "Error");
  assert(_collectorState == FinalMarking, "Error");
  for (uint j = 0; j < ParallelGCThreads; j++) {
    _cursor[j] = 0;
  }
  HeapWord* top = surv->top();
  size_t i;
  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
    HeapWord* min_val = top;          // Higher than any PLAB address
    uint      min_tid = 0;            // position of min_val this round
    for (uint j = 0; j < ParallelGCThreads; j++) {
      ChunkArray* cur_sca = &_survivor_plab_array[j];
      if (_cursor[j] == cur_sca->end()) {
        // Thread j's array is exhausted.
        continue;
      }
      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
      if (cur_val < min_val) {
        min_tid = j;
        min_val = cur_val;
      } else {
        assert(cur_val < top, "All recorded addresses should be less");
      }
    }
    // At this point min_val and min_tid are respectively
    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
    // and the thread (j) that witnesses that address.
    // We record this address in the _survivor_chunk_array[i]
    // and increment _cursor[min_tid] prior to the next round i.
    if (min_val == top) {
      // No thread produced a value below top: all arrays exhausted.
      break;
    }
    _survivor_chunk_array[i] = min_val;
    _cursor[min_tid]++;
  }
  // We are all done; record the size of the _survivor_chunk_array
  _survivor_chunk_index = i; // exclusive: [0, i)
  if (PrintCMSStatistics > 0) {
    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
  }
  // Verify that we used up all the recorded entries
  #ifdef ASSERT
    size_t total = 0;
    for (uint j = 0; j < ParallelGCThreads; j++) {
      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
      total += _cursor[j];
    }
    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
    // Check that the merged array is in sorted order
    if (total > 0) {
      // NOTE(review): this inner "i" shadows the merge loop's "i" above;
      // consider renaming for clarity.
      for (size_t i = 0; i < total - 1; i++) {
        if (PrintCMSStatistics > 0) {
          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
                              i, _survivor_chunk_array[i]);
        }
        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
               "Not sorted");
      }
    }
  #endif // ASSERT
}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel rescan of young gen.
-// See ParRescanTask where this is currently used.
-void
-CMSCollector::
-initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
-
-  // Eden space
-  {
-    SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
-    assert(!pst->valid(), "Clobbering existing data?");
-    // Each valid entry in [0, _eden_chunk_index) represents a task.
-    size_t n_tasks = _eden_chunk_index + 1;
-    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
-    pst->set_par_threads(n_threads);
-    pst->set_n_tasks((int)n_tasks);
-  }
-
-  // Merge the survivor plab arrays into _survivor_chunk_array
-  if (_survivor_plab_array != NULL) {
-    merge_survivor_plab_arrays(dng->from());
-  } else {
-    assert(_survivor_chunk_index == 0, "Error");
-  }
-
-  // To space
-  {
-    SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
-    assert(!pst->valid(), "Clobbering existing data?");
-    pst->set_par_threads(n_threads);
-    pst->set_n_tasks(1);
-    assert(pst->valid(), "Error");
-  }
-
-  // From space
-  {
-    SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
-    assert(!pst->valid(), "Clobbering existing data?");
-    size_t n_tasks = _survivor_chunk_index + 1;
-    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
-    pst->set_par_threads(n_threads);
-    pst->set_n_tasks((int)n_tasks);
-    assert(pst->valid(), "Error");
-  }
-}
-
-// Parallel version of remark
-void CMSCollector::do_remark_parallel() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  int n_workers = workers->total_workers();
-  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
-  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
-
-  CMSParRemarkTask tsk(this,
-    cms_space, perm_space,
-    n_workers, workers, task_queues());
-
-  // Set up for parallel process_strong_roots work.
-  gch->set_par_threads(n_workers);
-  gch->change_strong_roots_parity();
-  // We won't be iterating over the cards in the card table updating
-  // the younger_gen cards, so we shouldn't call the following else
-  // the verification code as well as subsequent younger_refs_iterate
-  // code would get confused. XXX
-  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
-
-  // The young gen rescan work will not be done as part of
-  // process_strong_roots (which currently doesn't knw how to
-  // parallelize such a scan), but rather will be broken up into
-  // a set of parallel tasks (via the sampling that the [abortable]
-  // preclean phase did of EdenSpace, plus the [two] tasks of
-  // scanning the [two] survivor spaces. Further fine-grain
-  // parallelization of the scanning of the survivor spaces
-  // themselves, and of precleaning of the younger gen itself
-  // is deferred to the future.
-  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
-
-  // The dirty card rescan work is broken up into a "sequence"
-  // of parallel tasks (per constituent space) that are dynamically
-  // claimed by the parallel threads.
-  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
-  perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
-  
-  // It turns out that even when we're using 1 thread, doing the work in a
-  // separate thread causes wide variance in run times.  We can't help this
-  // in the multi-threaded case, but we special-case n=1 here to get
-  // repeatable measurements of the 1-thread overhead of the parallel code.
-  if (n_workers > 1) {
-    // Make refs discovery MT-safe
-    ReferenceProcessorMTMutator mt(ref_processor(), true);
-    workers->run_task(&tsk);
-  } else {
-    tsk.work(0);
-  }
-  gch->set_par_threads(0);  // 0 ==> non-parallel.
-  // restore, single-threaded for now, any preserved marks
-  // as a result of work_q overflow
-  restore_preserved_marks_if_any();
-}
-
-// Non-parallel version of remark
-void CMSCollector::do_remark_non_parallel() {
-  ResourceMark rm;
-  HandleMark   hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  MarkRefsIntoAndScanClosure
-    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
-             &_markStack, &_revisitStack, this,
-             false /* should_yield */, false /* not precleaning */);
-  MarkFromDirtyCardsClosure
-    markFromDirtyCardsClosure(this, _span,
-                              NULL,  // space is set further below
-                              &_markBitMap, &_markStack, &_revisitStack,
-                              &mrias_cl);
-  {
-    TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
-    // Iterate over the dirty cards, marking them precleaned, and
-    // setting the corresponding bits in the mod union table.
-    {
-      ModUnionClosure modUnionClosure(&_modUnionTable);
-      _ct->ct_bs()->dirty_card_iterate(
-                      _cmsGen->used_region(),
-                      &modUnionClosure);
-      _ct->ct_bs()->dirty_card_iterate(
-                      _permGen->used_region(),
-                      &modUnionClosure);
-    }
-    // Having transferred these marks into the modUnionTable, we just need
-    // to rescan the marked objects on the dirty cards in the modUnionTable.
-    // The initial marking may have been done during an asynchronous
-    // collection so there may be dirty bits in the mod-union table.
-    const int alignment =
-      CardTableModRefBS::card_size * BitsPerWord;
-    { 
-      // ... First handle dirty cards in CMS gen
-      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
-      MemRegion ur = _cmsGen->used_region();
-      HeapWord* lb = ur.start();
-      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
-      MemRegion cms_span(lb, ub);
-      _modUnionTable.dirty_range_iterate_clear(cms_span,
-                                               &markFromDirtyCardsClosure);
-      assert(_markStack.isEmpty(), "mark stack should be empty");
-      assert(overflow_list_is_empty(), "overflow list should be empty");
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
-          markFromDirtyCardsClosure.num_dirty_cards());
-      }
-    } 
-    {
-      // .. and then repeat for dirty cards in perm gen
-      markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
-      MemRegion ur = _permGen->used_region();
-      HeapWord* lb = ur.start();
-      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
-      MemRegion perm_span(lb, ub);
-      _modUnionTable.dirty_range_iterate_clear(perm_span,
-                                               &markFromDirtyCardsClosure);
-      assert(_markStack.isEmpty(), "mark stack should be empty");
-      assert(overflow_list_is_empty(), "overflow list should be empty");
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
-          markFromDirtyCardsClosure.num_dirty_cards());
-      }
-    }
-  }
-  if (VerifyDuringGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(true);
-  }
-  {
-    TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
-
-    assert(_markStack.isEmpty(), "should be empty");
-    assert(overflow_list_is_empty(), "overflow list should be empty");
-  
-    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-    gch->gen_process_strong_roots(_cmsGen->level(),
-				  true,  // younger gens as roots
-				  true,  // collecting perm gen
-                                  SharedHeap::ScanningOption(roots_scanning_options()),
-				  NULL, &mrias_cl);
-  }
-  assert(_markStack.isEmpty(), "should be empty");
-  assert(overflow_list_is_empty(), "overflow list should be empty");
-  // Restore evacuated mark words, if any, used for overflow list links
-  if (!CMSOverflowEarlyRestoration) {
-    restore_preserved_marks_if_any();
-  }
-  assert(no_preserved_marks(), "no preserved marks");
-}
-
-////////////////////////////////////////////////////////
-// Parallel Reference Processing Task Proxy Class
-////////////////////////////////////////////////////////
-class CMSRefProcTaskProxy: public AbstractGangTask {
-  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-  CMSCollector*          _collector;
-  CMSBitMap*             _mark_bit_map;
-  MemRegion              _span;
-  OopTaskQueueSet*       _task_queues;
-  ParallelTaskTerminator _term;
-  ProcessTask&           _task;
-
-public:
-  CMSRefProcTaskProxy(ProcessTask&     task,
-                      CMSCollector*    collector,
-                      const MemRegion& span,
-                      CMSBitMap*       mark_bit_map,
-                      int              total_workers,
-                      OopTaskQueueSet* task_queues):
-    AbstractGangTask("Process referents by policy in parallel"),
-    _task(task),
-    _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
-    _task_queues(task_queues),
-    _term(total_workers, task_queues)
-    { }
-
-  OopTaskQueueSet* task_queues() { return _task_queues; }
-
-  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
-  ParallelTaskTerminator* terminator() { return &_term; }
-  
-  void do_work_steal(int i,
-                     CMSParDrainMarkingStackClosure* drain,
-                     CMSParKeepAliveClosure* keep_alive,
-                     int* seed);
-
-  virtual void work(int i);
-};
-
-void CMSRefProcTaskProxy::work(int i) {
-  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
-                                        _mark_bit_map, work_queue(i));
-  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
-                                                 _mark_bit_map, work_queue(i));
-  CMSIsAliveClosure is_alive_closure(_mark_bit_map);
-  _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
-  if (_task.marks_oops_alive()) {
-    do_work_steal(i, &par_drain_stack, &par_keep_alive,
-                  _collector->hash_seed(i));
-  }
-}
-
-class CMSRefEnqueueTaskProxy: public AbstractGangTask {
-  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
-  EnqueueTask& _task;
-
-public:
-  CMSRefEnqueueTaskProxy(EnqueueTask& task)
-    : AbstractGangTask("Enqueue reference objects in parallel"),
-      _task(task)
-  { }
-
-  virtual void work(int i)
-  {
-    _task.work(i);
-  }
-};
-
-CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
-  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
-   _collector(collector),
-   _span(span),
-   _bit_map(bit_map),
-   _work_queue(work_queue),
-   _mark_and_push(collector, span, bit_map, work_queue),
-   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
-                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
-{ }
-
-// . see if we can share work_queues with ParNew? XXX
-void CMSRefProcTaskProxy::do_work_steal(int i,
-  CMSParDrainMarkingStackClosure* drain,
-  CMSParKeepAliveClosure* keep_alive,
-  int* seed) {
-  OopTaskQueue* work_q = work_queue(i);
-  NOT_PRODUCT(int num_steals = 0;)
-  oop obj_to_scan;
-
-  while (true) {
-    // Completely finish any left over work from (an) earlier round(s)
-    drain->trim_queue(0);
-    // Verify that we have no work before we resort to stealing
-    assert(work_q->size() == 0, "Have work, shouldn't steal");
-    // Try to steal from other queues that have work
-    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
-      NOT_PRODUCT(num_steals++;)
-      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
-      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
-      // Do scanning work
-      obj_to_scan->oop_iterate(keep_alive);
-      // Loop around, finish this work, and try to steal some more
-    } else if (terminator()->offer_termination()) {
-      break;  // nirvana from the infinite cycle
-    }
-  }
-  NOT_PRODUCT(
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
-    }
-  )
-}
-
-void CMSRefProcTaskExecutor::execute(ProcessTask& task)
-{
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  int n_workers = workers->total_workers();
-  CMSRefProcTaskProxy rp_task(task, &_collector, 
-                              _collector.ref_processor()->span(), 
-                              _collector.markBitMap(), 
-                              n_workers, _collector.task_queues());
-  workers->run_task(&rp_task);
-}
-
-void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
-{
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  CMSRefEnqueueTaskProxy enq_task(task);
-  workers->run_task(&enq_task);
-}
-
-void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
-
-  ResourceMark rm;
-  HandleMark   hm;
-  ReferencePolicy* soft_ref_policy;
-
-  assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
-  // Process weak references.
-  if (clear_all_soft_refs) {
-    soft_ref_policy = new AlwaysClearPolicy();
-  } else {
-#ifdef COMPILER2
-    soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-    soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-  }
-  assert(_markStack.isEmpty(), "mark stack should be empty");
-  assert(overflow_list_is_empty(), "overflow list should be empty");
-
-  ReferenceProcessor* rp = ref_processor();
-  assert(rp->span().equals(_span), "Spans should be equal");
-  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack);
-  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
-                                _span, &_markBitMap, &_markStack,
-                                &cmsKeepAliveClosure);
-  {
-    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
-    if (rp->processing_is_mt()) {
-      CMSRefProcTaskExecutor task_executor(*this);
-      rp->process_discovered_references(soft_ref_policy, 
-                                        &_is_alive_closure,
-                                        &cmsKeepAliveClosure,
-                                        &cmsDrainMarkingStackClosure,
-                                        &task_executor);
-    } else {
-      rp->process_discovered_references(soft_ref_policy,
-                                        &_is_alive_closure,
-                                        &cmsKeepAliveClosure,
-                                        &cmsDrainMarkingStackClosure,
-                                        NULL);
-    }
-    assert(_markStack.isEmpty(), "mark stack should be empty");
-    assert(overflow_list_is_empty(), "overflow list should be empty");
-  }
-
-  if (CMSClassUnloadingEnabled) {
-    {
-      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
-
-      // Follow SystemDictionary roots and unload classes
-      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
-
-      // Follow CodeCache roots and unload any methods marked for unloading
-      CodeCache::do_unloading(&_is_alive_closure,
-                              &cmsKeepAliveClosure,
-                              purged_class);
-
-      cmsDrainMarkingStackClosure.do_void();
-      assert(_markStack.isEmpty(), "just drained");
-      assert(overflow_list_is_empty(), "just drained");
-
-      // Update subklass/sibling/implementor links in KlassKlass descendants
-      assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
-      oop k;
-      while ((k = _revisitStack.pop()) != NULL) {
-        ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
-                       &_is_alive_closure,
-                       &cmsKeepAliveClosure);
-      }
-      assert(!ClassUnloading ||
-             (_markStack.isEmpty() && overflow_list_is_empty()),
-             "Should not have found new reachable objects");
-      assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
-      cmsDrainMarkingStackClosure.do_void();
-      assert(_markStack.isEmpty(), "just drained");
-      assert(overflow_list_is_empty(), "just drained");
-  
-    }
-     
-    {
-      TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
-      // Now clean up stale oops in SymbolTable and StringTable
-      SymbolTable::unlink(&_is_alive_closure);
-      StringTable::unlink(&_is_alive_closure);
-    }
-  }
-
-  assert(_markStack.isEmpty(), "tautology");
-  assert(overflow_list_is_empty(), "tautology");
-  // Restore any preserved marks as a result of mark stack or
-  // work queue overflow
-  restore_preserved_marks_if_any();  // done single-threaded for now
-
-  rp->set_enqueuing_is_done(true);
-  if (rp->processing_is_mt()) {
-    CMSRefProcTaskExecutor task_executor(*this);
-    rp->enqueue_discovered_references(&task_executor);
-  } else {
-    rp->enqueue_discovered_references(NULL);
-  }
-  rp->verify_no_references_recorded();
-  assert(!rp->discovery_enabled(), "should have been disabled");
-
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
-}
-
-#ifndef PRODUCT
-void CMSCollector::check_correct_thread_executing() {
-  Thread* t = Thread::current();
-  // Only the VM thread or the CMS thread should be here.
-  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
-         "Unexpected thread type");
-  // If this is the vm thread, the foreground process 
-  // should not be waiting.  Note that _foregroundGCIsActive is 
-  // true while the foreground collector is waiting.
-  if (_foregroundGCShouldWait) {
-    // We cannot be the VM thread
-    assert(t->is_ConcurrentGC_thread(),
-           "Should be CMS thread");
-  } else {
-    // We can be the CMS thread only if we are in a stop-world
-    // phase of CMS collection.
-    if (t->is_ConcurrentGC_thread()) {
-      assert(_collectorState == InitialMarking ||
-             _collectorState == FinalMarking, 
-             "Should be a stop-world phase");
-      // The CMS thread should be holding the CMS_token.
-      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-             "Potential interference with concurrently "
-             "executing VM thread");
-    }
-  }
-}
-#endif
-
-void CMSCollector::sweep(bool asynch) {
-  assert(_collectorState == Sweeping, "just checking");
-  check_correct_thread_executing();
-  incrementSweepCount();
-  _sweep_timer.stop();
-  _sweep_estimate.sample(_sweep_timer.seconds());
-  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
-
-  // If perm gen sweeping is disabled, preserve the perm gen object "deadness"
-  // information in the perm_gen_verify_bit_map. In order to do that we traverse
-  // all blocks in perm gen and mark all dead objects.
-  if (verifying() && !CMSPermGenSweepingEnabled) {
-    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
-                             bitMapLock());
-    MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
-                               markBitMap(), perm_gen_verify_bit_map());
-    _permGen->cmsSpace()->blk_iterate(&mdo);
-  }
-
-  if (asynch) {
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
-    // First sweep the old gen then the perm gen
-    {
-      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
-                               bitMapLock());
-      sweepWork(_cmsGen, asynch);
-    }
-
-    // Now repeat for perm gen
-    if (CMSClassUnloadingEnabled && CMSPermGenSweepingEnabled) {
-      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
-                             bitMapLock());
-      sweepWork(_permGen, asynch);
-    }
-
-    // Update Universe::_heap_*_at_gc figures.
-    // We need all the free list locks to make the abstract state
-    // transition from Sweeping to Resetting. See detailed note
-    // further below.
-    {
-      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
-                               _permGen->freelistLock());
-      // Update heap occupancy information which is used as
-      // input to soft ref clearing policy at the next gc.
-      Universe::update_heap_info_at_gc();
-      _collectorState = Resizing;
-    }
-  } else {
-    // already have needed locks
-    sweepWork(_cmsGen,  asynch);
-
-    if (CMSClassUnloadingEnabled && CMSPermGenSweepingEnabled) {
-      sweepWork(_permGen, asynch);
-    }
-    // Update heap occupancy information which is used as
-    // input to soft ref clearing policy at the next gc.
-    Universe::update_heap_info_at_gc();
-    _collectorState = Resizing;
-  }
-  _sweep_timer.reset();
-  _sweep_timer.start();
-
-  update_time_of_last_gc(os::javaTimeMillis());
-
-  // NOTE on abstract state transitions:
-  // Mutators allocate-live and/or mark the mod-union table dirty
-  // based on the state of the collection.  The former is done in
-  // the interval [Marking, Sweeping] and the latter in the interval
-  // [Marking, Sweeping).  Thus the transitions into the Marking state
-  // and out of the Sweeping state must be synchronously visible 
-  // globally to the mutators.
-  // The transition into the Marking state happens with the world
-  // stopped so the mutators will globally see it.  Sweeping is
-  // done asynchronously by the background collector so the transition
-  // from the Sweeping state to the Resizing state must be done
-  // under the freelistLock (as is the check for whether to 
-  // allocate-live and whether to dirty the mod-union table).
-  assert(_collectorState == Resizing, "Change of collector state to"
-    " Resizing must be done under the freelistLocks (plural)");
-
-  // Now that sweeping has been completed, if the GCH's
-  // incremental_collection_will_fail flag is set, clear it,
-  // thus inviting a younger gen collection to promote into
-  // this generation. If such a promotion may still fail,
-  // the flag will be set again when a young collection is
-  // attempted.
-  // I think the incremental_collection_will_fail flag's use
-  // is specific to a 2 generation collection policy, so i'll
-  // assert that that's the configuration we are operating within.
-  // The use of the flag can and should be generalized appropriately
-  // in the future to deal with a general n-generation system.
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "Resetting of incremental_collection_will_fail flag"
-         " may be incorrect otherwise");
-  gch->clear_incremental_collection_will_fail();
-  gch->update_full_collections_completed(_collection_count_start);
-}
-
-// FIX ME!!! Looks like this belongs in CFLSpace, with
-// CMSGen merely delegating to it.
-void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
-  double nearLargestPercent = 0.999;
-  HeapWord*  minAddr        = _cmsSpace->bottom();
-  HeapWord*  largestAddr    = 
-    (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
-  if (largestAddr == 0) {
-    // The dictionary appears to be empty.  In this case 
-    // try to coalesce at the end of the heap.
-    largestAddr = _cmsSpace->end();
-  }
-  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
-  size_t nearLargestOffset =
-    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
-  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
-}
-
-bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
-  return addr >= _cmsSpace->nearLargestChunk();
-}
-
-FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
-  return _cmsSpace->find_chunk_at_end();
-}
-
-void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
-						    bool full) {
-  // The next lower level has been collected.  Gather any statistics
-  // that are of interest at this point.
-  if (!full && (current_level + 1) == level()) {
-    // Gather statistics on the young generation collection.
-    collector()->stats().record_gc0_end(used());
-  }
-}
-
-CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp; 
-}
-
-void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
-  }
-  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
-  _debug_collection_type = 
-    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
-  }
-}
-
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
-  bool asynch) {
-  // We iterate over the space(s) underlying this generation,
-  // checking the mark bit map to see if the bits corresponding
-  // to specific blocks are marked or not. Blocks that are
-  // marked are live and are not swept up. All remaining blocks
-  // are swept up, with coalescing on-the-fly as we sweep up
-  // contiguous free and/or garbage blocks:
-  // We need to ensure that the sweeper synchronizes with allocators
-  // and stop-the-world collectors. In particular, the following
-  // locks are used:
-  // . CMS token: if this is held, a stop the world collection cannot occur
-  // . freelistLock: if this is held no allocation can occur from this
-  //                 generation by another thread
-  // . bitMapLock: if this is held, no other thread can access or update
-  //
-    
-  // Note that we need to hold the freelistLock if we use
-  // block iterate below; else the iterator might go awry if
-  // a mutator (or promotion) causes block contents to change
-  // (for instance if the allocator divvies up a block).
-  // If we hold the free list lock, for all practical purposes
-  // young generation GC's can't occur (they'll usually need to
-  // promote), so we might as well prevent all young generation
-  // GC's while we do a sweeping step. For the same reason, we might
-  // as well take the bit map lock for the entire duration
-  
-  // check that we hold the requisite locks
-  assert(have_cms_token(), "Should hold cms token");
-  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
-         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
-        "Should possess CMS token to sweep");
-  assert_lock_strong(gen->freelistLock());
-  assert_lock_strong(bitMapLock());
-  
-  assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
-  gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
-                                      _sweep_estimate.padded_average());
-  gen->setNearLargestChunk();
-
-  {
-    SweepClosure sweepClosure(this, gen, &_markBitMap,
-                            CMSYield && asynch);
-    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
-    // We need to free-up/coalesce garbage/blocks from a
-    // co-terminal free run. This is done in the SweepClosure
-    // destructor; so, do not remove this scope, else the
-    // end-of-sweep-census below will be off by a little bit.
-  }
-  gen->cmsSpace()->sweep_completed();
-  gen->cmsSpace()->endSweepFLCensus(sweepCount());
-}
-
-// Reset CMS data structures (for now just the marking bit map)
-// preparatory for the next cycle.
-void CMSCollector::reset(bool asynch) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* sp = size_policy();
-  AdaptiveSizePolicyOutput(sp, gch->total_collections());
-  if (asynch) {
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-
-    // If the state is not "Resetting", the foreground  thread
-    // has done a collection and the resetting.
-    if (_collectorState != Resetting) {
-      assert(_collectorState == Idling, "The state should only change"
-	" because the foreground collector has finished the collection");
-      return;
-    }
-
-    // Clear the mark bitmap (no grey objects to start with)
-    // for the next cycle.
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
-
-    HeapWord* curAddr = _markBitMap.startWord();
-    while (curAddr < _markBitMap.endWord()) {
-      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr); 
-      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
-      _markBitMap.clear_large_range(chunk);
-      if (ConcurrentMarkSweepThread::should_yield() &&
-          !foregroundGCIsActive() &&
-          CMSYield) {
-        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-               "CMS thread should hold CMS token");
-        assert_lock_strong(bitMapLock());
-        bitMapLock()->unlock();
-        ConcurrentMarkSweepThread::desynchronize(true);
-        ConcurrentMarkSweepThread::acknowledge_yield_request();
-        stopTimer();
-        if (PrintCMSStatistics != 0) {
-          incrementYields();
-        }
-        icms_wait();
-
-	// See the comment in coordinator_yield()
-	for (unsigned i = 0; i < CMSYieldSleepCount &&
-	                ConcurrentMarkSweepThread::should_yield() &&
-	                !CMSCollector::foregroundGCIsActive(); ++i) {
-	  os::sleep(Thread::current(), 1, false);    
-	  ConcurrentMarkSweepThread::acknowledge_yield_request();
-	}
-
-        ConcurrentMarkSweepThread::synchronize(true);
-        bitMapLock()->lock_without_safepoint_check();
-        startTimer();
-      }
-      curAddr = chunk.end();
-    }
-    _collectorState = Idling;
-  } else {
-    // already have the lock
-    assert(_collectorState == Resetting, "just checking");
-    assert_lock_strong(bitMapLock());
-    _markBitMap.clear_all();
-    _collectorState = Idling;
-  }
-
-  // Stop incremental mode after a cycle completes, so that any future cycles
-  // are triggered by allocation.
-  stop_icms();
-
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes) {
-      _cmsGen->rotate_debug_collection_type();
-    }
-  )
-}
-
-void CMSCollector::do_CMS_operation(CMS_op_type op) {
-  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
-  TraceCollectorStats tcs(counters());
-
-  switch (op) {
-    case CMS_op_checkpointRootsInitial: {
-      checkpointRootsInitial(true);       // asynch
-      if (PrintGC) {
-        _cmsGen->printOccupancy("initial-mark");
-      }
-      break;
-    }
-    case CMS_op_checkpointRootsFinal: {
-      checkpointRootsFinal(true,    // asynch
-                           false,   // !clear_all_soft_refs
-                           false);  // !init_mark_was_synchronous
-      if (PrintGC) {
-        _cmsGen->printOccupancy("remark");
-      }
-      break;
-    }
-    default:
-      fatal("No such CMS_op");
-  }
-}
-
-#ifndef PRODUCT
-size_t const CMSCollector::skip_header_HeapWords() {
-  return FreeChunk::header_size();
-}
-
-// Try and collect here conditions that should hold when
-// CMS thread is exiting. The idea is that the foreground GC
-// thread should not be blocked if it wants to terminate
-// the CMS thread and yet continue to run the VM for a while
-// after that.
-void CMSCollector::verify_ok_to_terminate() const {
-  assert(Thread::current()->is_ConcurrentGC_thread(), 
-         "should be called by CMS thread");
-  assert(!_foregroundGCShouldWait, "should be false");
-  // We could check here that all the various low-level locks
-  // are not held by the CMS thread, but that is overkill; see
-  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
-  // is checked.
-}
-#endif
-
-size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
-  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
-         "missing Printezis mark?");
-  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
-  size_t size = pointer_delta(nextOneAddr + 1, addr);
-  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-         "alignment problem");
-  assert(size >= 3, "Necessary for Printezis marks to work");
-  return size;
-}
-
-// A variant of the above (block_size_using_printezis_bits()) except
-// that we return 0 if the P-bits are not yet set.
-size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
-  if (_markBitMap.isMarked(addr)) {
-    assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
-    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
-    size_t size = pointer_delta(nextOneAddr + 1, addr);
-    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-           "alignment problem");
-    assert(size >= 3, "Necessary for Printezis marks to work");
-    return size;
-  } else {
-    assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
-    return 0;
-  }
-}
-
-HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
-  size_t sz = 0;
-  oop p = (oop)addr;
-  if (p->klass() != NULL && p->is_parsable()) {
-    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
-  } else {
-    sz = block_size_using_printezis_bits(addr);
-  }
-  assert(sz > 0, "size must be nonzero");
-  HeapWord* next_block = addr + sz;
-  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
-                                             CardTableModRefBS::card_size);
-  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
-         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
-         "must be different cards");
-  return next_card;
-}
-
-
-// CMS Bit Map Wrapper /////////////////////////////////////////
-
-// Construct a CMS bit map infrastructure, but don't create the 
-// bit vector itself. That is done by a separate call CMSBitMap::allocate()
-// further below.
-CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
-  _bm(NULL,0),
-  _shifter(shifter),
-  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
-{
-  _bmStartWord = 0;
-  _bmWordSize  = 0;
-}
-
-bool CMSBitMap::allocate(MemRegion mr) {
-  _bmStartWord = mr.start();
-  _bmWordSize  = mr.word_size();
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("CMS bit map allocation failure");
-    return false;
-  }
-  // For now we'll just commit all of the bit map up fromt.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("CMS bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of CMS bit map?");
-  _bm.set_map((uintptr_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-
-  // bm.clear(); // can we rely on getting zero'd memory? verify below
-  assert(isAllClear(),
-         "Expected zero'd memory from ReservedSpace constructor");
-  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
-         "consistency check");
-  return true;
-}
-
-void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
-  HeapWord *next_addr, *end_addr, *last_addr;
-  assert_locked();
-  assert(covers(mr), "out-of-range error");
-  // XXX assert that start and end are appropriately aligned
-  for (next_addr = mr.start(), end_addr = mr.end();
-       next_addr < end_addr; next_addr = last_addr) {
-    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
-    last_addr = dirty_region.end();
-    if (!dirty_region.is_empty()) {
-      cl->do_MemRegion(dirty_region);
-    } else {
-      assert(last_addr == end_addr, "program logic");
-      return;
-    }
-  }
-}
-
-#ifndef PRODUCT
-void CMSBitMap::assert_locked() const {
-  CMSLockVerifier::assert_locked(lock());
-}
-
-bool CMSBitMap::covers(MemRegion mr) const {
-  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
-  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
-         "size inconsistency");
-  return (mr.start() >= _bmStartWord) &&
-         (mr.end()   <= endWord());
-}
-
-bool CMSBitMap::covers(HeapWord* start, size_t size) const {
-    return (start >= _bmStartWord && (start + size) <= endWord());
-}
-
-void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
-  // verify that there are no 1 bits in the interval [left, right)
-  FalseBitMapClosure falseBitMapClosure;
-  iterate(&falseBitMapClosure, left, right);
-}
-
-void CMSBitMap::region_invariant(MemRegion mr)
-{
-  assert_locked();
-  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
-  assert(!mr.is_empty(), "unexpected empty region");
-  assert(covers(mr), "mr should be covered by bit map");
-  // convert address range into offset range
-  size_t start_ofs = heapWordToOffset(mr.start());
-  // Make sure that end() is appropriately aligned
-  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
-                        (1 << (_shifter+LogHeapWordSize))),
-         "Misaligned mr.end()");
-  size_t end_ofs   = heapWordToOffset(mr.end());
-  assert(end_ofs > start_ofs, "Should mark at least one bit");
-}
-
-#endif
-
-bool CMSMarkStack::allocate(size_t size) {
-  // allocate a stack of the requisite depth
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
-                   size * sizeof(oop)));
-  if (!rs.is_reserved()) {
-    warning("CMSMarkStack allocation failure");
-    return false;
-  }
-  if (!_virtual_space.initialize(rs, rs.size())) {
-    warning("CMSMarkStack backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == rs.size(),
-         "didn't reserve backing store for all of CMS stack?");
-  _base = (oop*)(_virtual_space.low());
-  _index = 0;
-  _capacity = size;
-  NOT_PRODUCT(_max_depth = 0);
-  return true;
-}
-
-// XXX FIX ME !!! In the MT case we come in here holding a
-// leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recallibrate the two
-// lock-ranks involved in order to be able to rpint the
-// messages below. (Or defer the printing to the caller.
-// For now we take the expedient path of just disabling the
-// messages for the problematic case.)
-void CMSMarkStack::expand() {
-  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
-  if (_capacity == CMSMarkStackSizeMax) {
-    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
-      // We print a warning message only once per CMS cycle.
-      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
-    }
-    return;
-  }
-  // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
-  // Do not give up existing stack until we have managed to
-  // get the double capacity that we desired.
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
-                   new_capacity * sizeof(oop)));
-  if (rs.is_reserved()) {
-    // Release the backing store associated with old stack
-    _virtual_space.release();
-    // Reinitialize virtual space for new stack
-    if (!_virtual_space.initialize(rs, rs.size())) {
-      fatal("Not enough swap for expanded marking stack");
-    }
-    _base = (oop*)(_virtual_space.low());
-    _index = 0;
-    _capacity = new_capacity;
-  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
-    // Failed to double capacity, continue;
-    // we print a detail message only once per CMS cycle.
-    gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
-            SIZE_FORMAT"K",
-            _capacity / K, new_capacity / K);
-  }
-}
-
-
-// Closures
-// XXX: there seems to be a lot of code  duplication here;
-// should refactor and consolidate common code.
-
-// This closure is used to mark refs into the CMS generation in
-// the CMS bit map. Called at the first checkpoint. This closure
-// assumes that we do not need to re-mark dirty cards; if the CMS
-// generation on which this is used is not an oldest (modulo perm gen)
-// generation then this will lose younger_gen cards!
-
-MarkRefsIntoClosure::MarkRefsIntoClosure(
-  MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
-    _span(span),
-    _bitMap(bitMap),
-    _should_do_nmethods(should_do_nmethods)
-{
-    assert(_ref_processor == NULL, "deliberately left NULL");
-    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
-}
-
-void MarkRefsIntoClosure::do_oop(oop* p) {
-  // if p points into _span, then mark corresponding bit in _markBitMap
-  oop thisOop = *p;
-  if (thisOop != NULL) {
-    assert(thisOop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)thisOop;
-    if (_span.contains(addr)) {
-      // this should be made more efficient
-      _bitMap->mark(addr);
-    }
-  }
-}
-
-// A variant of the above, used for CMS marking verification.
-MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
-  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
-  bool should_do_nmethods):
-    _span(span),
-    _verification_bm(verification_bm),
-    _cms_bm(cms_bm),
-    _should_do_nmethods(should_do_nmethods) {
-    assert(_ref_processor == NULL, "deliberately left NULL");
-    assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
-}
-
-void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
-  // if p points into _span, then mark corresponding bit in _markBitMap
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    if (_span.contains(addr)) {
-      _verification_bm->mark(addr);
-      if (!_cms_bm->isMarked(addr)) {
-        oop(addr)->print();
-        gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
-        fatal("... aborting");
-      }
-    }
-  }
-}
-
-//////////////////////////////////////////////////
-// MarkRefsIntoAndScanClosure
-//////////////////////////////////////////////////
-
-MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
-                                                       ReferenceProcessor* rp,
-                                                       CMSBitMap* bit_map,
-                                                       CMSBitMap* mod_union_table,
-                                                       CMSMarkStack*  mark_stack,
-                                                       CMSMarkStack*  revisit_stack,
-                                                       CMSCollector* collector,
-                                                       bool should_yield,
-                                                       bool concurrent_precleaning):
-  _collector(collector),
-  _span(span),
-  _bit_map(bit_map),
-  _mark_stack(mark_stack),
-  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
-                      mark_stack, revisit_stack, concurrent_precleaning),
-  _yield(should_yield),
-  _concurrent_precleaning(concurrent_precleaning),
-  _freelistLock(NULL)
-{
-  _ref_processor = rp;
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
-}
-
-// This closure is used to mark refs into the CMS generation at the
-// second (final) checkpoint, and to scan and transitively follow
-// the unmarked oops. It is also used during the concurrent precleaning
-// phase while scanning objects on dirty cards in the CMS generation.
-// The marks are made in the marking bit map and the marking stack is
-// used for keeping the (newly) grey objects during the scan.
-// The parallel version (Par_...) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
-    assert(_collector->overflow_list_is_empty(),
-           "overflow list should be empty");
-    if (_span.contains(addr) &&
-        !_bit_map->isMarked(addr)) {
-      // mark bit map (object is now grey)
-      _bit_map->mark(addr);
-      // push on marking stack (stack should be empty), and drain the
-      // stack by applying this closure to the oops in the oops popped
-      // from the stack (i.e. blacken the grey objects)
-      bool res = _mark_stack->push(this_oop);
-      assert(res, "Should have space to push on empty stack");
-      do {
-        oop new_oop = _mark_stack->pop();
-        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
-        assert(new_oop->is_parsable(), "Found unparsable oop");
-        assert(_bit_map->isMarked((HeapWord*)new_oop),
-               "only grey objects on this stack");
-        // iterate over the oops in this oop, marking and pushing
-        // the ones in CMS heap (i.e. in _span).
-        new_oop->oop_iterate(&_pushAndMarkClosure);
-        // check if it's time to yield
-        do_yield_check();
-      } while (!_mark_stack->isEmpty() ||
-               (!_concurrent_precleaning && take_from_overflow_list()));
-        // if marking stack is empty, and we are not doing this
-        // during precleaning, then check the overflow list
-    }
-    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
-    assert(_collector->overflow_list_is_empty(),
-           "overflow list was drained above");
-    // We could restore evacuated mark words, if any, used for
-    // overflow list links here because the overflow list is
-    // provably empty here. That would reduce the maximum
-    // size requirements for preserved_{oop,mark}_stack.
-    // But we'll just postpone it until we are all done
-    // so we can just stream through.
-    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
-      _collector->restore_preserved_marks_if_any();
-      assert(_collector->no_preserved_marks(), "No preserved marks");
-    }
-    assert(_concurrent_precleaning || _collector->no_preserved_marks(), "no preserved marks");
-    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
-           "no preserved marks");
-  }
-}
-
-void MarkRefsIntoAndScanClosure::do_yield_work() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_freelistLock);
-  assert_lock_strong(_bit_map->lock());
-  // relinquish the free_list_lock and bitMaplock()
-  _bit_map->lock()->unlock();
-  _freelistLock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-  _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);    
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _freelistLock->lock_without_safepoint_check();
-  _bit_map->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-///////////////////////////////////////////////////////////
-// Par_MarkRefsIntoAndScanClosure: a parallel version of
-//                                 MarkRefsIntoAndScanClosure
-///////////////////////////////////////////////////////////
-Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
-  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
-  CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack*  revisit_stack):
-  _span(span),
-  _bit_map(bit_map),
-  _work_queue(work_queue),
-  _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
-                       (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
-  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
-                          revisit_stack)
-{
-  _ref_processor = rp;
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
-}
-
-// This closure is used to mark refs into the CMS generation at the
-// second (final) checkpoint, and to scan and transitively follow
-// the unmarked oops. The marks are made in the marking bit map and
-// the work_queue is used for keeping the (newly) grey objects during
-// the scan phase whence they are also available for stealing by parallel
-// threads. Since the marking bit map is shared, updates are
-// synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    if (_span.contains(addr) &&
-        !_bit_map->isMarked(addr)) {
-      // mark bit map (object will become grey):
-      // It is possible for several threads to be
-      // trying to "claim" this object concurrently;
-      // the unique thread that succeeds in marking the
-      // object first will do the subsequent push on
-      // to the work queue (or overflow list).
-      if (_bit_map->par_mark(addr)) {
-        // push on work_queue (which may not be empty), and trim the
-        // queue to an appropriate length by applying this closure to
-        // the oops in the oops popped from the stack (i.e. blacken the
-        // grey objects)
-        bool res = _work_queue->push(this_oop);
-        assert(res, "Low water mark should be less than capacity?");
-        trim_queue(_low_water_mark);
-      } // Else, another thread claimed the object
-    }
-  }
-}
-
-// This closure is used to rescan the marked objects on the dirty cards
-// in the mod union table and the card table proper.
-size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
-  oop p, MemRegion mr) {
-
-  size_t size = 0;
-  HeapWord* addr = (HeapWord*)p;
-  assert(_markStack->isEmpty(), "pre-condition (eager drainage)");
-  assert(_span.contains(addr), "we are scanning the CMS generation");
-  // check if it's time to yield
-  if (do_yield_check()) {
-    // We yielded for some foreground stop-world work,
-    // and we have been asked to abort this ongoing preclean cycle.
-    return 0;
-  }
-  if (_bitMap->isMarked(addr)) {
-    // it's marked; is it potentially uninitialized?
-    if (p->klass() != NULL) {
-      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
-        // Signal precleaning to redirty the card since
-        // the klass pointer is already installed.
-        assert(size == 0, "Initial value");
-      } else {
-        assert(p->is_parsable(), "must be parsable.");
-        // an initialized object; ignore mark word in verification below
-        // since we are running concurrent with mutators
-        assert(p->is_oop(true), "should be an oop");
-        if (p->is_objArray()) {
-          // objArrays are precisely marked; restrict scanning
-          // to dirty cards only.
-          size = p->oop_iterate(_scanningClosure, mr);
-          assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-                 "adjustObjectSize should be the identity for array sizes, "
-                 "which are necessarily larger than minimum object size of "
-                 "two heap words");
-        } else {
-          // A non-array may have been imprecisely marked; we need
-          // to scan object in its entirety.
-          size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure));
-        }
-        #ifdef DEBUG
-          size_t direct_size =
-            CompactibleFreeListSpace::adjustObjectSize(p->size());
-          assert(size == direct_size, "Inconsistency in size");
-          assert(size >= 3, "Necessary for Printezis marks to work");
-          if (!_bitMap->isMarked(addr+1)) {
-            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
-          } else {
-            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
-            assert(_bitMap->isMarked(addr+size-1),
-                   "inconsistent Printezis mark");
-          }
-        #endif // DEBUG
-      }
-    } else {
-      // an unitialized object
-      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
-      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
-      size = pointer_delta(nextOneAddr + 1, addr);
-      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-             "alignment problem");
-      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
-      // will dirty the card when the klass pointer is installed in the
-      // object (signalling the completion of initialization).
-    }
-  } else {
-    // Either a not yet marked object or an uninitialized object
-    if (p->klass() == NULL || !p->is_parsable()) {
-      // An uninitialized object, skip to the next card, since
-      // we may not be able to read its P-bits yet.
-      assert(size == 0, "Initial value");
-    } else {
-      // An object not (yet) reached by marking: we merely need to
-      // compute its size so as to go look at the next block.
-      assert(p->is_oop(true), "should be an oop"); 
-      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
-    }
-  }
-  assert(_markStack->isEmpty(), "post-condition (eager drainage)");
-  return size;
-}
-
-void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_freelistLock);
-  assert_lock_strong(_bitMap->lock());
-  // relinquish the free_list_lock and bitMaplock()
-  _bitMap->lock()->unlock();
-  _freelistLock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-  _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);    
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _freelistLock->lock_without_safepoint_check();
-  _bitMap->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-
-//////////////////////////////////////////////////////////////////
-// SurvivorSpacePrecleanClosure
-//////////////////////////////////////////////////////////////////
-// This (single-threaded) closure is used to preclean the oops in
-// the survivor spaces.
-size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
-
-  HeapWord* addr = (HeapWord*)p;
-  assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
-  assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass() != NULL, "object should be initializd");
-  assert(p->is_parsable(), "must be parsable.");
-  // an initialized object; ignore mark word in verification below
-  // since we are running concurrent with mutators
-  assert(p->is_oop(true), "should be an oop");
-  // Note that we do not yield while we iterate over
-  // the interior oops of p, pushing the relevant ones
-  // on our marking stack.
-  size_t size = p->oop_iterate(_scanning_closure);
-  do_yield_check();
-  // Observe that below, we do not abandon the preclean
-  // phase as soon as we should; rather we empty the
-  // marking stack before returning. This is to satisfy
-  // some existing assertions. In general, it may be a
-  // good idea to abort immediately and complete the marking
-  // from the grey objects at a later time.
-  while (!_mark_stack->isEmpty()) {
-    oop new_oop = _mark_stack->pop();
-    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
-    assert(new_oop->is_parsable(), "Found unparsable oop");
-    assert(_bit_map->isMarked((HeapWord*)new_oop),
-           "only grey objects on this stack");
-    // iterate over the oops in this oop, marking and pushing
-    // the ones in CMS heap (i.e. in _span).
-    new_oop->oop_iterate(_scanning_closure);
-    // check if it's time to yield
-    do_yield_check();
-  }
-  unsigned int after_count =
-    GenCollectedHeap::heap()->total_collections();
-  bool abort = (_before_count != after_count) ||
-               _collector->should_abort_preclean();
-  return abort ? 0 : size;
-}
-
-void SurvivorSpacePrecleanClosure::do_yield_work() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_bit_map->lock());
-  // Relinquish the bit map lock
-  _bit_map->lock()->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-  _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);    
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _bit_map->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-// This closure is used to rescan the marked objects on the dirty cards
-// in the mod union table and the card table proper. In the parallel
-// case, although the bitMap is shared, we do a single read so the
-// isMarked() query is "safe".
-bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
-  // Ignore mark word because we are running concurrent with mutators
-  assert(p->is_oop_or_null(true), "expected an oop or null");
-  HeapWord* addr = (HeapWord*)p;
-  assert(_span.contains(addr), "we are scanning the CMS generation");
-  bool is_obj_array = false;
-  #ifdef DEBUG
-    if (!_parallel) {
-      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
-      assert(_collector->overflow_list_is_empty(),
-             "overflow list should be empty");
-
-    }
-  #endif // DEBUG
-  if (_bit_map->isMarked(addr)) {
-    // Obj arrays are precisely marked, non-arrays are not;
-    // so we scan objArrays precisely and non-arrays in their
-    // entirety.
-    if (p->is_objArray()) {
-      is_obj_array = true;
-      if (_parallel) {
-        p->oop_iterate(_par_scan_closure, mr);
-      } else {
-        p->oop_iterate(_scan_closure, mr);
-      }
-    } else {
-      if (_parallel) {
-        p->oop_iterate(_par_scan_closure);
-      } else {
-        p->oop_iterate(_scan_closure);
-      }
-    }
-  }
-  #ifdef DEBUG
-    if (!_parallel) {
-      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
-      assert(_collector->overflow_list_is_empty(),
-             "overflow list should be empty");
-
-    }
-  #endif // DEBUG
-  return is_obj_array;
-}
-
-MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
-                        MemRegion span,
-                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
-                        CMSMarkStack*  revisitStack,
-                        bool should_yield, bool verifying):
-  _collector(collector),
-  _span(span),
-  _bitMap(bitMap),
-  _mut(&collector->_modUnionTable),
-  _markStack(markStack),
-  _revisitStack(revisitStack),
-  _yield(should_yield),
-  _skipBits(0)
-{
-  assert(_markStack->isEmpty(), "stack should be empty");
-  _finger = _bitMap->startWord();
-  _threshold = _finger;
-  assert(_collector->_restart_addr == NULL, "Sanity check");
-  assert(_span.contains(_finger), "Out of bounds _finger?");
-  DEBUG_ONLY(_verifying = verifying;)
-}
-
-void MarkFromRootsClosure::reset(HeapWord* addr) {
-  assert(_markStack->isEmpty(), "would cause duplicates on stack");
-  assert(_span.contains(addr), "Out of bounds _finger?");
-  _finger = addr;
-  _threshold = (HeapWord*)round_to(
-                 (intptr_t)_finger, CardTableModRefBS::card_size);
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-void MarkFromRootsClosure::do_bit(size_t offset) {
-  if (_skipBits > 0) {
-    _skipBits--;
-    return;
-  }
-  // convert offset into a HeapWord*
-  HeapWord* addr = _bitMap->startWord() + offset;
-  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
-         "address out of range");
-  assert(_bitMap->isMarked(addr), "tautology");
-  if (_bitMap->isMarked(addr+1)) {
-    // this is an allocated but not yet initialized object
-    assert(_skipBits == 0, "tautology");
-    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
-    oop p = oop(addr);
-    if (p->klass() == NULL || !p->is_parsable()) {
-      DEBUG_ONLY(if (!_verifying) {)
-        // We re-dirty the cards on which this object lies and increase
-        // the _threshold so that we'll come back to scan this object
-        // during the preclean or remark phase. (CMSCleanOnEnter)
-        if (CMSCleanOnEnter) {
-          size_t sz = _collector->block_size_using_printezis_bits(addr);
-          HeapWord* start_card_addr = (HeapWord*)round_down(
-                                         (intptr_t)addr, CardTableModRefBS::card_size);
-          HeapWord* end_card_addr   = (HeapWord*)round_to(
-                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
-          MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
-          assert(!redirty_range.is_empty(), "Arithmetical tautology");
-          // Bump _threshold to end_card_addr; note that
-          // _threshold cannot possibly exceed end_card_addr, anyhow.
-          // This prevents future clearing of the card as the scan proceeds
-          // to the right.
-          assert(_threshold <= end_card_addr,
-                 "Because we are just scanning into this object");
-          if (_threshold < end_card_addr) {
-            _threshold = end_card_addr;
-          }
-          if (p->klass() != NULL) {
-            // Redirty the range of cards...
-            _mut->mark_range(redirty_range);
-          } // ...else the setting of klass will dirty the card anyway.
-        }
-      DEBUG_ONLY(})
-      return;
-    }
-  }
-  scanOopsInOop(addr);
-}
-
-// We take a break if we've been at this for a while,
-// so as to avoid monopolizing the locks involved.
-void MarkFromRootsClosure::do_yield_work() {
-  // First give up the locks, then yield, then re-lock
-  // We should probably use a constructor/destructor idiom to
-  // do this unlock/lock or modify the MutexUnlocker class to
-  // serve our purpose. XXX
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_bitMap->lock());
-  _bitMap->lock()->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-  _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);    
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _bitMap->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
-  assert(_bitMap->isMarked(ptr), "expected bit to be set");
-  assert(_markStack->isEmpty(),
-         "should drain stack to limit stack usage");
-  // convert ptr to an oop preparatory to scanning
-  oop this_oop = oop(ptr);
-  // Ignore mark word in verification below, since we
-  // may be running concurrent with mutators.
-  assert(this_oop->is_oop(true), "should be an oop");
-  assert(_finger <= ptr, "_finger runneth ahead");
-  // advance the finger to right end of this object
-  _finger = ptr + this_oop->size();
-  assert(_finger > ptr, "we just incremented it above");
-  // On large heaps, it may take us some time to get through
-  // the marking phase (especially if running iCMS). During
-  // this time it's possible that a lot of mutations have
-  // accumulated in the card table and the mod union table --
-  // these mutation records are redundant until we have
-  // actually traced into the corresponding card.
-  // Here, we check whether advancing the finger would make
-  // us cross into a new card, and if so clear corresponding
-  // cards in the MUT (preclean them in the card-table in the
-  // future).
-
-  DEBUG_ONLY(if (!_verifying) {)
-    // The clean-on-enter optimization is disabled by default,
-    // until we fix 6178663.
-    if (CMSCleanOnEnter && (_finger > _threshold)) {
-      // [_threshold, _finger) represents the interval
-      // of cards to be cleared  in MUT (or precleaned in card table).
-      // The set of cards to be cleared is all those that overlap
-      // with the interval [_threshold, _finger); note that
-      // _threshold is always kept card-aligned but _finger isn't
-      // always card-aligned.
-      HeapWord* old_threshold = _threshold;
-      assert(old_threshold == (HeapWord*)round_to(
-              (intptr_t)old_threshold, CardTableModRefBS::card_size),
-             "_threshold should always be card-aligned");
-      _threshold = (HeapWord*)round_to(
-                     (intptr_t)_finger, CardTableModRefBS::card_size);
-      MemRegion mr(old_threshold, _threshold);
-      assert(!mr.is_empty(), "Control point invariant");
-      assert(_span.contains(mr), "Should clear within span");
-      // XXX When _finger crosses from old gen into perm gen
-      // we may be doing unnecessary cleaning; do better in the
-      // future by detecting that condition and clearing fewer
-      // MUT/CT entries.
-      _mut->clear_range(mr);
-    }
-  DEBUG_ONLY(})
-
-  // Note: the finger doesn't advance while we drain
-  // the stack below.
-  PushOrMarkClosure pushOrMarkClosure(_collector,
-                                      _span, _bitMap, _markStack,
-                                      _revisitStack,
-                                      _finger, this);
-  bool res = _markStack->push(this_oop);
-  assert(res, "Empty non-zero size stack should have space for single push");
-  while (!_markStack->isEmpty()) {
-    oop new_oop = _markStack->pop();
-    // Skip verifying header mark word below because we are
-    // running concurrent with mutators.
-    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
-    // now scan this oop's oops
-    new_oop->oop_iterate(&pushOrMarkClosure);
-    do_yield_check();
-  }
-  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
-}
-
-Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
-                       CMSCollector* collector, MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack,
-                       CMSMarkStack*  revisit_stack,
-                       bool should_yield):
-  _collector(collector),
-  _whole_span(collector->_span),
-  _span(span),
-  _bit_map(bit_map),
-  _mut(&collector->_modUnionTable),
-  _work_queue(work_queue),
-  _overflow_stack(overflow_stack),
-  _revisit_stack(revisit_stack),
-  _yield(should_yield),
-  _skip_bits(0),
-  _task(task)
-{
-  assert(_work_queue->size() == 0, "work_queue should be empty");
-  _finger = span.start();
-  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
-  assert(_span.contains(_finger), "Out of bounds _finger?");
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-void Par_MarkFromRootsClosure::do_bit(size_t offset) {
-  if (_skip_bits > 0) {
-    _skip_bits--;
-    return;
-  }
-  // convert offset into a HeapWord*
-  HeapWord* addr = _bit_map->startWord() + offset;
-  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
-         "address out of range");
-  assert(_bit_map->isMarked(addr), "tautology");
-  if (_bit_map->isMarked(addr+1)) {
-    // this is an allocated object that might not yet be initialized
-    assert(_skip_bits == 0, "tautology");
-    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
-    oop p = oop(addr);
-    if (p->klass() == NULL || !p->is_parsable()) {
-      // in the case of Clean-on-Enter optimization, redirty card
-      // and avoid clearing card by increasing  the threshold.
-      return;
-    }
-  }
-  scan_oops_in_oop(addr);
-}
-
-void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
-  assert(_bit_map->isMarked(ptr), "expected bit to be set");
-  // Should we assert that our work queue is empty or
-  // below some drain limit?
-  assert(_work_queue->size() == 0,
-         "should drain stack to limit stack usage");
-  // convert ptr to an oop preparatory to scanning
-  oop this_oop = oop(ptr);
-  // Ignore mark word in verification below, since we
-  // may be running concurrent with mutators.
-  assert(this_oop->is_oop(true), "should be an oop");
-  assert(_finger <= ptr, "_finger runneth ahead");
-  // advance the finger to right end of this object
-  _finger = ptr + this_oop->size();
-  assert(_finger > ptr, "we just incremented it above");
-  // On large heaps, it may take us some time to get through
-  // the marking phase (especially if running iCMS). During
-  // this time it's possible that a lot of mutations have
-  // accumulated in the card table and the mod union table --
-  // these mutation records are redundant until we have
-  // actually traced into the corresponding card.
-  // Here, we check whether advancing the finger would make
-  // us cross into a new card, and if so clear corresponding
-  // cards in the MUT (preclean them in the card-table in the
-  // future).
-
-  // The clean-on-enter optimization is disabled by default,
-  // until we fix 6178663.
-  if (CMSCleanOnEnter && (_finger > _threshold)) {
-    // [_threshold, _finger) represents the interval
-    // of cards to be cleared  in MUT (or precleaned in card table).
-    // The set of cards to be cleared is all those that overlap
-    // with the interval [_threshold, _finger); note that
-    // _threshold is always kept card-aligned but _finger isn't
-    // always card-aligned.
-    HeapWord* old_threshold = _threshold;
-    assert(old_threshold == (HeapWord*)round_to(
-            (intptr_t)old_threshold, CardTableModRefBS::card_size),
-           "_threshold should always be card-aligned");
-    _threshold = (HeapWord*)round_to(
-                   (intptr_t)_finger, CardTableModRefBS::card_size);
-    MemRegion mr(old_threshold, _threshold);
-    assert(!mr.is_empty(), "Control point invariant");
-    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
-    // XXX When _finger crosses from old gen into perm gen
-    // we may be doing unnecessary cleaning; do better in the
-    // future by detecting that condition and clearing fewer
-    // MUT/CT entries.
-    _mut->clear_range(mr);
-  }
-
-  // Note: the local finger doesn't advance while we drain
-  // the stack below, but the global finger sure can and will.
-  HeapWord** gfa = _task->global_finger_addr();
-  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
-                                      _span, _bit_map,
-                                      _work_queue,
-                                      _overflow_stack,
-                                      _revisit_stack,
-                                      _finger,
-                                      gfa, this);
-  bool res = _work_queue->push(this_oop);   // overflow could occur here
-  assert(res, "Will hold once we use workqueues");
-  while (true) {
-    oop new_oop;
-    if (!_work_queue->pop_local(new_oop)) {
-      // We emptied our work_queue; check if there's stuff that can
-      // be gotten from the overflow stack.
-      if (CMSConcMarkingTask::get_work_from_overflow_stack(
-            _overflow_stack, _work_queue)) {
-        do_yield_check();
-        continue;
-      } else {  // done
-        break;
-      }
-    }
-    // Skip verifying header mark word below because we are
-    // running concurrent with mutators.
-    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
-    // now scan this oop's oops
-    new_oop->oop_iterate(&pushOrMarkClosure);
-    do_yield_check();
-  }
-  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
-}
-
-// Yield in response to a request from VM Thread or
-// from mutators.
-void Par_MarkFromRootsClosure::do_yield_work() {
-  assert(_task != NULL, "sanity");
-  _task->yield();
-}
-
-// A variant of the above used for verifying CMS marking work.
-MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
-                        MemRegion span,
-                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
-                        CMSMarkStack*  mark_stack):
-  _collector(collector),
-  _span(span),
-  _verification_bm(verification_bm),
-  _cms_bm(cms_bm),
-  _mark_stack(mark_stack),
-  _pam_verify_closure(collector, span, verification_bm, cms_bm,
-                      mark_stack)
-{
-  assert(_mark_stack->isEmpty(), "stack should be empty");
-  _finger = _verification_bm->startWord();
-  assert(_collector->_restart_addr == NULL, "Sanity check");
-  assert(_span.contains(_finger), "Out of bounds _finger?");
-}
-
-void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
-  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
-  assert(_span.contains(addr), "Out of bounds _finger?");
-  _finger = addr;
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
-  // convert offset into a HeapWord*
-  HeapWord* addr = _verification_bm->startWord() + offset;
-  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
-         "address out of range");
-  assert(_verification_bm->isMarked(addr), "tautology");
-  assert(_cms_bm->isMarked(addr), "tautology");
-
-  assert(_mark_stack->isEmpty(),
-         "should drain stack to limit stack usage");
-  // convert addr to an oop preparatory to scanning
-  oop this_oop = oop(addr);
-  assert(this_oop->is_oop(), "should be an oop");
-  assert(_finger <= addr, "_finger runneth ahead");
-  // advance the finger to right end of this object
-  _finger = addr + this_oop->size();
-  assert(_finger > addr, "we just incremented it above");
-  // Note: the finger doesn't advance while we drain
-  // the stack below.
-  bool res = _mark_stack->push(this_oop);
-  assert(res, "Empty non-zero size stack should have space for single push");
-  while (!_mark_stack->isEmpty()) {
-    oop new_oop = _mark_stack->pop();
-    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
-    // now scan this oop's oops
-    new_oop->oop_iterate(&_pam_verify_closure);
-  }
-  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
-}
-
-PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
-  CMSCollector* collector, MemRegion span,
-  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
-  CMSMarkStack*  mark_stack):
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
-  _span(span),
-  _verification_bm(verification_bm),
-  _cms_bm(cms_bm),
-  _mark_stack(mark_stack)
-{ }
-
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _mark_stack->reset();  // discard stack contents
-  _mark_stack->expand(); // expand the stack if possible
-}
-
-void PushAndMarkVerifyClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
-  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
-    // Oop lies in _span and isn't yet grey or black
-    _verification_bm->mark(addr);            // now grey
-    if (!_cms_bm->isMarked(addr)) {
-      oop(addr)->print();
-      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
-      fatal("... aborting");
-    }
-
-    if (!_mark_stack->push(this_oop)) { // stack overflow
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                               SIZE_FORMAT, _mark_stack->capacity());
-      }
-      assert(_mark_stack->isFull(), "Else push should have succeeded");
-      handle_stack_overflow(addr);
-    }
-    // anything including and to the right of _finger
-    // will be scanned as we iterate over the remainder of the
-    // bit map
-  }
-}
-
-PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
-                     MemRegion span,
-                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
-                     CMSMarkStack*  revisitStack,
-                     HeapWord* finger, MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
-  _span(span),
-  _bitMap(bitMap),
-  _markStack(markStack),
-  _revisitStack(revisitStack),
-  _finger(finger),
-  _parent(parent)
-{ }
-
-Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
-                     MemRegion span,
-                     CMSBitMap* bit_map,
-                     OopTaskQueue* work_queue,
-                     CMSMarkStack*  overflow_stack,
-                     CMSMarkStack*  revisit_stack,
-                     HeapWord* finger,
-                     HeapWord** global_finger_addr,
-                     Par_MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
-  _whole_span(collector->_span),
-  _span(span),
-  _bit_map(bit_map),
-  _work_queue(work_queue),
-  _overflow_stack(overflow_stack),
-  _revisit_stack(revisit_stack),
-  _finger(finger),
-  _global_finger_addr(global_finger_addr),
-  _parent(parent)
-{ }
-
-
-void CMSCollector::lower_restart_addr(HeapWord* low) {
-  assert(_span.contains(low), "Out of bounds addr");
-  if (_restart_addr == NULL) {
-    _restart_addr = low;
-  } else {
-    _restart_addr = MIN2(_restart_addr, low);
-  }
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _markStack->reset();  // discard stack contents
-  _markStack->expand(); // expand the stack if possible
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
-  // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
-  MutexLockerEx ml(_overflow_stack->par_lock(),
-                   Mutex::_no_safepoint_check_flag);
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _overflow_stack->reset();  // discard stack contents
-  _overflow_stack->expand(); // expand the stack if possible
-}
-
-
-void PushOrMarkClosure::do_oop(oop* p) {
-  oop    thisOop = *p;
-  // Ignore mark word because we are running concurrent with mutators.
-  assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)thisOop;
-  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
-    // Oop lies in _span and isn't yet grey or black
-    _bitMap->mark(addr);            // now grey
-    if (addr < _finger) {
-      // the bit map iteration has already either passed, or
-      // sampled, this bit in the bit map; we'll need to
-      // use the marking stack to scan this oop's oops.
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
-        if (PrintCMSStatistics != 0) {
-          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                                 SIZE_FORMAT, _markStack->capacity());
-        }
-        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
-        handle_stack_overflow(addr);
-      }
-    }
-    // anything including and to the right of _finger
-    // will be scanned as we iterate over the remainder of the
-    // bit map
-    do_yield_check();
-  }
-}
-
-void Par_PushOrMarkClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  // Ignore mark word because we are running concurrent with mutators.
-  assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
-  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // Oop lies in _span and isn't yet grey or black
-    // We read the global_finger (volatile read) strictly after marking oop
-    bool res = _bit_map->par_mark(addr);    // now grey
-    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
-    // Should we push this marked oop on our stack?
-    // -- if someone else marked it, nothing to do
-    // -- if target oop is above global finger nothing to do
-    // -- if target oop is in chunk and above local finger
-    //      then nothing to do
-    // -- else push on work queue
-    if (   !res       // someone else marked it, they will deal with it
-        || (addr >= *gfa)  // will be scanned in a later task
-        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
-      return;
-    }
-    // the bit map iteration has already either passed, or
-    // sampled, this bit in the bit map; we'll need to
-    // use the marking stack to scan this oop's oops.
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (CMSMarkStackOverflowALot &&
-          _collector->simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow ||
-        !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
-      // stack overflow
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                               SIZE_FORMAT, _overflow_stack->capacity());
-      }
-      // We cannot assert that the overflow stack is full because
-      // it may have been emptied since.
-      assert(simulate_overflow ||
-             _work_queue->size() == _work_queue->max_elems(),
-            "Else push should have succeeded");
-      handle_stack_overflow(addr);
-    }
-    do_yield_check();
-  }
-}
-
-
-// Grey object rescan during pre-cleaning and second checkpoint phases --
-// the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  // If _concurrent_precleaning, ignore mark word verification
-  assert(this_oop->is_oop_or_null(_concurrent_precleaning),
-         "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
-  // Check if oop points into the CMS generation
-  // and is not marked
-  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // a white object ...
-    _bit_map->mark(addr);         // ... now grey
-    // push on the marking stack (grey set)
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (CMSMarkStackOverflowALot &&
-          _collector->simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !_mark_stack->push(this_oop)) {
-      if (_concurrent_precleaning) {
-         // During precleaning we can just dirty the appropriate card
-         // in the mod union table, thus ensuring that the object remains
-         // in the grey set  and continue. Note that no one can be intefering
-         // with us in this action of dirtying the mod union table, so
-         // no locking is required.
-         _mod_union_table->mark(addr);
-         _collector->_ser_pmc_preclean_ovflw++;
-      } else {
-         // During the remark phase, we need to remember this oop
-         // in the overflow list.
-         _collector->push_on_overflow_list(this_oop);
-         _collector->_ser_pmc_remark_ovflw++;
-      }
-    }
-  }
-}
-
-// Grey object rescan during second checkpoint phase --
-// the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  assert(this_oop->is_oop_or_null(),
-         "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
-  // Check if oop points into the CMS generation 
-  // and is not marked
-  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // a white object ...
-    // If we manage to "claim" the object, by being the
-    // first thread to mark it, then we push it on our
-    // marking stack
-    if (_bit_map->par_mark(addr)) {     // ... now grey
-      // push on work queue (grey set)
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->par_simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow || !_work_queue->push(this_oop)) {
-        _collector->par_push_on_overflow_list(this_oop);
-        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
-      }
-    } // Else, some other thread got there first
-  }
-}
-
-void PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->push(oop(k))) {
-    fatal("Revisit stack overflowed in PushAndMarkClosure");
-  }
-}
-
-void Par_PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revist stack overflowed in Par_PushAndMarkClosure");
-  }
-}
-
-void CMSPrecleanRefsYieldClosure::do_yield_work() {
-  Mutex* bml = _collector->bitMapLock();
-  assert_lock_strong(bml);
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-
-  bml->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-
-  _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);    
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  bml->lock();
-
-  _collector->startTimer();
-}
-
-bool CMSPrecleanRefsYieldClosure::should_return() {
-  if (ConcurrentMarkSweepThread::should_yield()) {
-    do_yield_work();
-  }
-  return _collector->foregroundGCIsActive();
-}
-
-void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
-  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
-         "mr should be aligned to start at a card boundary");
-  // We'd like to assert:
-  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
-  //        "mr should be a range of cards");
-  // However, that would be too strong in one case -- the last
-  // partition ends at _unallocated_block which, in general, can be
-  // an arbitrary boundary, not necessarily card aligned.
-  if (PrintCMSStatistics != 0) {
-    _num_dirty_cards +=
-         mr.word_size()/CardTableModRefBS::card_size_in_words;
-  }
-  _space->object_iterate_mem(mr, &_scan_cl);
-}
-
-SweepClosure::SweepClosure(CMSCollector* collector,
-                           ConcurrentMarkSweepGeneration* g,
-                           CMSBitMap* bitMap, bool should_yield) :
-  _collector(collector),
-  _g(g),
-  _sp(g->cmsSpace()),
-  _limit(_sp->sweep_limit()),
-  _freelistLock(_sp->freelistLock()),
-  _bitMap(bitMap),
-  _yield(should_yield),
-  _inFreeRange(false),           // No free range at beginning of sweep
-  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
-  _lastFreeRangeCoalesced(false),
-  _freeFinger(g->used_region().start())
-{
-  NOT_PRODUCT(
-    _numObjectsFreed = 0;
-    _numWordsFreed   = 0;
-    _numObjectsLive = 0;
-    _numWordsLive = 0;
-    _numObjectsAlreadyFree = 0;
-    _numWordsAlreadyFree = 0;
-    _last_fc = NULL;
-
-    _sp->initializeIndexedFreeListArrayReturnedBytes();
-    _sp->dictionary()->initializeDictReturnedBytes();
-  )
-  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
-         "sweep _limit out of bounds");
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
-  }
-}
-
-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not have added back to
-// the free lists. [basically dealing with the "fringe effect"]
-SweepClosure::~SweepClosure() {
-  assert_lock_strong(_freelistLock);
-  // this should be treated as the end of a free run if any
-  // The current free range should be returned to the free lists
-  // as one coalesced chunk.
-  if (inFreeRange()) {
-    flushCurFreeChunk(freeFinger(), 
-      pointer_delta(_limit, freeFinger()));
-    assert(freeFinger() < _limit, "the finger pointeth off base");
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("destructor:");
-      gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
-                 "[coalesced:"SIZE_FORMAT"]\n",
-                 freeFinger(), pointer_delta(_limit, freeFinger()),
-                 lastFreeRangeCoalesced());
-    }
-  }
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT " bytes",
-                 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
-                             SIZE_FORMAT" bytes  "
-	"Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
-	_numObjectsLive, _numWordsLive*sizeof(HeapWord), 
-	_numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
-	sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
-
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
-        gclog_or_tty->print("	Indexed List Returned "SIZE_FORMAT" bytes", 
-  	  indexListReturnedBytes);
-        gclog_or_tty->print_cr("	Dictionary Returned "SIZE_FORMAT" bytes",
-  	  dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
-  }
-}
-
-void SweepClosure::initialize_free_range(HeapWord* freeFinger, 
-    bool freeRangeInFreeLists) {
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
-               freeFinger, _sp->block_size(freeFinger),
-	       freeRangeInFreeLists);
-  }
-  assert(!inFreeRange(), "Trampling existing free range");
-  set_inFreeRange(true);
-  set_lastFreeRangeCoalesced(false);
-
-  set_freeFinger(freeFinger);
-  set_freeRangeInFreeLists(freeRangeInFreeLists);
-  if (CMSTestInFreeList) {
-    if (freeRangeInFreeLists) { 
-      FreeChunk* fc = (FreeChunk*) freeFinger;
-      assert(fc->isFree(), "A chunk on the free list should be free.");
-      assert(fc->size() > 0, "Free range should have a size");
-      assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
-    }
-  }
-}
-
-// Note that the sweeper runs concurrently with mutators. Thus,
-// it is possible for direct allocation in this generation to happen
-// in the middle of the sweep. Note that the sweeper also coalesces
-// contiguous free blocks. Thus, unless the sweeper and the allocator
-// synchronize appropriately freshly allocated blocks may get swept up.
-// This is accomplished by the sweeper locking the free lists while
-// it is sweeping. Thus blocks that are determined to be free are
-// indeed free. There is however one additional complication:
-// blocks that have been allocated since the final checkpoint and
-// mark, will not have been marked and so would be treated as
-// unreachable and swept up. To prevent this, the allocator marks
-// the bit map when allocating during the sweep phase. This leads,
-// however, to a further complication -- objects may have been allocated
-// but not yet initialized -- in the sense that the header isn't yet
-// installed. The sweeper can not then determine the size of the block
-// in order to skip over it. To deal with this case, we use a technique
-// (due to Printezis) to encode such uninitialized block sizes in the
-// bit map. Since the bit map uses a bit per every HeapWord, but the
-// CMS generation has a minimum object size of 3 HeapWords, it follows
-// that "normal marks" won't be adjacent in the bit map (there will
-// always be at least two 0 bits between successive 1 bits). We make use
-// of these "unused" bits to represent uninitialized blocks -- the bit
-// corresponding to the start of the uninitialized object and the next
-// bit are both set. Finally, a 1 bit marks the end of the object that
-// started with the two consecutive 1 bits to indicate its potentially
-// uninitialized state.
-
-size_t SweepClosure::do_blk_careful(HeapWord* addr) {
-  FreeChunk* fc = (FreeChunk*)addr;
-  size_t res;
-
-  // check if we are done sweepinrg
-  if (addr == _limit) { // we have swept up to the limit, do nothing more
-    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
-           "sweep _limit out of bounds");
-    // help the closure application finish
-    return pointer_delta(_sp->end(), _limit);
-  }
-  assert(addr <= _limit, "sweep invariant");
-
-  // check if we should yield
-  do_yield_check(addr);
-  if (fc->isFree()) {
-    // Chunk that is already free
-    res = fc->size();
-    doAlreadyFreeChunk(fc);
-    debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
-    NOT_PRODUCT(
-      _numObjectsAlreadyFree++;
-      _numWordsAlreadyFree += res;
-    )
-    NOT_PRODUCT(_last_fc = fc;)
-  } else if (!_bitMap->isMarked(addr)) {
-    // Chunk is fresh garbage
-    res = doGarbageChunk(fc);
-    debug_only(_sp->verifyFreeLists());
-    NOT_PRODUCT(
-      _numObjectsFreed++;
-      _numWordsFreed += res;
-    )
-  } else {
-    // Chunk that is alive.
-    res = doLiveChunk(fc);
-    debug_only(_sp->verifyFreeLists());
-    NOT_PRODUCT(
-	_numObjectsLive++;
-	_numWordsLive += res;
-    )
-  }
-  return res;
-}
-
-// For the smart allocation, record following
-//  split deaths - a free chunk is removed from its free list because
-//	it is being split into two or more chunks.
-//  split birth - a free chunk is being added to its free list because
-//	a larger free chunk has been split and resulted in this free chunk.
-//  coal death - a free chunk is being removed from its free list because
-//	it is being coalesced into a large free chunk.
-//  coal birth - a free chunk is being added to its free list because
-//	it was created when two or more free chunks where coalesced into
-//	this free chunk.
-//
-// These statistics are used to determine the desired number of free
-// chunks of a given size.  The desired number is chosen to be relative
-// to the end of a CMS sweep.  The desired number at the end of a sweep
-// is the 
-// 	count-at-end-of-previous-sweep (an amount that was enough)
-//		- count-at-beginning-of-current-sweep  (the excess)
-//		+ split-births  (gains in this size during interval)
-//		- split-deaths  (demands on this size during interval)
-// where the interval is from the end of one sweep to the end of the
-// next.
-//
-// When sweeping the sweeper maintains an accumulated chunk which is
-// the chunk that is made up of chunks that have been coalesced.  That
-// will be termed the left-hand chunk.  A new chunk of garbage that
-// is being considered for coalescing will be referred to as the
-// right-hand chunk.
-//
-// When making a decision on whether to coalesce a right-hand chunk with
-// the current left-hand chunk, the current count vs. the desired count
-// of the left-hand chunk is considered.  Also if the right-hand chunk
-// is near the large chunk at the end of the heap (see 
-// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the 
-// left-hand chunk is coalesced.
-//
-// When making a decision about whether to split a chunk, the desired count
-// vs. the current count of the candidate to be split is also considered.
-// If the candidate is underpopulated (currently fewer chunks than desired)
-// a chunk of an overpopulated (currently more chunks than desired) size may 
-// be chosen.  The "hint" associated with a free list, if non-null, points
-// to a free list which may be overpopulated.  
-//
-
-void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
-  size_t size = fc->size();
-  // Chunks that cannot be coalesced are not in the
-  // free lists.
-  if (CMSTestInFreeList && !fc->cantCoalesce()) {
-    assert(_sp->verifyChunkInFreeLists(fc), 
-      "free chunk should be in free lists");
-  }
-  // a chunk that is already free, should not have been
-  // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
-  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
-  // Verify that the bit map has no bits marked between
-  // addr and purported end of this block.
-  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-
-  // Some chunks cannot be coalesced in under any circumstances.  
-  // See the definition of cantCoalesce().
-  if (!fc->cantCoalesce()) {
-    // This chunk can potentially be coalesced.
-    if (_sp->adaptive_freelists()) {
-      // All the work is done in 
-      doPostIsFreeOrGarbageChunk(fc, size);
-    } else {  // Not adaptive free lists
-      // this is a free chunk that can potentially be coalesced by the sweeper;
-      if (!inFreeRange()) {
-        // if the next chunk is a free block that can't be coalesced
-        // it doesn't make sense to remove this chunk from the free lists
-        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
-        assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
-        if ((HeapWord*)nextChunk < _limit  &&    // there's a next chunk...
-            nextChunk->isFree()    &&            // which is free...
-            nextChunk->cantCoalesce()) {         // ... but cant be coalesced
-          // nothing to do
-        } else {
-          // Potentially the start of a new free range:
-	  // Don't eagerly remove it from the free lists.  
-	  // No need to remove it if it will just be put
-	  // back again.  (Also from a pragmatic point of view
-	  // if it is a free block in a region that is beyond
-	  // any allocated blocks, an assertion will fail)
-          // Remember the start of a free run.
-          initialize_free_range(addr, true);
-          // end - can coalesce with next chunk
-        }
-      } else {
-        // the midst of a free range, we are coalescing
-        debug_only(record_free_block_coalesced(fc);)
-        if (CMSTraceSweeper) { 
-          gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
-        }
-        // remove it from the free lists
-        _sp->removeFreeChunkFromFreeLists(fc);
-        set_lastFreeRangeCoalesced(true);
-        // If the chunk is being coalesced and the current free range is
-        // in the free lists, remove the current free range so that it
-        // will be returned to the free lists in its entirety - all
-        // the coalesced pieces included.
-        if (freeRangeInFreeLists()) {
-	  FreeChunk* ffc = (FreeChunk*) freeFinger();
-	  assert(ffc->size() == pointer_delta(addr, freeFinger()),
-	    "Size of free range is inconsistent with chunk size.");
-	  if (CMSTestInFreeList) {
-            assert(_sp->verifyChunkInFreeLists(ffc),
-	      "free range is not in free lists");
-	  }
-          _sp->removeFreeChunkFromFreeLists(ffc);
-	  set_freeRangeInFreeLists(false);
-        }
-      }
-    }
-  } else {
-    // Code path common to both original and adaptive free lists.
-
-    // cant coalesce with previous block; this should be treated
-    // as the end of a free run if any
-    if (inFreeRange()) {
-      // we kicked some butt; time to pick up the garbage
-      assert(freeFinger() < addr, "the finger pointeth off base");
-      flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
-    }
-    // else, nothing to do, just continue
-  }
-}
-
-size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
-  // This is a chunk of garbage.  It is not in any free list.
-  // Add it to a free list or let it possibly be coalesced into
-  // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
-
-  if (_sp->adaptive_freelists()) {
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of just dead object.
-    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-
-    doPostIsFreeOrGarbageChunk(fc, size);
-  } else {
-    if (!inFreeRange()) {
-      // start of a new free range
-      assert(size > 0, "A free range should have a size");
-      initialize_free_range(addr, false);
-
-    } else {
-      // this will be swept up when we hit the end of the
-      // free range
-      if (CMSTraceSweeper) {
-        gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
-      }
-      // If the chunk is being coalesced and the current free range is
-      // in the free lists, remove the current free range so that it
-      // will be returned to the free lists in its entirety - all
-      // the coalesced pieces included.
-      if (freeRangeInFreeLists()) {
-	FreeChunk* ffc = (FreeChunk*)freeFinger();
-	assert(ffc->size() == pointer_delta(addr, freeFinger()),
-	  "Size of free range is inconsistent with chunk size.");
-	if (CMSTestInFreeList) {
-          assert(_sp->verifyChunkInFreeLists(ffc),
-	    "free range is not in free lists");
-	}
-        _sp->removeFreeChunkFromFreeLists(ffc);
-	set_freeRangeInFreeLists(false);
-      }
-      set_lastFreeRangeCoalesced(true);
-    }
-    // this will be swept up when we hit the end of the free range
-
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of just dead object.
-    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-  }
-  return size;
-}
-
-size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
-  HeapWord* addr = (HeapWord*) fc;
-  // The sweeper has just found a live object. Return any accumulated
-  // left hand chunk to the free lists.
-  if (inFreeRange()) {
-    if (_sp->adaptive_freelists()) {
-      flushCurFreeChunk(freeFinger(),
-                        pointer_delta(addr, freeFinger()));
-    } else { // not adaptive freelists
-      set_inFreeRange(false);
-      // Add the free range back to the free list if it is not already
-      // there.
-      if (!freeRangeInFreeLists()) {
-        assert(freeFinger() < addr, "the finger pointeth off base");
-        if (CMSTraceSweeper) {
-          gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
-            "[coalesced:%d]\n",
-            freeFinger(), pointer_delta(addr, freeFinger()),
-            lastFreeRangeCoalesced());
-        }
-        _sp->addChunkAndRepairOffsetTable(freeFinger(),
-          pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
-      }
-    }
-  }
-
-  // Common code path for original and adaptive free lists.
-
-  // this object is live: we'd normally expect this to be
-  // an oop, and like to assert the following:
-  // assert(oop(addr)->is_oop(), "live block should be an oop");
-  // However, as we commented above, this may be an object whose
-  // header hasn't yet been initialized.
-  size_t size;
-  assert(_bitMap->isMarked(addr), "Tautology for this control point");
-  if (_bitMap->isMarked(addr + 1)) {
-    // Determine the size from the bit map, rather than trying to
-    // compute it from the object header.
-    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
-    size = pointer_delta(nextOneAddr + 1, addr);
-    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-           "alignment problem");
-
-    #ifdef DEBUG
-      if (oop(addr)->klass() != NULL &&
-          (!CMSPermGenSweepingEnabled || oop(addr)->is_parsable())) {
-        // Ignore mark word because we are running concurrent with mutators
-        assert(oop(addr)->is_oop(true), "live block should be an oop");
-        assert(size ==
-               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
-               "P-mark and computed size do not agree");
-      }
-    #endif
-
-  } else {
-    // This should be an initialized object that's alive.
-    assert(oop(addr)->klass() != NULL &&
-           (!CMSPermGenSweepingEnabled || oop(addr)->is_parsable()),
-           "Should be an initialized object");
-    // Ignore mark word because we are running concurrent with mutators
-    assert(oop(addr)->is_oop(true), "live block should be an oop");
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of this block.
-    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
-    assert(size >= 3, "Necessary for Printezis marks to work");
-    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
-    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
-  }
-  return size;
-}
-
-void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc, 
-					    size_t chunkSize) { 
-  // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
-  // scheme.
-  bool fcInFreeLists = fc->isFree();
-  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
-  assert((HeapWord*)fc <= _limit, "sweep invariant");
-  if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verifyChunkInFreeLists(fc), 
-      "free chunk is not in free lists");
-  }
-  
- 
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
-  }
-
-  HeapWord* addr = (HeapWord*) fc;
-
-  bool coalesce;
-  size_t left  = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
-  switch (FLSCoalescePolicy) {
-    // numeric value forms a coalition aggressiveness metric
-    case 0:  { // never coalesce
-      coalesce = false;
-      break;
-    } 
-    case 1: { // coalesce if left & right chunks on overpopulated lists
-      coalesce = _sp->coalOverPopulated(left) &&
-                 _sp->coalOverPopulated(right);
-      break;
-    }
-    case 2: { // coalesce if left chunk on overpopulated list (default)
-      coalesce = _sp->coalOverPopulated(left);
-      break;
-    }
-    case 3: { // coalesce if left OR right chunk on overpopulated list
-      coalesce = _sp->coalOverPopulated(left) || 
-                 _sp->coalOverPopulated(right);
-      break;
-    }
-    case 4: { // always coalesce
-      coalesce = true;
-      break;
-    }
-    default:
-     ShouldNotReachHere();
-  }
-
-  // Should the current free range be coalesced?
-  // If the chunk is in a free range and either we decided to coalesce above
-  // or the chunk is near the large block at the end of the heap
-  // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
-  if (doCoalesce) {
-    // Coalesce the current free range on the left with the new
-    // chunk on the right.  If either is on a free list,
-    // it must be removed from the list and stashed in the closure.
-    if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
-        "Size of free range is inconsistent with chunk size.");
-      if (CMSTestInFreeList) {
-        assert(_sp->verifyChunkInFreeLists(ffc),
-	  "Chunk is not in free lists");
-      }
-      _sp->coalDeath(ffc->size());
-      _sp->removeFreeChunkFromFreeLists(ffc);
-      set_freeRangeInFreeLists(false);
-    }
-    if (fcInFreeLists) {
-      _sp->coalDeath(chunkSize);
-      assert(fc->size() == chunkSize, 
-	"The chunk has the wrong size or is not in the free lists");
-      _sp->removeFreeChunkFromFreeLists(fc);
-    }
-    set_lastFreeRangeCoalesced(true);
-  } else {  // not in a free range and/or should not coalesce
-    // Return the current free range and start a new one.
-    if (inFreeRange()) {
-      // In a free range but cannot coalesce with the right hand chunk.
-      // Put the current free range into the free lists.
-      flushCurFreeChunk(freeFinger(), 
-	pointer_delta(addr, freeFinger()));
-    }
-    // Set up for new free range.  Pass along whether the right hand
-    // chunk is in the free lists.
-    initialize_free_range((HeapWord*)fc, fcInFreeLists);
-  }
-}
-void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
-  assert(inFreeRange(), "Should only be called if currently in a free range.");
-  assert(size > 0, 
-    "A zero sized chunk cannot be added to the free lists.");
-  if (!freeRangeInFreeLists()) {
-    if(CMSTestInFreeList) {
-      FreeChunk* fc = (FreeChunk*) chunk;
-      fc->setSize(size);
-      assert(!_sp->verifyChunkInFreeLists(fc),
-	"chunk should not be in free lists yet");
-    }
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
-                    chunk, size);
-    }
-    // A new free range is going to be starting.  The current
-    // free range has not been added to the free lists yet or
-    // was removed so add it back.
-    // If the current free range was coalesced, then the death
-    // of the free range was recorded.  Record a birth now.
-    if (lastFreeRangeCoalesced()) {
-      _sp->coalBirth(size);
-    }
-    _sp->addChunkAndRepairOffsetTable(chunk, size,
-	    lastFreeRangeCoalesced());
-  }
-  set_inFreeRange(false);
-  set_freeRangeInFreeLists(false);
-}
-
-// We take a break if we've been at this for a while,
-// so as to avoid monopolizing the locks involved.
-void SweepClosure::do_yield_work(HeapWord* addr) {
-  // Return current free chunk being used for coalescing (if any)
-  // to the appropriate freelist.  After yielding, the next
-  // free block encountered will start a coalescing range of
-  // free blocks.  If the next free block is adjacent to the
-  // chunk just flushed, they will need to wait for the next
-  // sweep to be coalesced.
-  if (inFreeRange()) {
-    flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
-  }
-
-  // First give up the locks, then yield, then re-lock.
-  // We should probably use a constructor/destructor idiom to
-  // do this unlock/lock or modify the MutexUnlocker class to
-  // serve our purpose. XXX
-  assert_lock_strong(_bitMap->lock());
-  assert_lock_strong(_freelistLock);
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  _bitMap->lock()->unlock();
-  _freelistLock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-  _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
-  _collector->icms_wait();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-	               ConcurrentMarkSweepThread::should_yield() &&
-	               !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::sleep(Thread::current(), 1, false);    
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _freelistLock->lock();
-  _bitMap->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-#ifndef PRODUCT
-// This is actually very useful in a product build if it can
-// be called from the debugger.  Compile it into the product
-// as needed.
-bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
-  return debug_cms_space->verifyChunkInFreeLists(fc);
-}
-
-void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
-  }
-}
-#endif
-
-// CMSIsAliveClosure
-bool CMSIsAliveClosure::do_object_b(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  return addr != NULL &&
-         (!_span.contains(addr) || _bit_map->isMarked(addr));
-} 
-
-// CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
-  if (_span.contains(addr) &&
-      !_bit_map->isMarked(addr)) {
-    _bit_map->mark(addr);
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (CMSMarkStackOverflowALot &&
-          _collector->simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !_mark_stack->push(this_oop)) {
-      _collector->push_on_overflow_list(this_oop);
-      _collector->_ser_kac_ovflw++;
-    }
-  }
-}
-
-// CMSParKeepAliveClosure: a parallel version of the above.
-// The work queues are private to each closure (thread),
-// but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
-  if (_span.contains(addr) &&
-      !_bit_map->isMarked(addr)) {
-    // In general, during recursive tracing, several threads
-    // may be concurrently getting here; the first one to
-    // "tag" it, claims it.
-    if (_bit_map->par_mark(addr)) { 
-      bool res = _work_queue->push(this_oop);
-      assert(res, "Low water mark should be much less than capacity");
-      // Do a recursive trim in the hope that this will keep
-      // stack usage lower, but leave some oops for potential stealers
-      trim_queue(_low_water_mark);
-    } // Else, another thread got there first
-  }
-}
-
-void CMSParKeepAliveClosure::trim_queue(uint max) {
-  while (_work_queue->size() > max) {
-    oop new_oop;
-    if (_work_queue->pop_local(new_oop)) {
-      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
-      assert(_bit_map->isMarked((HeapWord*)new_oop),
-             "no white objects on this stack!");
-      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
-      // iterate over the oops in this oop, marking and pushing
-      // the ones in CMS heap (i.e. in _span).
-      new_oop->oop_iterate(&_mark_and_push);
-    }
-  }
-}
-
-void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
-  if (_span.contains(addr) &&
-      !_bit_map->isMarked(addr)) {
-    if (_bit_map->par_mark(addr)) {
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->par_simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow || !_work_queue->push(this_oop)) {
-        _collector->par_push_on_overflow_list(this_oop);
-        _collector->_par_kac_ovflw++;
-      }
-    } // Else another thread got there already
-  }
-}
-
-//////////////////////////////////////////////////////////////////
-//  CMSExpansionCause		     /////////////////////////////
-//////////////////////////////////////////////////////////////////
-const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
-  switch (cause) {
-    case _no_expansion:
-      return "No expansion";
-    case _satisfy_free_ratio:
-      return "Free ratio";
-    case _satisfy_promotion:
-      return "Satisfy promotion";
-    case _satisfy_allocation:
-      return "allocation";
-    case _allocate_par_lab:
-      return "Par LAB";
-    case _allocate_par_spooling_space:
-      return "Par Spooling Space";
-    case _adaptive_size_policy:
-      return "Ergonomics";
-    default:
-      return "unknown";
-  }
-}
-
-void CMSDrainMarkingStackClosure::do_void() {
-  // the max number to take from overflow list at a time
-  const size_t num = _mark_stack->capacity()/4;
-  while (!_mark_stack->isEmpty() ||
-         // if stack is empty, check the overflow list
-         _collector->take_from_overflow_list(num, _mark_stack)) {
-    oop this_oop = _mark_stack->pop();
-    HeapWord* addr = (HeapWord*)this_oop;
-    assert(_span.contains(addr), "Should be within span");
-    assert(_bit_map->isMarked(addr), "Should be marked");
-    assert(this_oop->is_oop(), "Should be an oop");
-    this_oop->oop_iterate(_keep_alive);
-  }
-}
-
-void CMSParDrainMarkingStackClosure::do_void() {
-  // drain queue
-  trim_queue(0);
-}
-
-// Trim our work_queue so its length is below max at return
-void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
-  while (_work_queue->size() > max) {
-    oop new_oop;
-    if (_work_queue->pop_local(new_oop)) {
-      assert(new_oop->is_oop(), "Expected an oop");
-      assert(_bit_map->isMarked((HeapWord*)new_oop),
-             "no white objects on this stack!");
-      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
-      // iterate over the oops in this oop, marking and pushing
-      // the ones in CMS heap (i.e. in _span).
-      new_oop->oop_iterate(&_mark_and_push);
-    }
-  }
-}
-
-////////////////////////////////////////////////////////////////////
-// Support for Marking Stack Overflow list handling and related code
-////////////////////////////////////////////////////////////////////
-// Much of the following code is similar in shape and spirit to the
-// code used in ParNewGC. We should try and share that code
-// as much as possible in the future.
-
-#ifndef PRODUCT
-// Debugging support for CMSStackOverflowALot
-
-// It's OK to call this multi-threaded;  the worst thing
-// that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
-// probably good as it would exercise the overflow code
-// under contention.
-bool CMSCollector::simulate_overflow() {
-  if (_overflow_counter-- <= 0) { // just being defensive
-    _overflow_counter = CMSMarkStackOverflowInterval;
-    return true;
-  } else {
-    return false;
-  }
-}
-
-bool CMSCollector::par_simulate_overflow() {
-  return simulate_overflow();
-}
-#endif
-
-// Single-threaded
-bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
-  assert(stack->isEmpty(), "Expected precondition");
-  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
-  size_t i = num;
-  oop  cur = _overflow_list;
-  const markOop proto = markOopDesc::prototype();
-  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
-    next = oop(cur->mark());
-    cur->set_mark(proto);   // until proven otherwise
-    bool res = stack->push(cur);
-    assert(res, "Bit off more than can chew?");
-  }
-  _overflow_list = cur;
-  return !stack->isEmpty();
-}
-
-// Multi-threaded; use CAS to break off a prefix
-bool CMSCollector::par_take_from_overflow_list(size_t num,
-                                               OopTaskQueue* work_q) {
-  assert(work_q->size() == 0, "That's the current policy");
-  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
-  if (_overflow_list == NULL) {
-    return false;
-  }
-  // Grab the entire list; we'll put back a suffix
-  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
-  if (prefix == NULL) {  // someone grabbed it before we did ...
-    // ... we could spin for a short while, but for now we don't
-    return false;
-  }
-  size_t i = num;
-  oop cur = prefix;
-  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
-  if (cur->mark() != NULL) {
-    oop suffix_head = cur->mark(); // suffix will be put back on global list
-    cur->set_mark(NULL);           // break off suffix
-    // Find tail of suffix so we can prepend suffix to global list
-    for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
-    oop suffix_tail = cur;
-    assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
-           "Tautology");
-    oop observed_overflow_list = _overflow_list;
-    do {
-      cur = observed_overflow_list;
-      suffix_tail->set_mark(markOop(cur));
-      observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix_tail, &_overflow_list, cur);
-    } while (cur != observed_overflow_list);
-  }
-
-  // Push the prefix elements on work_q
-  assert(prefix != NULL, "control point invariant");
-  const markOop proto = markOopDesc::prototype();
-  oop next;
-  for (cur = prefix; cur != NULL; cur = next) {
-    next = oop(cur->mark());
-    cur->set_mark(proto);   // until proven otherwise
-    bool res = work_q->push(cur);
-    assert(res, "Bit off more than we can chew?");
-  }
-  return true;
-}
-
-// Single-threaded
-void CMSCollector::push_on_overflow_list(oop p) {
-  preserve_mark_if_necessary(p);
-  p->set_mark((markOop)_overflow_list);
-  _overflow_list = p;
-}
-
-// Multi-threaded; use CAS to prepend to overflow list
-void CMSCollector::par_push_on_overflow_list(oop p) {
-  par_preserve_mark_if_necessary(p);
-  oop observed_overflow_list = _overflow_list;
-  oop cur_overflow_list;
-  do {
-    cur_overflow_list = observed_overflow_list;
-    p->set_mark(markOop(cur_overflow_list));
-    observed_overflow_list =
-      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
-  } while (cur_overflow_list != observed_overflow_list);
-}
-
-// Single threaded
-// General Note on GrowableArray: pushes may silently fail
-// because we are (temporarily) out of C-heap for expanding
-// the stack. The problem is quite ubiquitous and affects
-// a lot of code in the JVM. The prudent thing for GrowableArray
-// to do (for now) is to exit with an error. However, that may
-// be too draconian in some cases because the caller may be
-// able to recover without much harm. For suych cases, we
-// should probably introduce a "soft_push" method which returns
-// an indication of success or failure with the assumption that
-// the caller may be able to recover from a failure; code in
-// the VM can then be changed, incrementally, to deal with such
-// failures where possible, thus, incrementally hardening the VM
-// in such low resource situations.
-void CMSCollector::preserve_mark_work(oop p, markOop m) {
-  int PreserveMarkStackSize = 128;
-
-  if (_preserved_oop_stack == NULL) {
-    assert(_preserved_mark_stack == NULL,
-           "bijection with preserved_oop_stack");
-    // Allocate the stacks
-    _preserved_oop_stack  = new (ResourceObj::C_HEAP) 
-      GrowableArray<oop>(PreserveMarkStackSize, true);
-    _preserved_mark_stack = new (ResourceObj::C_HEAP) 
-      GrowableArray<markOop>(PreserveMarkStackSize, true);
-    if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
-      vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
-                            "Preserved Mark/Oop Stack for CMS (C-heap)");
-    }
-  }
-  _preserved_oop_stack->push(p);
-  _preserved_mark_stack->push(m);
-  assert(m == p->mark(), "Mark word changed");
-  assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
-         "bijection");
-}
-
-// Single threaded
-void CMSCollector::preserve_mark_if_necessary(oop p) {
-  markOop m = p->mark();
-  if (m->must_be_preserved(p)) {
-    preserve_mark_work(p, m);
-  }
-}
-
-void CMSCollector::par_preserve_mark_if_necessary(oop p) {
-  markOop m = p->mark();
-  if (m->must_be_preserved(p)) {
-    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    // Even though we read the mark word without holding
-    // the lock, we are assured that it will not change
-    // because we "own" this oop, so no other thread can
-    // be trying to push it on the overflow list; see
-    // the assertion in preserve_mark_work() that checks
-    // that m == p->mark().
-    preserve_mark_work(p, m);
-  }
-}
-
-// We should be able to do this multi-threaded,
-// a chunk of stack being a task (this is
-// correct because each oop only ever appears
-// once in the overflow list. However, it's
-// not very easy to completely overlap this with
-// other operations, so will generally not be done
-// until all work's been completed. Because we
-// expect the preserved oop stack (set) to be small,
-// it's probably fine to do this single-threaded.
-// We can explore cleverer concurrent/overlapped/parallel
-// processing of preserved marks if we feel the
-// need for this in the future. Stack overflow should
-// be so rare in practice and, when it happens, its
-// effect on performance so great that this will
-// likely just be in the noise anyway.
-void CMSCollector::restore_preserved_marks_if_any() {
-  if (_preserved_oop_stack == NULL) {
-    assert(_preserved_mark_stack == NULL,
-           "bijection with preserved_oop_stack");
-    return;
-  }
-
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-  assert(Thread::current()->is_ConcurrentGC_thread() ||
-         Thread::current()->is_VM_thread(),
-         "should be single-threaded");
-
-  int length = _preserved_oop_stack->length();
-  assert(_preserved_mark_stack->length() == length, "bijection");
-  for (int i = 0; i < length; i++) {
-    oop p = _preserved_oop_stack->at(i);
-    assert(p->is_oop(), "Should be an oop");
-    assert(_span.contains(p), "oop should be in _span");
-    assert(p->mark() == markOopDesc::prototype(),
-           "Set when taken from overflow list");
-    markOop m = _preserved_mark_stack->at(i);
-    p->set_mark(m);
-  }
-  _preserved_mark_stack->clear();
-  _preserved_oop_stack->clear();
-  assert(_preserved_mark_stack->is_empty() &&
-         _preserved_oop_stack->is_empty(),
-         "stacks were cleared above");
-}
-
-#ifndef PRODUCT
-bool CMSCollector::no_preserved_marks() {
-  return (   (   _preserved_mark_stack == NULL
-              && _preserved_oop_stack == NULL)
-          || (   _preserved_mark_stack->is_empty()
-              && _preserved_oop_stack->is_empty()));
-}
-#endif
-
-CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
-{
-  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* size_policy =
-    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type for size policy");
-  return size_policy;
-}
-
-void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
-                                           size_t desired_promo_size) {
-  if (cur_promo_size < desired_promo_size) {
-    size_t expand_bytes = desired_promo_size - cur_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-	"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
-	expand_bytes);
-    }
-    expand(expand_bytes,
-           MinHeapDeltaBytes,
-           CMSExpansionCause::_adaptive_size_policy);
-  } else if (desired_promo_size < cur_promo_size) {
-    size_t shrink_bytes = cur_promo_size - desired_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-	"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
-	shrink_bytes);
-    }
-    shrink(shrink_bytes);
-  }
-}
-
-CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSGCAdaptivePolicyCounters* counters =
-    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong kind of counters");
-  return counters;
-}
-
-
-void ASConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-// The desired expansion delta is computed so that:
-// . desired free percentage or greater is used
-void ASConcurrentMarkSweepGeneration::compute_new_size() {
-  assert_locked_or_safepoint(Heap_lock);
-
-  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
-
-  // If incremental collection failed, we just want to expand
-  // to the limit.
-  if (incremental_collection_failed()) {
-    clear_incremental_collection_failed();
-    grow_to_reserved();
-    return;
-  }
-
-  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
-
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
-  int prev_level = level() - 1;
-  assert(prev_level >= 0, "The cms generation is the lowest generation");
-  Generation* prev_gen = gch->get_gen(prev_level);
-  assert(prev_gen->kind() == Generation::ASParNew,
-    "Wrong type of young generation");
-  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
-  size_t cur_eden = younger_gen->eden()->capacity();
-  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
-  size_t cur_promo = free();
-  size_policy->compute_tenured_generation_free_space(cur_promo, 
-						       max_available(),
-						       cur_eden);
-  resize(cur_promo, size_policy->promo_size());
-
-  // Record the new size of the space in the cms generation
-  // that is available for promotions.  This is temporary.
-  // It should be the desired promo size.
-  size_policy->avg_cms_promo()->sample(free());
-  size_policy->avg_old_live()->sample(used());
-
-  if (UsePerfData) {
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    counters->update_cms_capacity_counter(capacity());
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  assert_lock_strong(freelistLock());
-  HeapWord* old_end = _cmsSpace->end();
-  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
-  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
-  FreeChunk* chunk_at_end = find_chunk_at_end();
-  if (chunk_at_end == NULL) {
-    // No room to shrink
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("No room to shrink: old_end  "
-	PTR_FORMAT "  unallocated_start  " PTR_FORMAT 
-	" chunk_at_end  " PTR_FORMAT,
-        old_end, unallocated_start, chunk_at_end);
-    }
-    return;
-  } else {
-
-    // Find the chunk at the end of the space and determine
-    // how much it can be shrunk.
-    size_t shrinkable_size_in_bytes = chunk_at_end->size();
-    size_t aligned_shrinkable_size_in_bytes = 
-      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-    assert(unallocated_start <= chunk_at_end->end(),
-      "Inconsistent chunk at end of space");
-    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
-    size_t word_size_before = heap_word_size(_virtual_space.committed_size());
-  
-    // Shrink the underlying space
-    _virtual_space.shrink_by(bytes);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
-        " desired_bytes " SIZE_FORMAT 
-        " shrinkable_size_in_bytes " SIZE_FORMAT
-        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT 
-        "  bytes  " SIZE_FORMAT, 
-        desired_bytes, shrinkable_size_in_bytes, 
-        aligned_shrinkable_size_in_bytes, bytes);
-      gclog_or_tty->print_cr("		old_end  " SIZE_FORMAT 
-        "  unallocated_start  " SIZE_FORMAT, 
-        old_end, unallocated_start);
-    }
-  
-    // If the space did shrink (shrinking is not guaranteed),
-    // shrink the chunk at the end by the appropriate amount.
-    if (((HeapWord*)_virtual_space.high()) < old_end) {
-      size_t new_word_size = 
-        heap_word_size(_virtual_space.committed_size());
-  
-      // Have to remove the chunk from the dictionary because it is changing
-      // size and might be someplace elsewhere in the dictionary.
-
-      // Get the chunk at end, shrink it, and put it
-      // back.
-      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
-      size_t word_size_change = word_size_before - new_word_size;
-      size_t chunk_at_end_old_size = chunk_at_end->size();
-      assert(chunk_at_end_old_size >= word_size_change,
-        "Shrink is too large");
-      chunk_at_end->setSize(chunk_at_end_old_size - 
-  			  word_size_change);
-      _cmsSpace->freed((HeapWord*) chunk_at_end->end(), 
-        word_size_change);
-      
-      _cmsSpace->returnChunkToDictionary(chunk_at_end);
-  
-      MemRegion mr(_cmsSpace->bottom(), new_word_size);
-      _bts->resize(new_word_size);  // resize the block offset shared array
-      Universe::heap()->barrier_set()->resize_covered_region(mr);
-      _cmsSpace->assert_locked();
-      _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-  
-      NOT_PRODUCT(_cmsSpace->dictionary()->verify());
-  
-      // update the space and generation capacity counters
-      if (UsePerfData) {
-        _space_counters->update_capacity();
-        _gen_counters->update_all();
-      }
-  
-      if (Verbose && PrintGCDetails) {
-        size_t new_mem_size = _virtual_space.committed_size();
-        size_t old_mem_size = new_mem_size + bytes;
-        gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-      }
-    }
-  
-    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(), 
-      "Inconsistency at end of space");
-    assert(chunk_at_end->end() == _cmsSpace->end(), 
-      "Shrinking is inconsistent");
-    return;
-  }
-}
-
-// Transfer some number of overflown objects to usual marking
-// stack. Return true if some objects were transferred.
-bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
-  size_t num = MIN2((size_t)_mark_stack->capacity()/4,
-                    (size_t)ParGCDesiredObjsFromOverflowList);
-  
-  bool res = _collector->take_from_overflow_list(num, _mark_stack);
-  assert(_collector->overflow_list_is_empty() || res,
-         "If list is not empty, we should have taken something");
-  assert(!res || _mark_stack->isEmpty(),
-         "If we took something, it should now be on our stack");
-  return res;
-}
-
-size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
-  size_t res = _sp->block_size_no_stall(addr, _collector);
-  assert(res != 0, "Should always be able to compute a size");
-  if (_sp->block_is_obj(addr)) {
-    if (_live_bit_map->isMarked(addr)) {
-      // It can't have been dead in a previous cycle
-      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
-    } else {
-      _dead_bit_map->mark(addr);      // mark the dead object
-    }
-  }
-  return res;
-}
--- a/hotspot/src/share/vm/memory/concurrentMarkSweepGeneration.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1809 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)concurrentMarkSweepGeneration.hpp	1.158 07/05/05 17:05:46 JVM"
-#endif
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// ConcurrentMarkSweepGeneration is in support of a concurrent
-// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
-// style. We assume, for now, that this generation is always the
-// seniormost generation (modulo the PermGeneration), and for simplicity
-// in the first implementation, that this generation is a single compactible
-// space. Neither of these restrictions appears essential, and will be
-// relaxed in the future when more time is available to implement the
-// greater generality (and there's a need for it).
-//
-// Concurrent mode failures are currently handled by
-// means of a sliding mark-compact.
-
-class CMSAdaptiveSizePolicy;
-class CMSConcMarkingTask;
-class CMSGCAdaptivePolicyCounters;
-class ConcurrentMarkSweepGeneration;
-class ConcurrentMarkSweepPolicy;
-class ConcurrentMarkSweepThread;
-class CompactibleFreeListSpace;
-class FreeChunk;
-class PromotionInfo;
-class ScanMarkedObjectsAgainCarefullyClosure;
-
-
-// A generic CMS bit map. It's the basis for both the CMS marking bit map
-// as well as for the mod union table (in each case only a subset of the
-// methods are used). This is essentially a wrapper around the BitMap class,
-// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
-// we have _shifter == 0. and for the mod union table we have
-// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
-// XXX 64-bit issues in BitMap?
-class CMSBitMap VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
-
-  HeapWord* _bmStartWord;   // base address of range covered by map
-  size_t    _bmWordSize;    // map size (in #HeapWords covered)
-  const int _shifter;	    // shifts to convert HeapWord to bit position
-  VirtualSpace _virtual_space; // underlying the bit map
-  BitMap    _bm;            // the bit map itself
- public:
-  Mutex* const _lock;       // mutex protecting _bm;
-
- public:
-  // constructor
-  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
-
-  // allocates the actual storage for the map
-  bool allocate(MemRegion mr);
-  // field getter
-  Mutex* lock() const { return _lock; }
-  // locking verifier convenience function
-  void assert_locked() const PRODUCT_RETURN;
-
-  // inquiries
-  HeapWord* startWord()   const { return _bmStartWord; }
-  size_t    sizeInWords() const { return _bmWordSize;  }
-  size_t    sizeInBits()  const { return _bm.size();   }
-  // the following is one past the last word in space
-  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
-
-  // reading marks
-  bool isMarked(HeapWord* addr) const;
-  bool par_isMarked(HeapWord* addr) const; // do not lock checks
-  bool isUnmarked(HeapWord* addr) const;
-  bool isAllClear() const;
-
-  // writing marks
-  void mark(HeapWord* addr);
-  // For marking by parallel GC threads;
-  // returns true if we did, false if another thread did
-  bool par_mark(HeapWord* addr);
-
-  void mark_range(MemRegion mr);
-  void par_mark_range(MemRegion mr);
-  void mark_large_range(MemRegion mr);
-  void par_mark_large_range(MemRegion mr);
-  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
-  void clear_range(MemRegion mr);
-  void par_clear_range(MemRegion mr);
-  void clear_large_range(MemRegion mr);
-  void par_clear_large_range(MemRegion mr);
-  void clear_all();
-  void clear_all_incrementally();  // Not yet implemented!!
-
-  NOT_PRODUCT(
-    // checks the memory region for validity
-    void region_invariant(MemRegion mr);
-  )
-  
-  // iteration
-  void iterate(BitMapClosure* cl) {
-    _bm.iterate(cl);
-  }
-  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
-  void dirty_range_iterate_clear(MemRegionClosure* cl);
-  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
- 
-  // auxiliary support for iteration
-  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
-  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
-                                            HeapWord* end_addr) const;
-  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
-  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
-                                              HeapWord* end_addr) const;
-  MemRegion getAndClearMarkedRegion(HeapWord* addr);
-  MemRegion getAndClearMarkedRegion(HeapWord* start_addr, 
-                                           HeapWord* end_addr);
-
-  // conversion utilities
-  HeapWord* offsetToHeapWord(size_t offset) const;
-  size_t    heapWordToOffset(HeapWord* addr) const;
-  size_t    heapWordDiffToOffsetDiff(size_t diff) const;
-  
-  // debugging
-  // is this address range covered by the bit-map?
-  NOT_PRODUCT(
-    bool covers(MemRegion mr) const;
-    bool covers(HeapWord* start, size_t size = 0) const;
-  )
-  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
-};
-
-// Represents a marking stack used by the CMS collector.
-// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
-class CMSMarkStack: public CHeapObj  {
-  // 
-  friend class CMSCollector;   // to get at expasion stats further below
-  //
-
-  VirtualSpace _virtual_space;  // space for the stack
-  oop*   _base;      // bottom of stack
-  size_t _index;     // one more than last occupied index
-  size_t _capacity;  // max #elements
-  Mutex  _par_lock;  // an advisory lock used in case of parallel access
-  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
-
- protected:
-  size_t _hit_limit;      // we hit max stack size limit
-  size_t _failed_double;  // we failed expansion before hitting limit
-
- public:
-  CMSMarkStack():
-    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
-    _hit_limit(0),
-    _failed_double(0) {}
-
-  bool allocate(size_t size);
-
-  size_t capacity() const { return _capacity; }
-
-  oop pop() {
-    if (!isEmpty()) {
-      return _base[--_index] ;
-    }
-    return NULL;
-  }
-
-  bool push(oop ptr) {
-    if (isFull()) {
-      return false;
-    } else {
-      _base[_index++] = ptr;
-      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
-      return true;
-    }
-  }
-
-  bool isEmpty() const { return _index == 0; }
-  bool isFull()  const {
-    assert(_index <= _capacity, "buffer overflow");
-    return _index == _capacity;
-  }
-
-  size_t length() { return _index; }
-
-  // "Parallel versions" of some of the above
-  oop par_pop() {
-    // lock and pop
-    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
-    return pop();
-  }
-
-  bool par_push(oop ptr) {
-    // lock and push
-    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
-    return push(ptr);
-  }
-
-  // Forcibly reset the stack, losing all of its contents.
-  void reset() {
-    _index = 0;
-  }
-
-  // Expand the stack, typically in response to an overflow condition
-  void expand();
-
-  // Compute the least valued stack element.
-  oop least_value(HeapWord* low) {
-     oop least = (oop)low;
-     for (size_t i = 0; i < _index; i++) {
-       least = MIN2(least, _base[i]);
-     }
-     return least;
-  }
-
-  // Exposed here to allow stack expansion in || case
-  Mutex* par_lock() { return &_par_lock; }
-};
-
-class CardTableRS;
-class CMSParGCThreadState;
-
-class ModUnionClosure: public MemRegionClosure {
- protected:
-  CMSBitMap* _t;
- public:
-  ModUnionClosure(CMSBitMap* t): _t(t) { }
-  void do_MemRegion(MemRegion mr);
-};
-
-class ModUnionClosurePar: public ModUnionClosure {
- public:
-  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
-  void do_MemRegion(MemRegion mr);
-};
-
-// Survivor Chunk Array in support of parallelization of
-// Survivor Space rescan.
-class ChunkArray: public CHeapObj {
-  size_t _index;
-  size_t _capacity;
-  HeapWord** _array;   // storage for array
-
- public:
-  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
-  ChunkArray(HeapWord** a, size_t c):
-    _index(0), _capacity(c), _array(a) {}
-
-  HeapWord** array() { return _array; }
-  void set_array(HeapWord** a) { _array = a; }
-
-  size_t capacity() { return _capacity; }
-  void set_capacity(size_t c) { _capacity = c; }
-
-  size_t end() {
-    assert(_index < capacity(), "_index out of bounds");
-    return _index;
-  }  // exclusive
-
-  HeapWord* nth(size_t n) {
-    assert(n < end(), "Out of bounds access");
-    return _array[n];
-  }
-  
-  void reset() {
-    _index = 0;
-  }
-
-  void record_sample(HeapWord* p, size_t sz) {
-    // For now we do not do anything with the size
-    if (_index < _capacity) {
-      _array[_index++] = p;
-    }
-  }
-};
-
-// 
-// Timing, allocation and promotion statistics for gc scheduling and incremental
-// mode pacing.  Most statistics are exponential averages.
-// 
-class CMSStats VALUE_OBJ_CLASS_SPEC {
- private:
-  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.
-
-  // The following are exponential averages with factor alpha:
-  //   avg = (100 - alpha) * avg + alpha * cur_sample
-  // 
-  //   The durations measure:  end_time[n] - start_time[n]
-  //   The periods measure:    start_time[n] - start_time[n-1]
-  //
-  // The cms period and duration include only concurrent collections; time spent
-  // in foreground cms collections due to System.gc() or because of a failure to
-  // keep up are not included.
-  //
-  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
-  // real value, but is used only after the first period.  A value of 100 is
-  // used for the first sample so it gets the entire weight.
-  unsigned int _saved_alpha; // 0-100
-  unsigned int _gc0_alpha;
-  unsigned int _cms_alpha;
-
-  double _gc0_duration;
-  double _gc0_period;
-  size_t _gc0_promoted;		// bytes promoted per gc0
-  double _cms_duration;
-  double _cms_duration_pre_sweep; // time from initiation to start of sweep
-  double _cms_duration_per_mb;
-  double _cms_period;
-  size_t _cms_allocated;	// bytes of direct allocation per gc0 period
-
-  // Timers.
-  elapsedTimer _cms_timer;
-  TimeStamp    _gc0_begin_time;
-  TimeStamp    _cms_begin_time;
-  TimeStamp    _cms_end_time;
-
-  // Snapshots of the amount used in the CMS generation.
-  size_t _cms_used_at_gc0_begin;
-  size_t _cms_used_at_gc0_end;
-  size_t _cms_used_at_cms_begin;
-
-  // Used to prevent the duty cycle from being reduced in the middle of a cms
-  // cycle.
-  bool _allow_duty_cycle_reduction;
-
-  enum {
-    _GC0_VALID = 0x1,
-    _CMS_VALID = 0x2,
-    _ALL_VALID = _GC0_VALID | _CMS_VALID
-  };
-
-  unsigned int _valid_bits;
-
-  unsigned int _icms_duty_cycle;	// icms duty cycle (0-100).
-
- protected:
-
-  // Return a duty cycle that avoids wild oscillations, by limiting the amount
-  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
-  // as a recommended value).
-  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
-					     unsigned int new_duty_cycle);
-  unsigned int icms_update_duty_cycle_impl();
-
- public:
-  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
-	   unsigned int alpha = CMSExpAvgFactor);
-
-  // Whether or not the statistics contain valid data; higher level statistics
-  // cannot be called until this returns true (they require at least one young
-  // gen and one cms cycle to have completed).
-  bool valid() const;
-
-  // Record statistics.
-  void record_gc0_begin();
-  void record_gc0_end(size_t cms_gen_bytes_used);
-  void record_cms_begin();
-  void record_cms_end();
-
-  // Allow management of the cms timer, which must be stopped/started around
-  // yield points.
-  elapsedTimer& cms_timer()     { return _cms_timer; }
-  void start_cms_timer()        { _cms_timer.start(); }
-  void stop_cms_timer()         { _cms_timer.stop(); }
-
-  // Basic statistics; units are seconds or bytes.
-  double gc0_period() const     { return _gc0_period; }
-  double gc0_duration() const   { return _gc0_duration; }
-  size_t gc0_promoted() const   { return _gc0_promoted; }
-  double cms_period() const          { return _cms_period; }
-  double cms_duration() const        { return _cms_duration; }
-  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
-  size_t cms_allocated() const       { return _cms_allocated; }
-
-  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
-
-  // Seconds since the last background cms cycle began or ended.
-  double cms_time_since_begin() const;
-  double cms_time_since_end() const;
-
-  // Higher level statistics--caller must check that valid() returns true before
-  // calling.
-
-  // Returns bytes promoted per second of wall clock time.
-  double promotion_rate() const;
-
-  // Returns bytes directly allocated per second of wall clock time.
-  double cms_allocation_rate() const;
-
-  // Rate at which space in the cms generation is being consumed (sum of the
-  // above two).
-  double cms_consumption_rate() const;
-
-  // Returns an estimate of the number of seconds until the cms generation will
-  // fill up, assuming no collection work is done.
-  double time_until_cms_gen_full() const;
-
-  // Returns an estimate of the number of seconds remaining until
-  // the cms generation collection should start.
-  double time_until_cms_start() const;
-
-  // End of higher level statistics.
-
-  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
-  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
-
-  // Update the duty cycle and return the new value.
-  unsigned int icms_update_duty_cycle();
-
-  // Debugging.
-  void print_on(outputStream* st) const PRODUCT_RETURN;
-  void print() const { print_on(gclog_or_tty); }
-};
-
-// A closure related to weak references processing which
-// we embed in the CMSCollector, since we need to pass
-// it to the reference processor for secondary filtering
-// of references based on reachability of referent;
-// see role of _is_alive_non_header closure in the
-// ReferenceProcessor class.
-// For objects in the CMS generation, this closure checks
-// if the object is "live" (reachable). Used in weak
-// reference processing.
-class CMSIsAliveClosure: public BoolObjectClosure {
-  MemRegion  _span;
-  const CMSBitMap* _bit_map;
-
-  friend class CMSCollector;
- protected:
-  void set_span(MemRegion span) { _span = span; }
- public:
-  CMSIsAliveClosure(CMSBitMap* bit_map):
-    _bit_map(bit_map) { }
-
-  CMSIsAliveClosure(MemRegion span,
-                    CMSBitMap* bit_map):
-    _span(span),
-    _bit_map(bit_map) { }
-  void do_object(oop obj) {
-    assert(false, "not to be invoked");
-  }
-  bool do_object_b(oop obj);
-};
-
-
-// Implements AbstractRefProcTaskExecutor for CMS.
-class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
-  CMSRefProcTaskExecutor(CMSCollector& collector)
-    : _collector(collector)
-  { }
-  
-  // Executes a task using worker threads.  
-  virtual void execute(ProcessTask& task);
-  virtual void execute(EnqueueTask& task);
-private:
-  CMSCollector& _collector;
-};
-
-
-class CMSCollector: public CHeapObj {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepThread;
-  friend class ConcurrentMarkSweepGeneration;
-  friend class CompactibleFreeListSpace;
-  friend class CMSParRemarkTask;
-  friend class CMSConcMarkingTask;
-  friend class CMSRefProcTaskProxy;
-  friend class CMSRefProcTaskExecutor;
-  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
-  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
-  friend class PushOrMarkClosure;             // to access _restart_addr
-  friend class Par_PushOrMarkClosure;             // to access _restart_addr
-  friend class MarkFromRootsClosure;          //  -- ditto --
-                                              // ... and for clearing cards
-  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
-                                              // ... and for clearing cards
-  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
-  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
-  friend class PushAndMarkVerifyClosure;      //  -- ditto --
-  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
-  friend class PushAndMarkClosure;            //  -- ditto --
-  friend class Par_PushAndMarkClosure;        //  -- ditto --
-  friend class CMSKeepAliveClosure;           //  -- ditto --
-  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
-  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
-  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
-  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
-  friend class VM_CMS_Operation;
-  friend class VM_CMS_Initial_Mark_Operation;
-  friend class VM_CMS_Final_Remark_Operation;
-
- private:
-  jlong _time_of_last_gc;
-  void update_time_of_last_gc(jlong now) {
-    _time_of_last_gc = now;
-  }
-  
-  OopTaskQueueSet* _task_queues;
-
-  // Overflow list of grey objects, threaded through mark-word
-  // Manipulated with CAS in the parallel/multi-threaded case.
-  oop _overflow_list;
-  // The following array-pair keeps track of mark words
-  // displaced for accomodating overflow list above.
-  // This code will likely be revisited under RFE#4922830.
-  GrowableArray<oop>*     _preserved_oop_stack; 
-  GrowableArray<markOop>* _preserved_mark_stack; 
-
-  int*             _hash_seed;
-
-  // In support of multi-threaded concurrent phases
-  YieldingFlexibleWorkGang* _conc_workers;
-
-  // Performance Counters
-  CollectorCounters* _gc_counters;
-
-  // Initialization Errors
-  bool _completed_initialization;
-
-  // In support of ExplicitGCInvokesConcurrent
-  static   bool _full_gc_requested;
-  unsigned int  _collection_count_start;
-
-  // Verification support
-  CMSBitMap     _verification_mark_bm;
-  void verify_after_remark_work_1();
-  void verify_after_remark_work_2();
-
-  // true if any verification flag is on.
-  bool _verifying;
-  bool verifying() const { return _verifying; }
-  void set_verifying(bool v) { _verifying = v; }
-
-  // Collector policy
-  ConcurrentMarkSweepPolicy* _collector_policy;
-  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
-
-  // Check whether the gc time limit has been 
-  // exceeded and set the size policy flag
-  // appropriately.
-  void check_gc_time_limit();
-  // XXX Move these to CMSStats ??? FIX ME !!!
-  elapsedTimer _sweep_timer;
-  AdaptivePaddedAverage _sweep_estimate;
-
- protected:
-  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
-  ConcurrentMarkSweepGeneration* _permGen; // perm gen
-  MemRegion                      _span;    // span covering above two
-  CardTableRS*                   _ct;      // card table
-
-  // CMS marking support structures
-  CMSBitMap     _markBitMap;
-  CMSBitMap     _modUnionTable;
-  CMSMarkStack  _markStack;
-  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
-                                          // to revisit
-  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
-
-  HeapWord*     _restart_addr; // in support of marking stack overflow
-  void          lower_restart_addr(HeapWord* low);
-
-  // Counters in support of marking stack / work queue overflow handling:
-  // a non-zero value indicates certain types of overflow events during
-  // the current CMS cycle and could lead to stack resizing efforts at
-  // an opportune future time.
-  size_t        _ser_pmc_preclean_ovflw;
-  size_t        _ser_pmc_remark_ovflw;
-  size_t        _par_pmc_remark_ovflw;
-  size_t        _ser_kac_ovflw;
-  size_t        _par_kac_ovflw;
-
-  // ("Weak") Reference processing support
-  ReferenceProcessor*            _ref_processor;
-  CMSIsAliveClosure              _is_alive_closure;
-      // keep this textually after _markBitMap; c'tor dependency
-
-  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
-  ModUnionClosure    _modUnionClosure;
-  ModUnionClosurePar _modUnionClosurePar;
-
-  // CMS abstract state machine
-  // initial_state: Idling
-  // next_state(Idling)            = {Marking}
-  // next_state(Marking)           = {Precleaning, Sweeping}
-  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
-  // next_state(AbortablePreclean) = {FinalMarking}
-  // next_state(FinalMarking)      = {Sweeping}
-  // next_state(Sweeping)          = {Resizing}
-  // next_state(Resizing)          = {Resetting}
-  // next_state(Resetting)         = {Idling}
-  // The numeric values below are chosen so that:
-  // . _collectorState <= Idling ==  post-sweep && pre-mark
-  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
-  //                                            precleaning || abortablePrecleanb
-  enum CollectorState {
-    Resizing            = 0,
-    Resetting           = 1,
-    Idling              = 2,
-    InitialMarking      = 3,
-    Marking             = 4,
-    Precleaning         = 5,
-    AbortablePreclean   = 6,
-    FinalMarking        = 7,
-    Sweeping            = 8 
-  };
-  static CollectorState _collectorState;
-
-  // State related to prologue/epilogue invocation for my generations
-  bool _between_prologue_and_epilogue;
-
-  // Signalling/State related to coordination between fore- and backgroud GC
-  // Note: When the baton has been passed from background GC to foreground GC,
-  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
-  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
-                                 // wants to go active
-  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
-                                 // yet passed the baton to the foreground GC
-
-  // Support for CMSScheduleRemark (abortable preclean)
-  bool _abort_preclean;
-  bool _start_sampling;
-
-  int    _numYields;
-  size_t _numDirtyCards;
-  uint   _sweepCount;
-  // number of full gc's since the last concurrent gc.
-  uint	 _full_gcs_since_conc_gc;
-
-  // if occupancy exceeds this, start a new gc cycle
-  double _initiatingOccupancy;
-  // occupancy used for bootstrapping stats
-  double _bootstrap_occupancy;
-
-  // timer
-  elapsedTimer _timer;
-
-  // Timing, allocation and promotion statistics, used for scheduling.
-  CMSStats      _stats;
-
-  // Allocation limits installed in the young gen, used only in
-  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
-  // these limits, the cms generation is notified and the cms thread is started
-  // or stopped, respectively.
-  HeapWord*	_icms_start_limit;
-  HeapWord*	_icms_stop_limit;
-
-  enum CMS_op_type {
-    CMS_op_checkpointRootsInitial,
-    CMS_op_checkpointRootsFinal
-  };
-
-  void do_CMS_operation(CMS_op_type op);
-  bool stop_world_and_do(CMS_op_type op);
-
-  OopTaskQueueSet* task_queues() { return _task_queues; }
-  int*             hash_seed(int i) { return &_hash_seed[i]; }
-  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
-
-  // Support for parallelizing Eden rescan in CMS remark phase
-  void sample_eden(); // ... sample Eden space top
-
- private:
-  // Support for parallelizing young gen rescan in CMS remark phase
-  Generation* _young_gen;  // the younger gen
-  HeapWord** _top_addr;    // ... Top of Eden
-  HeapWord** _end_addr;    // ... End of Eden
-  HeapWord** _eden_chunk_array; // ... Eden partitioning array
-  size_t     _eden_chunk_index; // ... top (exclusive) of array
-  size_t     _eden_chunk_capacity;  // ... max entries in array
-
-  // Support for parallelizing survivor space rescan 
-  HeapWord** _survivor_chunk_array;
-  size_t     _survivor_chunk_index;
-  size_t     _survivor_chunk_capacity;
-  size_t*    _cursor;
-  ChunkArray* _survivor_plab_array;
-
-  // Support for marking stack overflow handling
-  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
-  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
-  void push_on_overflow_list(oop p);
-  void par_push_on_overflow_list(oop p);
-  // the following is, obviously, not, in general, "MT-stable"
-  bool overflow_list_is_empty() { return _overflow_list == NULL; }
-  
-  void preserve_mark_if_necessary(oop p);
-  void par_preserve_mark_if_necessary(oop p);
-  void preserve_mark_work(oop p, markOop m);
-  void restore_preserved_marks_if_any();
-  NOT_PRODUCT(bool no_preserved_marks();)
-  // in support of testing overflow code
-  NOT_PRODUCT(int _overflow_counter;)
-  NOT_PRODUCT(bool simulate_overflow();)       // sequential
-  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
-
-  int _roots_scanning_options;
-  int roots_scanning_options() const      { return _roots_scanning_options; }
-  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
-  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }
-
-  // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
-
-  // a return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch);  // concurrent marking work
-
- public:   // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch);      // single-threaded marking
-  bool do_marking_mt(bool asynch);      // multi-threaded  marking
-
- private:
-
-  // concurrent precleaning work
-  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
-                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
-  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
-                             ScanMarkedObjectsAgainCarefullyClosure* cl);
-  // Does precleaning work, returning a quantity indicative of
-  // the amount of "useful work" done.
-  size_t preclean_work(bool clean_refs, bool clean_survivors);
-  void abortable_preclean(); // Preclean while looking for possible abort
-  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
-  // Helper function for above; merge-sorts the per-thread plab samples
-  void merge_survivor_plab_arrays(ContiguousSpace* surv);
-  // Resets (i.e. clears) the per-thread plab sample vectors
-  void reset_survivor_plab_arrays();
-
-  // final (second) checkpoint work
-  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
-                                bool init_mark_was_synchronous);
-  // work routine for parallel version of remark
-  void do_remark_parallel();
-  // work routine for non-parallel version of remark
-  void do_remark_non_parallel();
-  // reference processing work routine (during second checkpoint)
-  void refProcessingWork(bool asynch, bool clear_all_soft_refs);
-
-  // concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
-
-  // (concurrent) resetting of support data structures
-  void reset(bool asynch);
-
-  // Clear _expansion_cause fields of constituent generations
-  void clear_expansion_cause();
-
-  // An auxilliary method used to record the ends of
-  // used regions of each generation to limit the extent of sweep
-  void save_sweep_limits();
-
-  // Resize the generations included in the collector.
-  void compute_new_size();
-
-  // A work method used by foreground collection to determine
-  // what type of collection (compacting or not, continuing or fresh)
-  // it should do.
-  void decide_foreground_collection_type(bool clear_all_soft_refs,
-    bool* should_compact, bool* should_start_over);
-
-  // A work method used by the foreground collector to do
-  // a mark-sweep-compact.
-  void do_compaction_work(bool clear_all_soft_refs);
-
-  // A work method used by the foreground collector to do
-  // a mark-sweep, after taking over from a possibly on-going
-  // concurrent mark-sweep collection.
-  void do_mark_sweep_work(bool clear_all_soft_refs,
-    CollectorState first_state, bool should_start_over);
-
-  // If the backgrould GC is active, acquire control from the background
-  // GC and do the collection.
-  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
-
-  // For synchronizing passing of control from background to foreground
-  // GC.  waitForForegroundGC() is called by the background
-  // collector.  It if had to wait for a foreground collection,
-  // it returns true and the background collection should assume
-  // that the collection was finished by the foreground
-  // collector.
-  bool waitForForegroundGC();
-
-  // Incremental mode triggering:  recompute the icms duty cycle and set the
-  // allocation limits in the young gen.
-  void icms_update_allocation_limits();
-
-  size_t block_size_using_printezis_bits(HeapWord* addr) const;
-  size_t block_size_if_printezis_bits(HeapWord* addr) const;
-  HeapWord* next_card_start_after_block(HeapWord* addr) const;
-
-  void reset_cms_verification_state();
- public:
-  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
-               ConcurrentMarkSweepGeneration* permGen,
-               CardTableRS*                   ct,
-	       ConcurrentMarkSweepPolicy*     cp);
-  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
-
-  ReferenceProcessor* ref_processor() { return _ref_processor; }
-  void ref_processor_init();
-
-  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
-  static CollectorState abstract_state() { return _collectorState;  }
-  double initiatingOccupancy() const { return _initiatingOccupancy; }
-
-  bool should_abort_preclean() const; // Whether preclean should be aborted.
-  size_t get_eden_used() const;
-  size_t get_eden_capacity() const;
-
-  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
-
-  // locking checks
-  NOT_PRODUCT(static bool have_cms_token();)
-
-  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
-  bool shouldConcurrentCollect();
-
-  void collect(bool   full,
-               bool   clear_all_soft_refs,
-               size_t size,
-               bool   tlab);
-  void collect_in_background(bool clear_all_soft_refs);
-  void collect_in_foreground(bool clear_all_soft_refs);
-
-  // In support of ExplicitGCInvokesConcurrent
-  static void request_full_gc(unsigned int full_gc_count);
-
-  void direct_allocated(HeapWord* start, size_t size);
-
-  // Object is dead if not marked and current phase is sweeping.
-  bool is_dead_obj(oop obj) const;
-
-  // After a promotion (of "start"), do any necessary marking.
-  // If "par", then it's being done by a parallel GC thread.
-  // The last two args indicate if we need precise marking
-  // and if so the size of the object so it can be dirtied
-  // in its entirety.
-  void promoted(bool par, HeapWord* start,
-                bool is_obj_array, size_t obj_size);
-
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-				     size_t word_size);
-
-  void getFreelistLocks() const;
-  void releaseFreelistLocks() const;
-  bool haveFreelistLocks() const;
-
-  // GC prologue and epilogue
-  void gc_prologue(bool full);
-  void gc_epilogue(bool full);
-
-  jlong time_of_last_gc(jlong now) {
-    if (_collectorState <= Idling) {
-      // gc not in progress
-      return _time_of_last_gc;
-    } else {
-      // collection in progress
-      return now;
-    }
-  }
-
-  // Support for parallel remark of survivor space
-  void* get_data_recorder(int thr_num);
-
-  CMSBitMap* markBitMap()  { return &_markBitMap; }
-  void directAllocated(HeapWord* start, size_t size);
-
-  // main CMS steps and related support
-  void checkpointRootsInitial(bool asynch);
-  bool markFromRoots(bool asynch);  // a return value of false indicates failure
-                                    // due to stack overflow
-  void preclean();
-  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
-                            bool init_mark_was_synchronous);
-  void sweep(bool asynch);
-
-  // Check that the currently executing thread is the expected
-  // one (foreground collector or background collector).
-  void check_correct_thread_executing()        PRODUCT_RETURN;
-  // XXXPERM void print_statistics()           PRODUCT_RETURN;
-
-  bool is_cms_reachable(HeapWord* addr);
-
-  // Performance Counter Support
-  CollectorCounters* counters()    { return _gc_counters; }
-
-  // timer stuff
-  void    startTimer() { _timer.start();   }
-  void    stopTimer()  { _timer.stop();    }
-  void    resetTimer() { _timer.reset();   }
-  double  timerValue() { return _timer.seconds(); }
-
-  int  yields()          { return _numYields; }
-  void resetYields()     { _numYields = 0;    }
-  void incrementYields() { _numYields++;      }
-  void resetNumDirtyCards()               { _numDirtyCards = 0; }
-  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
-  size_t  numDirtyCards()                 { return _numDirtyCards; }
-
-  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
-  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
-  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
-  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
-  uint  sweepCount() const             { return _sweepCount; }
-  void incrementSweepCount()           { _sweepCount++; }
-
-  // Timers/stats for gc scheduling and incremental mode pacing.
-  CMSStats& stats() { return _stats; }
-
-  // Convenience methods that check whether CMSIncrementalMode is enabled and
-  // forward to the corresponding methods in ConcurrentMarkSweepThread.
-  static void start_icms();
-  static void stop_icms();    // Called at the end of the cms cycle.
-  static void disable_icms(); // Called before a foreground collection.
-  static void enable_icms();  // Called after a foreground collection.
-  void icms_wait();	     // Called at yield points.
-
-  // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
-
-  // debugging
-  void verify(bool);
-  bool verify_after_remark();
-  void verify_ok_to_terminate() const PRODUCT_RETURN;
-
-  // convenience methods in support of debugging
-  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
-  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
-
-  // accessors
-  CMSMarkStack* verification_mark_stack() { return &_markStack; }
-  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
-
-  // Get the bit map with a perm gen "deadness" information.
-  CMSBitMap* perm_gen_verify_bit_map()       { return &_perm_gen_verify_bit_map; }
-
-  // Initialization errors
-  bool completed_initialization() { return _completed_initialization; }
-};
-
-class CMSExpansionCause : public AllStatic  {
- public:
-  enum Cause {
-    _no_expansion,
-    _satisfy_free_ratio,
-    _satisfy_promotion,
-    _satisfy_allocation,
-    _allocate_par_lab,
-    _allocate_par_spooling_space,
-    _adaptive_size_policy
-  };
-  // Return a string describing the cause of the expansion.
-  static const char* to_string(CMSExpansionCause::Cause cause);
-};
-
-class ConcurrentMarkSweepGeneration: public CardGeneration {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepThread;
-  friend class ConcurrentMarkSweep;
-  friend class CMSCollector;
- protected:
-  static CMSCollector*       _collector; // the collector that collects us
-  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)
-
-  // Performance Counters
-  GenerationCounters*      _gen_counters;
-  GSpaceCounters*          _space_counters;
-
-  // Words directly allocated, used by CMSStats.
-  size_t _direct_allocated_words;
-
-  // Non-product stat counters
-  NOT_PRODUCT(
-    int _numObjectsPromoted;
-    int _numWordsPromoted;
-    int _numObjectsAllocated;
-    int _numWordsAllocated;
-  )
-
-  // Used for sizing decisions
-  bool _incremental_collection_failed;
-  bool incremental_collection_failed() {
-    return _incremental_collection_failed;
-  }
-  void set_incremental_collection_failed() {
-    _incremental_collection_failed = true;
-  }
-  void clear_incremental_collection_failed() {
-    _incremental_collection_failed = false;
-  }
-
- private:
-  // For parallel young-gen GC support.
-  CMSParGCThreadState** _par_gc_thread_states;
-
-  // Reason generation was expanded
-  CMSExpansionCause::Cause _expansion_cause;
-
-  // accessors
-  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
-  CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
-
-  // In support of MinChunkSize being larger than min object size
-  const double _dilatation_factor;
-
-  enum CollectionTypes {
-    Concurrent_collection_type		= 0,
-    MS_foreground_collection_type	= 1,
-    MSC_foreground_collection_type	= 2,
-    Unknown_collection_type		= 3
-  };
-
-  CollectionTypes _debug_collection_type;
-
- protected:
-  // Grow generation by specified size (returns false if unable to grow)
-  bool grow_by(size_t bytes);
-  // Grow generation to reserved size.
-  bool grow_to_reserved();
-  // Shrink generation by specified size (returns false if unable to shrink)
-  virtual void shrink_by(size_t bytes);
-
-  // Update statistics for GC
-  virtual void update_gc_stats(int level, bool full);
-
-  // Maximum available space in the generation (including uncommitted)
-  // space.
-  size_t max_available() const;
-
- public:
-  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                int level, CardTableRS* ct,
-				bool use_adaptive_freelists,
-                                FreeBlockDictionary::DictionaryChoice);
-
-  // Accessors
-  CMSCollector* collector() const { return _collector; }
-  static void set_collector(CMSCollector* collector) {
-    assert(_collector == NULL, "already set");
-    _collector = collector;
-  }
-  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
-  
-  Mutex* freelistLock() const;
-
-  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
-
-  // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-
-  bool refs_discovery_is_atomic() const { return false; }
-  bool refs_discovery_is_mt()     const {
-    // Note: CMS does MT-discovery during the parallel-remark
-    // phases. Use ReferenceProcessorMTMutator to make refs
-    // discovery MT-safe during such phases or other parallel
-    // discovery phases in the future. This may all go away
-    // if/when we decide that refs discovery is sufficiently
-    // rare that the cost of the CAS's involved is in the
-    // noise. That's a measurement that should be done, and
-    // the code simplified if that turns out to be the case.
-    return false;
-  }
-
-  // Override
-  virtual void ref_processor_init();
-
-  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
-
-  // Space enquiries
-  size_t capacity() const;
-  size_t used() const;
-  size_t free() const;
-  double occupancy()      { return ((double)used())/((double)capacity()); }
-  size_t contiguous_available() const;
-  size_t unsafe_max_alloc_nogc() const;
-
-  // over-rides
-  MemRegion used_region() const;
-  MemRegion used_region_at_save_marks() const;
-
-  // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that the second conjunct is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set. This is better than using th policy's should_collect_gen0_first()
-  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
-  virtual bool full_collects_younger_generations() const {
-    return UseCMSCompactAtFullCollection && !CollectGen0First;
-  }
-
-  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
-  // Support for compaction
-  CompactibleSpace* first_compaction_space() const;
-  // Adjust quantites in the generation affected by
-  // the compaction.
-  void reset_after_compaction();
-
-  // Allocation support
-  HeapWord* allocate(size_t size, bool tlab);
-  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
-  oop       promote(oop obj, size_t obj_size, oop* ref);
-  HeapWord* par_allocate(size_t size, bool tlab) {
-    return allocate(size, tlab);
-  }
-
-  // Incremental mode triggering.
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-				     size_t word_size);
-
-  // Used by CMSStats to track direct allocation.  The value is sampled and
-  // reset after each young gen collection.
-  size_t direct_allocated_words() const { return _direct_allocated_words; }
-  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
-
-  // Overrides for parallel promotion.
-  virtual oop par_promote(int thread_num,
-			  oop obj, markOop m, size_t word_sz);
-  // This one should not be called for CMS.
-  virtual void par_promote_alloc_undo(int thread_num,
-				      HeapWord* obj, size_t word_sz);
-  virtual void par_promote_alloc_done(int thread_num);
-  virtual void par_oop_since_save_marks_iterate_done(int thread_num);
-
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
-
-  bool should_collect(bool full, size_t size, bool tlab);
-    // XXXPERM
-  bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
-  void collect(bool   full,
-               bool   clear_all_soft_refs,
-               size_t size,
-               bool   tlab);
-
-  HeapWord* expand_and_allocate(size_t word_size,
-				bool tlab,
-				bool parallel = false);
-
-  // GC prologue and epilogue
-  void gc_prologue(bool full);
-  void gc_prologue_work(bool full, bool registerClosure,
-                        ModUnionClosure* modUnionClosure);
-  void gc_epilogue(bool full);
-  void gc_epilogue_work(bool full);
-
-  // Time since last GC of this generation
-  jlong time_of_last_gc(jlong now) {
-    return collector()->time_of_last_gc(now);
-  }
-  void update_time_of_last_gc(jlong now) {
-    collector()-> update_time_of_last_gc(now);
-  }
-
-  // Allocation failure
-  void expand(size_t bytes, size_t expand_bytes, 
-    CMSExpansionCause::Cause cause);
-  void shrink(size_t bytes);
-  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
-  bool expand_and_ensure_spooling_space(PromotionInfo* promo);
-
-  // Iteration support and related enquiries
-  void save_marks();
-  bool no_allocs_since_save_marks();
-  void object_iterate_since_last_GC(ObjectClosure* cl);
-  void younger_refs_iterate(OopsInGenClosure* cl);
-
-  // Iteration support specific to CMS generations
-  void save_sweep_limit();
-
-  // More iteration support
-  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
-  virtual void oop_iterate(OopClosure* cl);
-  virtual void object_iterate(ObjectClosure* cl);
-
-  // Need to declare the full complement of closures, whether we'll
-  // override them or not, or get message from the compiler:
-  //   oop_since_save_marks_iterate_nv hides virtual function...
-  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
-    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
-  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
-
-  // Smart allocation  XXX -- move to CFLSpace?
-  void setNearLargestChunk();
-  bool isNearLargestChunk(HeapWord* addr);
-
-  // Get the chunk at the end of the space.  Delagates to
-  // the space.
-  FreeChunk* find_chunk_at_end(); 
-
-  // Overriding of unused functionality (sharing not yet supported with CMS)
-  void pre_adjust_pointers();
-  void post_compact();
-
-  // Debugging
-  void prepare_for_verify();
-  void verify(bool allow_dirty);
-  void print_statistics()               PRODUCT_RETURN;
-
-  // Performance Counters support
-  virtual void update_counters();
-  virtual void update_counters(size_t used);
-  void initialize_performance_counters();
-  CollectorCounters* counters()  { return collector()->counters(); }
-
-  // Support for parallel remark of survivor space
-  void* get_data_recorder(int thr_num) {
-    //Delegate to collector
-    return collector()->get_data_recorder(thr_num);
-  }
-
-  // Printing
-  const char* name() const;
-  virtual const char* short_name() const { return "CMS"; }
-  void        print() const;
-  void printOccupancy(const char* s);
-  bool must_be_youngest() const { return false; }
-  bool must_be_oldest()   const { return true; }
-
-  void compute_new_size();
-
-  CollectionTypes debug_collection_type() { return _debug_collection_type; }
-  void rotate_debug_collection_type();
-};
-
-class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
-
-  // Return the size policy from the heap's collector
-  // policy casted to CMSAdaptiveSizePolicy*.
-  CMSAdaptiveSizePolicy* cms_size_policy() const;
-
-  // Resize the generation based on the adaptive size
-  // policy.
-  void resize(size_t cur_promo, size_t desired_promo);
-
-  // Return the GC counters from the collector policy
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
-
-  virtual void shrink_by(size_t bytes);
-
- public:
-  virtual void compute_new_size();
-  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                  int level, CardTableRS* ct,
-				  bool use_adaptive_freelists,
-                                  FreeBlockDictionary::DictionaryChoice 
-				    dictionaryChoice) :
-    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
-      use_adaptive_freelists, dictionaryChoice) {}
-
-  virtual const char* short_name() const { return "ASCMS"; }
-  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
-
-  virtual void update_counters();
-  virtual void update_counters(size_t used);
-};
-
-//
-// Closures of various sorts used by CMS to accomplish its work
-//
-
-// This closure is used to check that a certain set of oops is empty.
-class FalseClosure: public OopClosure {
- public:
-  void do_oop(oop* p) {
-    guarantee(false, "Should be an empty set");
-  }
-};
-
-// This closure is used to do concurrent marking from the roots
-// following the first checkpoint. 
-class MarkFromRootsClosure: public BitMapClosure {
-  CMSCollector*  _collector;
-  MemRegion      _span;
-  CMSBitMap*     _bitMap;
-  CMSBitMap*     _mut;
-  CMSMarkStack*  _markStack;
-  CMSMarkStack*  _revisitStack;
-  bool           _yield;
-  int            _skipBits;
-  HeapWord*      _finger;
-  HeapWord*      _threshold;
-  DEBUG_ONLY(bool _verifying;)
-
- public:
-  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
-                       CMSBitMap* bitMap,
-                       CMSMarkStack*  markStack,
-                       CMSMarkStack*  revisitStack,
-                       bool should_yield, bool verifying = false);
-  void do_bit(size_t offset);
-  void reset(HeapWord* addr);
-  inline void do_yield_check();
-
- private:
-  void scanOopsInOop(HeapWord* ptr);
-  void do_yield_work();
-};
-
-// This closure is used to do concurrent multi-threaded
-// marking from the roots following the first checkpoint. 
-// XXX This should really be a subclass of The serial version
-// above, but i have not had the time to refactor things cleanly.
-// That willbe done for Dolphin.
-class Par_MarkFromRootsClosure: public BitMapClosure {
-  CMSCollector*  _collector;
-  MemRegion      _whole_span;
-  MemRegion      _span;
-  CMSBitMap*     _bit_map;
-  CMSBitMap*     _mut;
-  OopTaskQueue*  _work_queue;
-  CMSMarkStack*  _overflow_stack;
-  CMSMarkStack*  _revisit_stack;
-  bool           _yield;
-  int            _skip_bits;
-  HeapWord*      _finger;
-  HeapWord*      _threshold;
-  CMSConcMarkingTask* _task;
- public:
-  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
-                       MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack,
-                       CMSMarkStack*  revisit_stack,
-                       bool should_yield);
-  void do_bit(size_t offset);
-  inline void do_yield_check();
-
- private:
-  void scan_oops_in_oop(HeapWord* ptr);
-  void do_yield_work();
-  bool get_work_from_overflow_stack();
-};
-
-// The following closures are used to do certain kinds of verification of
-// CMS marking.
-class PushAndMarkVerifyClosure: public OopClosure {
-  CMSCollector*    _collector;
-  MemRegion        _span;
-  CMSBitMap*       _verification_bm;
-  CMSBitMap*       _cms_bm;
-  CMSMarkStack*    _mark_stack;
- public:
-  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
-                           MemRegion span,
-                           CMSBitMap* verification_bm,
-                           CMSBitMap* cms_bm,
-                           CMSMarkStack*  mark_stack);
-  void do_oop(oop* p);
-  // Deal with a stack overflow condition
-  void handle_stack_overflow(HeapWord* lost);
-};
-
-class MarkFromRootsVerifyClosure: public BitMapClosure {
-  CMSCollector*  _collector;
-  MemRegion      _span;
-  CMSBitMap*     _verification_bm;
-  CMSBitMap*     _cms_bm;
-  CMSMarkStack*  _mark_stack;
-  HeapWord*      _finger;
-  PushAndMarkVerifyClosure _pam_verify_closure;
- public:
-  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
-                             CMSBitMap* verification_bm,
-                             CMSBitMap* cms_bm,
-                             CMSMarkStack*  mark_stack);
-  void do_bit(size_t offset);
-  void reset(HeapWord* addr);
-};
-
-
-// This closure is used to check that a certain set of bits is
-// "empty" (i.e. the bit vector doesn't have any 1-bits).
-class FalseBitMapClosure: public BitMapClosure {
- public:
-  void do_bit(size_t offset) {
-    guarantee(false, "Should not have a 1 bit"); 
-  }
-};
-
-// This closure is used during the second checkpointing phase
-// to rescan the marked objects on the dirty cards in the mod
-// union table and the card table proper. It's invoked via
-// MarkFromDirtyCardsClosure below. It uses either
-// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
-// declared in genOopClosures.hpp to accomplish some of its work.
-// In the parallel case the bitMap is shared, so access to
-// it needs to be suitably synchronized for updates by embedded
-// closures that update it; however, this closure itself only
-// reads the bit_map and because it is idempotent, is immune to
-// reading stale values.
-class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
-  #ifdef ASSERT
-    CMSCollector*          _collector;
-    MemRegion              _span;
-    union {
-      CMSMarkStack*        _mark_stack;
-      OopTaskQueue*        _work_queue;
-    };
-  #endif // ASSERT
-  bool                       _parallel;
-  CMSBitMap*                 _bit_map;
-  union {
-    MarkRefsIntoAndScanClosure*     _scan_closure;
-    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
-  };
-
- public:
-  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
-                                MemRegion span,
-                                ReferenceProcessor* rp,
-                                CMSBitMap* bit_map,
-                                CMSMarkStack*  mark_stack,
-                                CMSMarkStack*  revisit_stack,
-                                MarkRefsIntoAndScanClosure* cl):
-    #ifdef ASSERT
-      _collector(collector),
-      _span(span),
-      _mark_stack(mark_stack),
-    #endif // ASSERT
-    _parallel(false),
-    _bit_map(bit_map),
-    _scan_closure(cl) { }
-
-  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
-                                MemRegion span,
-                                ReferenceProcessor* rp,
-                                CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue,
-                                CMSMarkStack* revisit_stack,
-                                Par_MarkRefsIntoAndScanClosure* cl):
-    #ifdef ASSERT
-      _collector(collector),
-      _span(span),
-      _work_queue(work_queue),
-    #endif // ASSERT
-    _parallel(true),
-    _bit_map(bit_map),
-    _par_scan_closure(cl) { }
-                                
-  void do_object(oop obj) {
-    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
-  }
-  bool do_object_b(oop obj) {
-    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
-    return false;
-  }
-  bool do_object_bm(oop p, MemRegion mr);
-};
-
-// This closure is used during the second checkpointing phase
-// to rescan the marked objects on the dirty cards in the mod
-// union table and the card table proper. It invokes
-// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
-// In the parallel case, the bit map is shared and requires
-// synchronized access.
-class MarkFromDirtyCardsClosure: public MemRegionClosure {
-  CompactibleFreeListSpace*      _space;
-  ScanMarkedObjectsAgainClosure  _scan_cl;
-  size_t                         _num_dirty_cards;
-
- public:
-  MarkFromDirtyCardsClosure(CMSCollector* collector,
-                            MemRegion span,
-                            CompactibleFreeListSpace* space,
-                            CMSBitMap* bit_map,
-                            CMSMarkStack* mark_stack,
-                            CMSMarkStack* revisit_stack,
-                            MarkRefsIntoAndScanClosure* cl):
-    _space(space),
-    _num_dirty_cards(0),
-    _scan_cl(collector, span, collector->ref_processor(), bit_map,
-                 mark_stack, revisit_stack, cl) { }
-
-  MarkFromDirtyCardsClosure(CMSCollector* collector,
-                            MemRegion span,
-                            CompactibleFreeListSpace* space,
-                            CMSBitMap* bit_map,
-                            OopTaskQueue* work_queue,
-                            CMSMarkStack* revisit_stack,
-                            Par_MarkRefsIntoAndScanClosure* cl):
-    _space(space),
-    _num_dirty_cards(0),
-    _scan_cl(collector, span, collector->ref_processor(), bit_map,
-             work_queue, revisit_stack, cl) { }
-
-  void do_MemRegion(MemRegion mr);
-  void set_space(CompactibleFreeListSpace* space) { _space = space; }
-  size_t num_dirty_cards() { return _num_dirty_cards; }
-};
-
-// This closure is used in the non-product build to check
-// that there are no MemRegions with a certain property.
-class FalseMemRegionClosure: public MemRegionClosure {
-  void do_MemRegion(MemRegion mr) {
-    guarantee(!mr.is_empty(), "Shouldn't be empty");
-    guarantee(false, "Should never be here");
-  }
-};
-
-// This closure is used during the precleaning phase
-// to "carefully" rescan marked objects on dirty cards.
-// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
-// to accomplish some of its work.
-class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
-  CMSCollector*                  _collector;
-  MemRegion                      _span;
-  bool                           _yield;
-  Mutex*                         _freelistLock;
-  CMSBitMap*                     _bitMap;
-  CMSMarkStack*                  _markStack;
-  MarkRefsIntoAndScanClosure*    _scanningClosure;
-
- public:
-  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
-                                         MemRegion     span,
-                                         CMSBitMap* bitMap,
-                                         CMSMarkStack*  markStack,
-                                         CMSMarkStack*  revisitStack,
-                                         MarkRefsIntoAndScanClosure* cl,
-                                         bool should_yield):
-    _collector(collector),
-    _span(span),
-    _yield(should_yield),
-    _bitMap(bitMap),
-    _markStack(markStack),
-    _scanningClosure(cl) {
-  }
-  
-  void do_object(oop p) {
-    guarantee(false, "call do_object_careful instead");
-  }
-
-  size_t      do_object_careful(oop p) {
-    guarantee(false, "Unexpected caller");
-    return 0;
-  }
-
-  size_t      do_object_careful_m(oop p, MemRegion mr);
-
-  void setFreelistLock(Mutex* m) {
-    _freelistLock = m;
-    _scanningClosure->set_freelistLock(m);
-  }
-
- private:
-  inline bool do_yield_check();
-
-  void do_yield_work();
-};
-
-class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
-  CMSCollector*                  _collector;
-  MemRegion                      _span;
-  bool                           _yield;
-  CMSBitMap*                     _bit_map;
-  CMSMarkStack*                  _mark_stack;
-  PushAndMarkClosure*            _scanning_closure;
-  unsigned int                   _before_count;
-
- public:
-  SurvivorSpacePrecleanClosure(CMSCollector* collector,
-                               MemRegion     span,
-                               CMSBitMap*    bit_map,
-                               CMSMarkStack* mark_stack,
-                               PushAndMarkClosure* cl,
-                               unsigned int  before_count,
-                               bool          should_yield):
-    _collector(collector),
-    _span(span),
-    _yield(should_yield),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _scanning_closure(cl),
-    _before_count(before_count)
-  { }
-
-  void do_object(oop p) {
-    guarantee(false, "call do_object_careful instead");
-  }
-
-  size_t      do_object_careful(oop p);
-
-  size_t      do_object_careful_m(oop p, MemRegion mr) {
-    guarantee(false, "Unexpected caller");
-    return 0;
-  }
-
- private:
-  inline void do_yield_check();
-  void do_yield_work();
-};
-
-// This closure is used to accomplish the sweeping work
-// after the second checkpoint but before the concurrent reset
-// phase.
-// 
-// Terminology
-//   left hand chunk (LHC) - block of one or more chunks currently being
-//     coalesced.  The LHC is available for coalescing with a new chunk.
-//   right hand chunk (RHC) - block that is currently being swept that is
-//     free or garbage that can be coalesced with the LHC.
-// _inFreeRange is true if there is currently a LHC
-// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
-// _freeRangeInFreeLists is true if the LHC is in the free lists.
-// _freeFinger is the address of the current LHC
-class SweepClosure: public BlkClosureCareful {
-  CMSCollector*                  _collector;  // collector doing the work
-  ConcurrentMarkSweepGeneration* _g;	// Generation being swept
-  CompactibleFreeListSpace*      _sp;	// Space being swept
-  HeapWord*                      _limit;
-  Mutex*                         _freelistLock;	// Free list lock (in space)
-  CMSBitMap*                     _bitMap;	// Marking bit map (in 
-						// generation)
-  bool                           _inFreeRange;	// Indicates if we are in the
-						// midst of a free run
-  bool				 _freeRangeInFreeLists;	
-					// Often, we have just found
-					// a free chunk and started
-					// a new free range; we do not
-					// eagerly remove this chunk from
-					// the free lists unless there is
-					// a possibility of coalescing.
-					// When true, this flag indicates
-					// that the _freeFinger below
-					// points to a potentially free chunk
-					// that may still be in the free lists
-  bool				 _lastFreeRangeCoalesced;
-					// free range contains chunks
-					// coalesced
-  bool                           _yield;	
-					// Whether sweeping should be 
-					// done with yields. For instance 
-					// when done by the foreground 
-					// collector we shouldn't yield.
-  HeapWord*                      _freeFinger;	// When _inFreeRange is set, the
-						// pointer to the "left hand 
-						// chunk"
-  size_t			 _freeRangeSize; 
-					// When _inFreeRange is set, this 
-					// indicates the accumulated size 
-					// of the "left hand chunk"
-  NOT_PRODUCT(
-    size_t		         _numObjectsFreed;
-    size_t		         _numWordsFreed;
-    size_t			 _numObjectsLive;
-    size_t			 _numWordsLive;
-    size_t			 _numObjectsAlreadyFree;
-    size_t			 _numWordsAlreadyFree;
-    FreeChunk*			 _last_fc;
-  )
- private:
-  // Code that is common to a free chunk or garbage when
-  // encountered during sweeping.
-  void doPostIsFreeOrGarbageChunk(FreeChunk *fc, 
-				  size_t chunkSize);
-  // Process a free chunk during sweeping.
-  void doAlreadyFreeChunk(FreeChunk *fc);
-  // Process a garbage chunk during sweeping.
-  size_t doGarbageChunk(FreeChunk *fc);
-  // Process a live chunk during sweeping.
-  size_t doLiveChunk(FreeChunk* fc);
-
-  // Accessors.
-  HeapWord* freeFinger() const	 	{ return _freeFinger; }
-  void set_freeFinger(HeapWord* v)  	{ _freeFinger = v; }
-  size_t freeRangeSize() const	 	{ return _freeRangeSize; }
-  void set_freeRangeSize(size_t v)  	{ _freeRangeSize = v; }
-  bool inFreeRange() 	const	 	{ return _inFreeRange; }
-  void set_inFreeRange(bool v)  	{ _inFreeRange = v; }
-  bool lastFreeRangeCoalesced()	const	 { return _lastFreeRangeCoalesced; }
-  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
-  bool freeRangeInFreeLists() const	{ return _freeRangeInFreeLists; }
-  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
-
-  // Initialize a free range.
-  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
-  // Return this chunk to the free lists.
-  void flushCurFreeChunk(HeapWord* chunk, size_t size);
-
-  // Check if we should yield and do so when necessary.
-  inline void do_yield_check(HeapWord* addr);
-
-  // Yield
-  void do_yield_work(HeapWord* addr);
-
-  // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
-
- public:
-  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
-               CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
-
-  size_t       do_blk_careful(HeapWord* addr);
-};
-
-// Closures related to weak references processing
-
-// During CMS' weak reference processing, this is a
-// work-routine/closure used to complete transitive
-// marking of objects as live after a certain point
-// in which an initial set has been completely accumulated.
-class CMSDrainMarkingStackClosure: public VoidClosure {
-  CMSCollector*        _collector;
-  MemRegion            _span;
-  CMSMarkStack*        _mark_stack;
-  CMSBitMap*           _bit_map;
-  CMSKeepAliveClosure* _keep_alive;
- public:
-  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      CMSKeepAliveClosure* keep_alive):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _keep_alive(keep_alive) { }
-
-  void do_void();
-};
-
-// A parallel version of CMSDrainMarkingStackClosure above.
-class CMSParDrainMarkingStackClosure: public VoidClosure {
-  CMSCollector*           _collector;
-  MemRegion               _span;
-  OopTaskQueue*           _work_queue;
-  CMSBitMap*              _bit_map;
-  CMSInnerParMarkAndPushClosure _mark_and_push;
-
- public:
-  CMSParDrainMarkingStackClosure(CMSCollector* collector,
-                                 MemRegion span, CMSBitMap* bit_map,
-                                 OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue),
-    _mark_and_push(collector, span, bit_map, work_queue) { }
-
- public:
-  void trim_queue(uint max);
-  void do_void();
-};
-
-// Allow yielding or short-circuiting of reference list
-// prelceaning work.
-class CMSPrecleanRefsYieldClosure: public YieldClosure {
-  CMSCollector* _collector;
-  void do_yield_work();
- public:
-  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
-    _collector(collector) {}
-  virtual bool should_return();
-};
-
-
-// Convenience class that locks free list locks for given CMS collector
-class FreelistLocker: public StackObj {
- private:
-  CMSCollector* _collector;
- public:
-  FreelistLocker(CMSCollector* collector):
-    _collector(collector) {
-    _collector->getFreelistLocks();
-  }
-
-  ~FreelistLocker() {
-    _collector->releaseFreelistLocks();
-  }
-};
-
-// Mark all dead objects in a given space.
-class MarkDeadObjectsClosure: public BlkClosure {
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  CMSBitMap*                      _live_bit_map;
-  CMSBitMap*                      _dead_bit_map;
-public:
-  MarkDeadObjectsClosure(const CMSCollector* collector,
-                         const CompactibleFreeListSpace* sp, 
-                         CMSBitMap *live_bit_map,
-                         CMSBitMap *dead_bit_map) :
-    _collector(collector),
-    _sp(sp),
-    _live_bit_map(live_bit_map),
-    _dead_bit_map(dead_bit_map) {}
-  size_t do_blk(HeapWord* addr);
-};
--- a/hotspot/src/share/vm/memory/concurrentMarkSweepGeneration.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,510 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)concurrentMarkSweepGeneration.inline.hpp	1.46 07/05/05 17:05:45 JVM"
-#endif
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-inline void CMSBitMap::clear_all() {
-  assert_locked();
-  // CMS bitmaps are usually cover large memory regions
-  _bm.clear_large();
-  return;
-}
-
-inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
-  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
-}
-
-inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
-  return _bmStartWord + (offset << _shifter);
-}
-
-inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
-  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
-  return diff >> _shifter;
-}
-
-inline void CMSBitMap::mark(HeapWord* addr) {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  _bm.set_bit(heapWordToOffset(addr));
-}
-
-inline bool CMSBitMap::par_mark(HeapWord* addr) {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return _bm.par_at_put(heapWordToOffset(addr), true);
-}
-
-inline void CMSBitMap::par_clear(HeapWord* addr) {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  _bm.par_at_put(heapWordToOffset(addr), false);
-}
-
-inline void CMSBitMap::mark_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                BitMap::small_range);
-}
-
-inline void CMSBitMap::clear_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                  BitMap::small_range);
-}
-
-inline void CMSBitMap::par_mark_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                    BitMap::small_range);
-}
-
-inline void CMSBitMap::par_clear_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                      BitMap::small_range);
-}
-
-inline void CMSBitMap::mark_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                BitMap::large_range);
-}
-
-inline void CMSBitMap::clear_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                  BitMap::large_range);
-}
-
-inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                    BitMap::large_range);
-}
-
-inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()), 
-                      BitMap::large_range);
-}
-
-// Starting at "addr" (inclusive) return a memory region
-// corresponding to the first maximally contiguous marked ("1") region.
-inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
-  return getAndClearMarkedRegion(addr, endWord());
-}
-
-// Starting at "start_addr" (inclusive) return a memory region
-// corresponding to the first maximal contiguous marked ("1") region
-// strictly less than end_addr.
-inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
-                                                    HeapWord* end_addr) {
-  HeapWord *start, *end;
-  assert_locked();
-  start = getNextMarkedWordAddress  (start_addr, end_addr);
-  end   = getNextUnmarkedWordAddress(start,      end_addr);
-  assert(start <= end, "Consistency check");
-  MemRegion mr(start, end);
-  if (!mr.is_empty()) {
-    clear_range(mr);
-  }
-  return mr;
-}
-
-inline bool CMSBitMap::isMarked(HeapWord* addr) const {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return _bm.at(heapWordToOffset(addr));
-}
-
-// The same as isMarked() but without a lock check.
-inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return _bm.at(heapWordToOffset(addr));
-}
-
-
-inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return !_bm.at(heapWordToOffset(addr));
-}
-
-// Return the HeapWord address corresponding to next "1" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
-  return getNextMarkedWordAddress(addr, endWord());
-}
-
-// Return the least HeapWord address corresponding to next "1" bit
-// starting at start_addr (inclusive) but strictly less than end_addr.
-inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
-  HeapWord* start_addr, HeapWord* end_addr) const {
-  assert_locked();
-  size_t nextOffset = _bm.get_next_one_offset(
-                        heapWordToOffset(start_addr),
-                        heapWordToOffset(end_addr));
-  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
-  assert(nextAddr >= start_addr &&
-         nextAddr <= end_addr, "get_next_one postcondition");
-  assert((nextAddr == end_addr) ||
-         isMarked(nextAddr), "get_next_one postcondition");
-  return nextAddr;
-}
-
-
-// Return the HeapWord address corrsponding to the next "0" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
-  return getNextUnmarkedWordAddress(addr, endWord());
-}
-
-// Return the HeapWord address corrsponding to the next "0" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
-  HeapWord* start_addr, HeapWord* end_addr) const {
-  assert_locked();
-  size_t nextOffset = _bm.get_next_zero_offset(
-                        heapWordToOffset(start_addr),
-                        heapWordToOffset(end_addr));
-  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
-  assert(nextAddr >= start_addr &&
-         nextAddr <= end_addr, "get_next_zero postcondition");
-  assert((nextAddr == end_addr) ||
-          isUnmarked(nextAddr), "get_next_zero postcondition");
-  return nextAddr;
-}
-
-inline bool CMSBitMap::isAllClear() const {
-  assert_locked();
-  return getNextMarkedWordAddress(startWord()) >= endWord();
-}
-
-inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
-                            HeapWord* right) {
-  assert_locked();
-  left = MAX2(_bmStartWord, left);
-  right = MIN2(_bmStartWord + _bmWordSize, right);
-  if (right > left) {
-    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
-  }
-}
-
-inline void CMSCollector::start_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::start_icms();
-  }
-}
-
-inline void CMSCollector::stop_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::stop_icms();
-  }
-}
-
-inline void CMSCollector::disable_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::disable_icms();
-  }
-}
-
-inline void CMSCollector::enable_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::enable_icms();
-  }
-}
-
-inline void CMSCollector::icms_wait() {
-  if (CMSIncrementalMode) {
-    cmsThread()->icms_wait();
-  }
-}
-
-inline void CMSCollector::save_sweep_limits() {
-  _cmsGen->save_sweep_limit();
-  _permGen->save_sweep_limit();
-}
-
-inline bool CMSCollector::is_dead_obj(oop obj) const {
-  HeapWord* addr = (HeapWord*)obj;
-  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
-	  && _cmsGen->cmsSpace()->block_is_obj(addr))
-	 ||
-         (_permGen->cmsSpace()->is_in_reserved(addr)
-	  && _permGen->cmsSpace()->block_is_obj(addr)),
-	 "must be object");
-  return  CMSPermGenSweepingEnabled &&
-          _collectorState == Sweeping &&
-         !_markBitMap.isMarked(addr);
-}
-
-inline bool CMSCollector::should_abort_preclean() const {
-  // We are in the midst of an "abortable preclean" and either
-  // scavenge is done or foreground GC wants to take over collection
-  return _collectorState == AbortablePreclean &&
-         (_abort_preclean || _foregroundGCIsActive ||
-          GenCollectedHeap::heap()->incremental_collection_will_fail());
-}
-
-inline size_t CMSCollector::get_eden_used() const {
-  return _young_gen->as_DefNewGeneration()->eden()->used();
-}
-
-inline size_t CMSCollector::get_eden_capacity() const {
-  return _young_gen->as_DefNewGeneration()->eden()->capacity();
-}
-
-inline bool CMSStats::valid() const {
-  return _valid_bits == _ALL_VALID;
-}
-
-inline void CMSStats::record_gc0_begin() {
-  if (_gc0_begin_time.is_updated()) {
-    float last_gc0_period = _gc0_begin_time.seconds();
-    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period, 
-      last_gc0_period, _gc0_alpha);
-    _gc0_alpha = _saved_alpha;
-    _valid_bits |= _GC0_VALID;
-  }
-  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
-
-  _gc0_begin_time.update();
-}
-
-inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
-  float last_gc0_duration = _gc0_begin_time.seconds();
-  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration, 
-    last_gc0_duration, _gc0_alpha);
-
-  // Amount promoted.
-  _cms_used_at_gc0_end = cms_gen_bytes_used;
-
-  size_t promoted_bytes = 0;
-  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
-    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
-  } 
-
-  // If the younger gen collections were skipped, then the
-  // number of promoted bytes will be 0 and adding it to the
-  // average will incorrectly lessen the average.  It is, however,
-  // also possible that no promotion was needed.
-  // 
-  // _gc0_promoted used to be calculated as
-  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
-  //  promoted_bytes, _gc0_alpha);
-  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
-  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
-
-  // Amount directly allocated.
-  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
-  _cms_gen->reset_direct_allocated_words();
-  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated, 
-    allocated_bytes, _gc0_alpha);
-}
-
-inline void CMSStats::record_cms_begin() {
-  _cms_timer.stop();
-
-  // This is just an approximate value, but is good enough.
-  _cms_used_at_cms_begin = _cms_used_at_gc0_end;
-
-  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period, 
-    (float) _cms_timer.seconds(), _cms_alpha);
-  _cms_begin_time.update();
-
-  _cms_timer.reset();
-  _cms_timer.start();
-}
-
-inline void CMSStats::record_cms_end() {
-  _cms_timer.stop();
-
-  float cur_duration = _cms_timer.seconds();
-  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration, 
-    cur_duration, _cms_alpha);
-
-  // Avoid division by 0.
-  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
-  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
-				 cur_duration / cms_used_mb,
-				 _cms_alpha);
-
-  _cms_end_time.update();
-  _cms_alpha = _saved_alpha;
-  _allow_duty_cycle_reduction = true;
-  _valid_bits |= _CMS_VALID;
-
-  _cms_timer.start();
-}
-
-inline double CMSStats::cms_time_since_begin() const {
-  return _cms_begin_time.seconds();
-}
-
-inline double CMSStats::cms_time_since_end() const {
-  return _cms_end_time.seconds();
-}
-
-inline double CMSStats::promotion_rate() const {
-  assert(valid(), "statistics not valid yet");
-  return gc0_promoted() / gc0_period();
-}
-
-inline double CMSStats::cms_allocation_rate() const {
-  assert(valid(), "statistics not valid yet");
-  return cms_allocated() / gc0_period();
-}
-
-inline double CMSStats::cms_consumption_rate() const {
-  assert(valid(), "statistics not valid yet");
-  return (gc0_promoted() + cms_allocated()) / gc0_period();
-}
-
-inline unsigned int CMSStats::icms_update_duty_cycle() {
-  // Update the duty cycle only if pacing is enabled and the stats are valid
-  // (after at least one young gen gc and one cms cycle have completed).
-  if (CMSIncrementalPacing && valid()) {
-    return icms_update_duty_cycle_impl();
-  }
-  return _icms_duty_cycle;
-}
-
-inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
-  cmsSpace()->save_sweep_limit();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::capacity() const {
-  return _cmsSpace->capacity();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::used() const {
-  return _cmsSpace->used();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::free() const {
-  return _cmsSpace->free();
-}
-
-inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
-  return _cmsSpace->used_region();
-}
-
-inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
-  return _cmsSpace->used_region_at_save_marks();
-}
-
-inline void MarkFromRootsClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    do_yield_work();
-  }
-}
-
-inline void Par_MarkFromRootsClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    do_yield_work();
-  }
-}
-
-// Return value of "true" indicates that the on-going preclean
-// should be aborted.
-inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    // Sample young gen size before and after yield
-    _collector->sample_eden(); 
-    do_yield_work();
-    _collector->sample_eden();
-    return _collector->should_abort_preclean();
-  }
-  return false;
-}
-
-inline void SurvivorSpacePrecleanClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    // Sample young gen size before and after yield
-    _collector->sample_eden();
-    do_yield_work();
-    _collector->sample_eden();
-  }
-}
-
-inline void SweepClosure::do_yield_check(HeapWord* addr) {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    do_yield_work(addr);
-  }
-}
-
-inline void MarkRefsIntoAndScanClosure::do_yield_check() {
-  // The conditions are ordered for the remarking phase
-  // when _yield is false.
-  if (_yield &&
-      !_collector->foregroundGCIsActive() &&
-      ConcurrentMarkSweepThread::should_yield()) {
-    do_yield_work();
-  }
-}
-
-
-inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
-  // Align the end of mr so it's at a card boundary.
-  // This is superfluous except at the end of the space;
-  // we should do better than this XXX
-  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
-                 CardTableModRefBS::card_size /* bytes */));
-  _t->mark_range(mr2);
-}
-
-inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
-  // Align the end of mr so it's at a card boundary.
-  // This is superfluous except at the end of the space;
-  // we should do better than this XXX
-  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
-                 CardTableModRefBS::card_size /* bytes */));
-  _t->par_mark_range(mr2);
-}
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)defNewGeneration.cpp	1.71 07/05/05 17:05:47 JVM"
+#pragma ident "@(#)defNewGeneration.cpp	1.72 07/05/17 15:54:42 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -254,27 +254,9 @@
   // not be considered.  The exception is during promotion
   // failure handling when to-space can contain live objects.
   from()->set_next_compaction_space(NULL);
-
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
-    CollectedHeap* ch = Universe::heap();
-    jvmpi::post_arena_new_event(ch->addr_to_arena_id(eden_start), "Eden");
-    jvmpi::post_arena_new_event(ch->addr_to_arena_id(from_start), "Semi");
-    jvmpi::post_arena_new_event(ch->addr_to_arena_id(to_start), "Semi");
-  }
-#endif // JVMPI_SUPPORT
 }
 
 void DefNewGeneration::swap_spaces() {
-#ifdef JVMPI_SUPPORT
-  CollectedHeap* ch = Universe::heap();
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_DELETE)) {
-    jvmpi::post_arena_delete_event(ch->addr_to_arena_id(from()->bottom()));
-  }
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
-    jvmpi::post_arena_new_event(ch->addr_to_arena_id(from()->bottom()), "Semi");
-  }
-#endif // JVMPI_SUPPORT
   ContiguousSpace* s = from();
   _from_space        = to();
   _to_space          = s;
@@ -682,15 +664,8 @@
   size_t s = old->size();
   oop obj = NULL;
   
-#ifdef JVMPI_SUPPORT
-  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
-  // enabled)
-  if (old->age() < tenuring_threshold() &&
-      !Universe::jvmpi_slow_allocation()) {
-#else // !JVMPI_SUPPORT
   // Try allocating obj in to-space (unless too old)
   if (old->age() < tenuring_threshold()) {
-#endif // JVMPI_SUPPORT
     obj = (oop) to()->allocate(s);
   }
 
@@ -720,12 +695,6 @@
     age_table()->add(obj, s);
   }
 
-#ifdef JVMPI_SUPPORT
-  if (Universe::jvmpi_move_event_enabled()) {
-    Universe::jvmpi_object_move(old, obj);
-  }
-#endif // JVMPI_SUPPORT
-
   // Done, insert forward pointer to obj in this header
   old->forward_to(obj);
 
@@ -864,29 +833,8 @@
   // update the generation and space performance counters
   update_counters();
   gch->collector_policy()->counters()->update_counters();
-
-#ifdef JVMPI_SUPPORT
-  if (Universe::jvmpi_slow_allocation()) {
-    // If JVMPI alloc event has been disabled, turn off slow allocation now;
-    // otherwise, fill the new generation.
-    if (!Universe::jvmpi_alloc_event_enabled()) {
-      Universe::set_jvmpi_alloc_event_enabled(Universe::_jvmpi_disabled);
-    } else {
-      fill_newgen();
-    }
-  }
-#endif // JVMPI_SUPPORT
 }
 
-#ifdef JVMPI_SUPPORT
-void DefNewGeneration::fill_newgen() {
-  assert(to()->is_empty(), "to() must be empty");
-  eden()->allocate_temporary_filler(0);
-  from()->allocate_temporary_filler(0);
-  assert(eden()->free() == 0 && from()->free() == 0, "DefNewGeneration should be full");
-}
-#endif // JVMPI_SUPPORT
-
 void DefNewGeneration::update_counters() {
   if (UsePerfData) {
     _eden_counters->update_all();
@@ -912,20 +860,6 @@
   to()->print_on(st);
 }
 
-#ifdef JVMPI_SUPPORT
-int DefNewGeneration::addr_to_arena_id(void* addr) {
-  if (eden()->is_in_reserved(addr)) return 0;
-  if (from()->is_in_reserved(addr)) {
-    return (from()->bottom() < to()->bottom()) ? 1 : 2;
-  }
-  if (to()->is_in_reserved(addr)) {
-    return (from()->bottom() < to()->bottom()) ? 2 : 1;
-  }
-  // Otherwise...
-  return -3;
-}
-#endif // JVMPI_SUPPORT
-
 
 const char* DefNewGeneration::name() const {
   return "def new generation";
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)defNewGeneration.hpp	1.39 07/05/05 17:05:46 JVM"
+#pragma ident "@(#)defNewGeneration.hpp	1.40 07/05/17 15:54:44 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -260,18 +260,10 @@
     const bool check_too_big = _pretenure_size_threshold_words > 0;
     const bool not_too_big   = word_size < _pretenure_size_threshold_words;
     const bool size_ok       = is_tlab || !check_too_big || not_too_big;
-#ifdef JVMPI_SUPPORT
-    const bool not_jvmpi     = !Universe::jvmpi_slow_allocation();
-#endif // JVMPI_SUPPORT
       
     bool result = !overflows &&
                   non_zero   && 
-#ifdef JVMPI_SUPPORT
-                  size_ok    &&
-                  not_jvmpi;
-#else // !JVMPI_SUPPORT
                   size_ok;
-#endif // JVMPI_SUPPORT
 
     return result;
   }
@@ -336,17 +328,8 @@
 
   void verify(bool allow_dirty);
 
-#ifdef JVMPI_SUPPORT
-  int addr_to_arena_id(void* addr);
-#endif // JVMPI_SUPPORT
-
  protected:
   void compute_space_boundaries(uintx minimum_eden_size);
   // Scavenge support
   void swap_spaces();
-  
-#ifdef JVMPI_SUPPORT
-  // JVMPI support
-  void fill_newgen();
-#endif // JVMPI_SUPPORT
 };
--- a/hotspot/src/share/vm/memory/dictionary.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,625 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)dictionary.cpp	1.25 07/05/05 17:05:47 JVM"
-#endif
-/*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_dictionary.cpp.incl"
-
-
-DictionaryEntry*  Dictionary::_current_class_entry = NULL;
-int               Dictionary::_current_class_index =    0;
-
-
-Dictionary::Dictionary(int table_size)
-  : TwoOopHashtable(table_size, sizeof(DictionaryEntry)) {
-  _current_class_index = 0;
-  _current_class_entry = NULL;
-};
-
-
-
-Dictionary::Dictionary(int table_size, HashtableBucket* t,
-                       int number_of_entries)
-  : TwoOopHashtable(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
-  _current_class_index = 0;
-  _current_class_entry = NULL;
-};
-
-
-DictionaryEntry* Dictionary::new_entry(unsigned int hash, klassOop klass,
-                                       oop loader) {
-  DictionaryEntry* entry;
-  entry = (DictionaryEntry*)Hashtable::new_entry(hash, klass);
-  entry->set_loader(loader);
-  entry->set_pd_set(NULL);
-  return entry;
-}
-
-
-DictionaryEntry* Dictionary::new_entry() {
-  DictionaryEntry* entry = (DictionaryEntry*)Hashtable::new_entry(0L, NULL);
-  entry->set_loader(NULL);
-  entry->set_pd_set(NULL);
-  return entry;
-}
-
-
-void Dictionary::free_entry(DictionaryEntry* entry) {
-  // avoid recursion when deleting linked list
-  while (entry->pd_set() != NULL) {
-    ProtectionDomainEntry* to_delete = entry->pd_set();
-    entry->set_pd_set(to_delete->next());
-    delete to_delete;
-  }
-  Hashtable::free_entry(entry);
-}
-
-
-bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
-#ifdef ASSERT
-  if (protection_domain == instanceKlass::cast(klass())->protection_domain()) {
-    // Ensure this doesn't show up in the pd_set (invariant)
-    bool in_pd_set = false;
-    for (ProtectionDomainEntry* current = _pd_set; 
-                                current != NULL; 
-                                current = current->next()) {
-      if (current->protection_domain() == protection_domain) {
-	in_pd_set = true;
-	break;
-      }
-    }
-    if (in_pd_set) {
-      assert(false, "A klass's protection domain should not show up "
-                    "in its sys. dict. PD set");
-    }
-  }
-#endif /* ASSERT */
-
-  if (protection_domain == instanceKlass::cast(klass())->protection_domain()) {
-    // Succeeds trivially
-    return true;
-  }
-
-  for (ProtectionDomainEntry* current = _pd_set; 
-                              current != NULL; 
-                              current = current->next()) {
-    if (current->protection_domain() == protection_domain) return true;
-  }
-  return false;
-}
-
-
-void DictionaryEntry::add_protection_domain(oop protection_domain) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  if (!contains_protection_domain(protection_domain)) {
-    ProtectionDomainEntry* new_head =
-                new ProtectionDomainEntry(protection_domain, _pd_set);
-    // Warning: Preserve store ordering.  The SystemDictionary is read
-    //          without locks.  The new ProtectionDomainEntry must be
-    //          complete before other threads can be allowed to see it
-    //          via a store to _pd_set.
-    OrderAccess::release_store_ptr(&_pd_set, new_head);
-  }
-  if (TraceProtectionDomainVerification && WizardMode) {
-    print();
-  }
-}
-
-
-bool Dictionary::do_unloading(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
-  bool class_was_unloaded = false;
-  int  index = 0; // Defined here for portability! Do not move
-
-  // Remove unloadable entries and classes from system dictionary
-  // The placeholder array has been handled in always_strong_oops_do.
-  DictionaryEntry* probe = NULL;
-  for (index = 0; index < table_size(); index++) {
-    for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
-      probe = *p;
-      klassOop e = probe->klass();
-      oop class_loader = probe->loader();
-
-      instanceKlass* ik = instanceKlass::cast(e);
-      if (ik->previous_versions() != NULL) {
-        // This klass has previous versions so see what we can cleanup
-        // while it is safe to do so.
-
-        int gc_count = 0;    // leave debugging breadcrumbs
-        int live_count = 0;
-
-        // RC_TRACE macro has an embedded ResourceMark
-        RC_TRACE(0x00000200, ("unload: %s: previous version length=%d",
-          ik->external_name(), ik->previous_versions()->length()));
-
-        for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) {
-          // check the previous versions array for GC'ed weak refs
-          PreviousVersionNode * pv_node = ik->previous_versions()->at(i);
-          jweak cp_ref = pv_node->prev_constant_pool();
-          assert(cp_ref != NULL, "weak cp ref was unexpectedly cleared");
-          if (cp_ref == NULL) {
-            delete pv_node;
-            ik->previous_versions()->remove_at(i);
-            // Since we are traversing the array backwards, we don't have to
-            // do anything special with the index.
-            continue;  // robustness
-          }
-      
-          constantPoolOop pvcp = (constantPoolOop)JNIHandles::resolve(cp_ref);
-          if (pvcp == NULL) {
-            // this entry has been GC'ed so remove it
-            delete pv_node;
-            ik->previous_versions()->remove_at(i);
-            // Since we are traversing the array backwards, we don't have to
-            // do anything special with the index.
-            gc_count++;
-            continue;
-          } else {
-            RC_TRACE(0x00000200, ("unload: previous version @%d is alive", i));
-            if (is_alive->do_object_b(pvcp)) {
-              live_count++;
-            } else {
-              guarantee(false, "sanity check");
-            }
-          }
-      
-          GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
-          if (method_refs != NULL) {
-            RC_TRACE(0x00000200, ("unload: previous methods length=%d",
-              method_refs->length()));
-            for (int j = method_refs->length() - 1; j >= 0; j--) {
-              jweak method_ref = method_refs->at(j);
-              assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
-              if (method_ref == NULL) {
-                method_refs->remove_at(j);
-                // Since we are traversing the array backwards, we don't have to
-                // do anything special with the index.
-                continue;  // robustness
-              }
-            
-              methodOop method = (methodOop)JNIHandles::resolve(method_ref);
-              if (method == NULL) {
-                // this method entry has been GC'ed so remove it
-                JNIHandles::destroy_weak_global(method_ref);
-                method_refs->remove_at(j);
-              } else {
-                // RC_TRACE macro has an embedded ResourceMark
-                RC_TRACE(0x00000200,
-                  ("unload: %s(%s): prev method @%d in version @%d is alive",
-                  method->name()->as_C_string(),
-                  method->signature()->as_C_string(), j, i));
-              }
-            }
-          }
-        }
-        assert(ik->previous_versions()->length() == live_count, "sanity check");
-        RC_TRACE(0x00000200,
-          ("unload: previous version stats: live=%d, GC'ed=%d", live_count,
-          gc_count));
-      }
-
-      // Non-unloadable classes were handled in always_strong_oops_do
-      if (!is_strongly_reachable(class_loader, e)) {
-        // Entry was not visited in phase1 (negated test from phase1)
-        assert(class_loader != NULL, "unloading entry with null class loader");
-        oop k_def_class_loader = ik->class_loader();
-
-        // Do we need to delete this system dictionary entry?
-        bool purge_entry = false;
-
-        // Do we need to delete this system dictionary entry?
-        if (!is_alive->do_object_b(class_loader)) {
-          // If the loader is not live this entry should always be
-          // removed (will never be looked up again). Note that this is
-          // not the same as unloading the referred class.
-          if (k_def_class_loader == class_loader) {
-            // This is the defining entry, so the referred class is about
-            // to be unloaded.
-#ifdef JVMPI_SUPPORT
-            // Notify the debugger and jvmpi, and clean up the class.
-#else // !JVMPI_SUPPORT
-            // Notify the debugger and clean up the class.
-#endif // JVMPI_SUPPORT
-            guarantee(!is_alive->do_object_b(e),
-                      "klass should not be live if defining loader is not");
-            class_was_unloaded = true;
-            // notify the debugger
-            if (JvmtiExport::should_post_class_unload()) {
-              JvmtiExport::post_class_unload(ik->as_klassOop());
-            }
-#ifdef JVMPI_SUPPORT
-            // Cannot post CLASS_UNLOAD event from here because JVM/PI 1.X
-            // requires that the event be posted with GC disabled. Since we
-            // are part of GC we just have to save the necessary info for
-            // the post.
-            if (jvmpi::is_event_enabled(JVMPI_EVENT_CLASS_UNLOAD)) {
-              jvmpi::save_class_unload_event_info(ik->java_mirror());
-            }
-#endif // JVMPI_SUPPORT
-
-            // notify ClassLoadingService of class unload
-            ClassLoadingService::notify_class_unloaded(ik);
-
-            // Clean up C heap
-            ik->release_C_heap_structures();
-          }
-          // Also remove this system dictionary entry.
-          purge_entry = true;
-
-        } else {
-          // The loader in this entry is alive. If the klass is dead,
-          // the loader must be an initiating loader (rather than the
-          // defining loader). Remove this entry.
-          if (!is_alive->do_object_b(e)) {
-            guarantee(!is_alive->do_object_b(k_def_class_loader),
-                      "defining loader should not be live if klass is not");
-            // If we get here, the class_loader must not be the defining
-            // loader, it must be an initiating one.
-            assert(k_def_class_loader != class_loader,
-                   "cannot have live defining loader and unreachable klass");
-
-            // Loader is live, but class and its defining loader are dead.
-            // Remove the entry. The class is going away.
-            purge_entry = true;
-          }
-        }
-
-        if (purge_entry) {
-          *p = probe->next();
-          if (probe == _current_class_entry) {
-            _current_class_entry = NULL;
-          }
-          free_entry(probe);
-          continue;
-        }
-      }
-      p = probe->next_addr();
-    }
-  }
-  return class_was_unloaded;
-}
-
-
-void Dictionary::always_strong_classes_do(OopClosure* blk) {
-  // Follow all system classes and temporary placeholders in dictionary
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry *probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      oop e = probe->klass();
-      oop class_loader = probe->loader();            
-      if (is_strongly_reachable(class_loader, e)) {
-        blk->do_oop((oop*)probe->klass_addr());
-        if (class_loader != NULL) {
-          blk->do_oop(probe->loader_addr());
-        }
-        probe->protection_domain_set_oops_do(blk);
-      }
-    }
-  }
-}
-
-
-//   Just the classes from defining class loaders
-void Dictionary::classes_do(void f(klassOop)) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      klassOop k = probe->klass();
-      if (probe->loader() == instanceKlass::cast(k)->class_loader()) {
-        f(k);
-      }
-    }
-  }
-}
-
-// Added for initialize_itable_for_klass to handle exceptions
-//   Just the classes from defining class loaders
-void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      klassOop k = probe->klass();
-      if (probe->loader() == instanceKlass::cast(k)->class_loader()) {
-        f(k, CHECK);
-      }
-    }
-  }
-}
-
-
-//   All classes, and their class loaders
-//   (added for helpers that use HandleMarks and ResourceMarks)
-// Don't iterate over placeholders
-void Dictionary::classes_do(void f(klassOop, oop, TRAPS), TRAPS) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      klassOop k = probe->klass();
-      f(k, probe->loader(), CHECK);
-    }
-  }
-}
-
-
-//   All classes, and their class loaders
-// Don't iterate over placeholders
-void Dictionary::classes_do(void f(klassOop, oop)) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      klassOop k = probe->klass();
-      f(k, probe->loader());
-    }
-  }
-}
-
-
-void Dictionary::oops_do(OopClosure* f) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      f->do_oop((oop*)probe->klass_addr());
-      if (probe->loader() != NULL) {
-        f->do_oop(probe->loader_addr());
-      }
-      probe->protection_domain_set_oops_do(f);
-    }
-  }
-}
-
-
-void Dictionary::methods_do(void f(methodOop)) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      klassOop k = probe->klass();
-      if (probe->loader() == instanceKlass::cast(k)->class_loader()) {
-        // only take klass is we have the entry with the defining class loader
-        instanceKlass::cast(k)->methods_do(f);
-      }
-    }
-  }
-}
-
-
-klassOop Dictionary::try_get_next_class() {
-  while (true) {
-    if (_current_class_entry != NULL) {
-      klassOop k = _current_class_entry->klass();
-      _current_class_entry = _current_class_entry->next();
-      return k;
-    }
-    _current_class_index = (_current_class_index + 1) % table_size();
-    _current_class_entry = bucket(_current_class_index);
-  }
-  // never reached
-}
-
-
-// Add a loaded class to the system dictionary.
-// Readers of the SystemDictionary aren't always locked, so _buckets
-// is volatile. The store of the next field in the constructor is
-// also cast to volatile;  we do this to ensure store order is maintained
-// by the compilers.
-
-void Dictionary::add_klass(symbolHandle class_name, Handle class_loader,
-                           KlassHandle obj) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  assert(obj() != NULL, "adding NULL obj");
-  assert(Klass::cast(obj())->name() == class_name(), "sanity check on name");
-
-  unsigned int hash = compute_hash(class_name, class_loader);
-  int index = hash_to_index(hash);
-  DictionaryEntry* entry = new_entry(hash, obj(), class_loader());
-  add_entry(index, entry);
-}
-
-
-// This routine does not lock the system dictionary.
-//
-// Since readers don't hold a lock, we must make sure that system
-// dictionary entries are only removed at a safepoint (when only one
-// thread is running), and are added to in a safe way (all links must
-// be updated in an MT-safe manner).
-//
-// Callers should be aware that an entry could be added just after
-// _buckets[index] is read here, so the caller will not see the new entry.
-DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash,
-                                       symbolHandle class_name,
-                                       Handle class_loader) {
-  symbolOop name_ = class_name();
-  oop loader_ = class_loader();
-  debug_only(_lookup_count++);
-  for (DictionaryEntry* entry = bucket(index); 
-                        entry != NULL; 
-                        entry = entry->next()) {
-    if (entry->hash() == hash && entry->equals(name_, loader_)) {
-      return entry;
-    }
-    debug_only(_lookup_length++);
-  }
-  return NULL;
-}
-
-
-klassOop Dictionary::find(int index, unsigned int hash, symbolHandle name,
-                          Handle loader, Handle protection_domain, TRAPS) {
-  DictionaryEntry* entry = get_entry(index, hash, name, loader);
-  if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) {
-    return entry->klass();
-  } else {
-    return NULL;
-  }
-}
-
-
-klassOop Dictionary::find_class(int index, unsigned int hash,
-                                symbolHandle name, Handle loader) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  assert (index == index_for(name, loader), "incorrect index?");
-
-  DictionaryEntry* entry = get_entry(index, hash, name, loader);
-  return (entry != NULL) ? entry->klass() : (klassOop)NULL;
-}
-
-
-// Variant of find_class for shared classes.  No locking required, as
-// that table is static.
-
-klassOop Dictionary::find_shared_class(int index, unsigned int hash,
-                                       symbolHandle name) {
-  assert (index == index_for(name, Handle()), "incorrect index?");
-
-  DictionaryEntry* entry = get_entry(index, hash, name, Handle());
-  return (entry != NULL) ? entry->klass() : (klassOop)NULL;
-}
-
-
-void Dictionary::add_protection_domain(int index, unsigned int hash,
-                                       instanceKlassHandle klass,
-                                       Handle loader, Handle protection_domain,
-                                       TRAPS) {
-  symbolHandle klass_name(THREAD, klass->name());
-  DictionaryEntry* entry = get_entry(index, hash, klass_name, loader);
-
-  assert(entry != NULL,"entry must be present, we just created it");
-  assert(protection_domain() != NULL, 
-         "real protection domain should be present");
-
-  entry->add_protection_domain(protection_domain());
-
-  assert(entry->contains_protection_domain(protection_domain()), 
-         "now protection domain should be present");
-}
-
-
-bool Dictionary::is_valid_protection_domain(int index, unsigned int hash,
-                                            symbolHandle name,
-                                            Handle loader,
-                                            Handle protection_domain) {
-  DictionaryEntry* entry = get_entry(index, hash, name, loader);
-  return entry->is_valid_protection_domain(protection_domain);
-}
-
-
-void Dictionary::reorder_dictionary() {
-
-  // Copy all the dictionary entries into a single master list.
-
-  DictionaryEntry* master_list = NULL;
-  for (int i = 0; i < table_size(); ++i) {
-    DictionaryEntry* p = bucket(i);
-    while (p != NULL) {
-      DictionaryEntry* tmp;
-      tmp = p->next();
-      p->set_next(master_list);
-      master_list = p;
-      p = tmp;
-    }
-    set_entry(i, NULL);
-  }
-
-  // Add the dictionary entries back to the list in the correct buckets.
-  Thread *thread = Thread::current();
-
-  while (master_list != NULL) {
-    DictionaryEntry* p = master_list;
-    master_list = master_list->next();
-    p->set_next(NULL);
-    symbolHandle class_name (thread, instanceKlass::cast((klassOop)(p->klass()))->name());
-    unsigned int hash = compute_hash(class_name, Handle(thread, p->loader()));
-    int index = hash_to_index(hash);
-    p->set_hash(hash);
-    p->set_next(bucket(index));
-    set_entry(index, p);
-  }
-}
-
-
-// ----------------------------------------------------------------------------
-#ifndef PRODUCT
-
-void Dictionary::print() {
-  ResourceMark rm;
-  HandleMark   hm;
-
-  tty->print_cr("Java system dictionary (classes=%d)", number_of_entries());
-  tty->print_cr("^ indicates that initiating loader is different from "
-                "defining loader");
-
-  for (int index = 0; index < table_size(); index++) {    
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      if (Verbose) tty->print("%4d: ", index);
-      klassOop e = probe->klass();
-      oop class_loader =  probe->loader();
-      bool is_defining_class = 
-         (class_loader == instanceKlass::cast(e)->class_loader());
-      tty->print("%s%s", is_defining_class ? " " : "^", 
-                   Klass::cast(e)->external_name());
-      if (class_loader != NULL) {
-        tty->print(", loader ");
-        class_loader->print_value();
-      }
-      tty->cr();
-    }
-  }
-}
-
-#endif
-
-void Dictionary::verify() {
-  guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
-  int element_count = 0;
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      klassOop e = probe->klass();
-      oop class_loader = probe->loader();
-      guarantee(Klass::cast(e)->oop_is_instance(), 
-                              "Verify of system dictionary failed");
-      // class loader must be present;  a null class loader is the
-      // boostrap loader
-      guarantee(class_loader == NULL || class_loader->is_instance(), 
-                "checking type of class_loader");
-      e->verify();
-      probe->verify_protection_domain_set();
-      element_count++; 
-    }
-  }
-  guarantee(number_of_entries() == element_count,
-            "Verify of system dictionary failed");
-  debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
-}
--- a/hotspot/src/share/vm/memory/dictionary.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,223 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)dictionary.hpp	1.15 07/05/05 17:05:47 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class DictionaryEntry;
-
-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-// The data structure for the system dictionary (and the shared system
-// dictionary).
-
-class Dictionary : public TwoOopHashtable {
-  friend class VMStructs;
-private:
-  // current iteration index.
-  static int                    _current_class_index;
-  // pointer to the current hash table entry.
-  static DictionaryEntry*       _current_class_entry;
-
-  DictionaryEntry* get_entry(int index, unsigned int hash,
-                             symbolHandle name, Handle loader);
-
-  DictionaryEntry* bucket(int i) {
-    return (DictionaryEntry*)Hashtable::bucket(i);
-  }
-
-  // The following method is not MT-safe and must be done under lock.
-  DictionaryEntry** bucket_addr(int i) {
-    return (DictionaryEntry**)Hashtable::bucket_addr(i);
-  }
-
-  void add_entry(int index, DictionaryEntry* new_entry) {
-    Hashtable::add_entry(index, (HashtableEntry*)new_entry);
-  }
-
-
-public:
-  Dictionary(int table_size);
-  Dictionary(int table_size, HashtableBucket* t, int number_of_entries);
-  
-  DictionaryEntry* new_entry(unsigned int hash, klassOop klass, oop loader);
-
-  DictionaryEntry* new_entry();
-
-  void free_entry(DictionaryEntry* entry);
-
-  void add_klass(symbolHandle class_name, Handle class_loader,KlassHandle obj);
-
-  klassOop find_class(int index, unsigned int hash,
-                      symbolHandle name, Handle loader);
-
-  klassOop find_shared_class(int index, unsigned int hash, symbolHandle name);
-
-  // Compiler support
-  klassOop try_get_next_class();
-
-  // GC support
-
-  void oops_do(OopClosure* f);
-  void always_strong_classes_do(OopClosure* blk);
-  void classes_do(void f(klassOop));
-  void classes_do(void f(klassOop, TRAPS), TRAPS);
-  void classes_do(void f(klassOop, oop));
-  void classes_do(void f(klassOop, oop, TRAPS), TRAPS);
-
-  void methods_do(void f(methodOop));
-
-
-  // Classes loaded by the bootstrap loader are always strongly reachable.
-  // If we're not doing class unloading, all classes are strongly reachable.
-  static bool is_strongly_reachable(oop class_loader, oop klass) {
-    assert (klass != NULL, "should have non-null klass");
-    return (class_loader == NULL || !ClassUnloading);
-  }
-
-  // Unload (that is, break root links to) all unmarked classes and
-  // loaders.  Returns "true" iff something was unloaded.
-  bool do_unloading(BoolObjectClosure* is_alive);
-
-  // Protection domains
-  klassOop find(int index, unsigned int hash, symbolHandle name,
-                Handle loader, Handle protection_domain, TRAPS);
-  bool is_valid_protection_domain(int index, unsigned int hash,
-                                  symbolHandle name, Handle class_loader,
-                                  Handle protection_domain);
-  void add_protection_domain(int index, unsigned int hash,
-                             instanceKlassHandle klass, Handle loader,
-                             Handle protection_domain, TRAPS);
-
-  // Sharing support
-  void dump(SerializeOopClosure* soc);
-  void restore(SerializeOopClosure* soc);
-  void reorder_dictionary();
-
-  
-#ifndef PRODUCT
-  void print();
-#endif
-  void verify();
-};
-
-// The following classes can be in dictionary.cpp, but we need these
-// to be in header file so that SA's vmStructs can access.
-
-class ProtectionDomainEntry :public CHeapObj {
-  friend class VMStructs;
- public:
-  ProtectionDomainEntry* _next;
-  oop                    _protection_domain;
-
-  ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
-    _protection_domain = protection_domain;
-    _next              = next;
-  }
-
-  ProtectionDomainEntry* next() { return _next; }
-  oop protection_domain() { return _protection_domain; }
-};
-
-// An entry in the system dictionary, this describes a class as
-// { klassOop, loader, protection_domain }.
-
-class DictionaryEntry : public HashtableEntry {
-  friend class VMStructs;
- private:
-  // Contains the set of approved protection domains that can access
-  // this system dictionary entry.
-  ProtectionDomainEntry* _pd_set;
-  oop                    _loader;
-
-
- public:
-  // Tells whether a protection is in the approved set.
-  bool contains_protection_domain(oop protection_domain) const;
-  // Adds a protection domain to the approved set.
-  void add_protection_domain(oop protection_domain);
-
-  klassOop klass() const { return (klassOop)literal(); }
-  klassOop* klass_addr() { return (klassOop*)literal_addr(); }
-
-  DictionaryEntry* next() const {
-    return (DictionaryEntry*)HashtableEntry::next();
-  }
-
-  DictionaryEntry** next_addr() {
-    return (DictionaryEntry**)HashtableEntry::next_addr();
-  }
-
-  oop loader() const { return _loader; }
-  void set_loader(oop loader) { _loader = loader; }
-  oop* loader_addr() { return &_loader; }
-
-  ProtectionDomainEntry* pd_set() const { return _pd_set; }
-  void set_pd_set(ProtectionDomainEntry* pd_set) { _pd_set = pd_set; }
-
-  bool has_protection_domain() { return _pd_set != NULL; }
-
-  // Tells whether the initiating class' protection can access the this _klass
-  bool is_valid_protection_domain(Handle protection_domain) {
-    if (!ProtectionDomainVerification) return true;
-    if (!SystemDictionary::has_checkPackageAccess()) return true;
-
-    return protection_domain() == NULL
-         ? true
-         : contains_protection_domain(protection_domain());
-  }
-
-
-  void protection_domain_set_oops_do(OopClosure* f) {
-    for (ProtectionDomainEntry* current = _pd_set;
-                                current != NULL;
-                                current = current->_next) {
-      f->do_oop(&(current->_protection_domain));
-    }
-  }
-
-  void verify_protection_domain_set() {
-    for (ProtectionDomainEntry* current = _pd_set;
-                                current != NULL;
-                                current = current->_next) {
-      current->_protection_domain->verify();
-    }
-  }
-
-  bool equals(symbolOop class_name, oop class_loader) const {
-    klassOop klass = (klassOop)literal();
-    return (instanceKlass::cast(klass)->name() == class_name &&
-            _loader == class_loader);
-  }
-
-  void print() {
-    int count = 0;
-    for (ProtectionDomainEntry* current = _pd_set;
-                                current != NULL;
-                                current = current->_next) {
-      count++;
-    }
-    tty->print_cr("pd set = #%d", count);
-  }
-};
-
--- a/hotspot/src/share/vm/memory/freeBlockDictionary.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)freeBlockDictionary.cpp	1.12 07/05/05 17:05:47 JVM"
-#endif
-/*
- * Copyright 2002-2004 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_freeBlockDictionary.cpp.incl"
-
-#ifndef PRODUCT
-Mutex* FreeBlockDictionary::par_lock() const {
-  return _lock;
-}
-
-void FreeBlockDictionary::set_par_lock(Mutex* lock) {
-  _lock = lock;
-}
-
-void FreeBlockDictionary::verify_par_locked() const {
-#ifdef ASSERT
-  if (ParallelGCThreads > 0) {
-    Thread* myThread = Thread::current();
-    if (myThread->is_GC_task_thread()) {
-      assert(par_lock() != NULL, "Should be using locking?");
-      assert_lock_strong(par_lock());
-    }
-  }
-#endif // ASSERT
-}
-#endif
--- a/hotspot/src/share/vm/memory/freeBlockDictionary.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,174 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)freeBlockDictionary.hpp	1.32 07/05/05 17:05:47 JVM"
-#endif
-/*
- * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-//
-// Free block maintenance for Concurrent Mark Sweep Generation
-//
-// The main data structure for free blocks are
-// . an indexed array of small free blocks, and
-// . a dictionary of large free blocks
-//
-
-// No virtuals in FreeChunk (don't want any vtables).
-
-// A FreeChunk is merely a chunk that can be in a doubly linked list
-// and has a size field. NOTE: FreeChunks are distinguished from allocated
-// objects in two ways (by the sweeper). The second word (prev) has the
-// LSB set to indicate a free chunk; allocated objects' klass() pointers
-// don't have their LSB set. The corresponding bit in the CMSBitMap is
-// set when the chunk is allocated. There are also blocks that "look free"
-// but are not part of the free list and should not be coalesced into larger
-// free blocks. These free blocks have their two LSB's set.
-
-class FreeChunk VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
-  FreeChunk* _next;
-  FreeChunk* _prev;
-  size_t     _size;
-
- public:
-  NOT_PRODUCT(static const size_t header_size();)
-  // Returns "true" if the "wrd", which is required to be the second word
-  // of a block, indicates that the block represents a free chunk.
-  static bool secondWordIndicatesFreeChunk(intptr_t wrd) {
-    return (wrd & 0x1) == 0x1;
-  }
-  bool isFree()       const {
-    return secondWordIndicatesFreeChunk((intptr_t)_prev);
-  }
-  bool cantCoalesce() const { return (((intptr_t)_prev) & 0x3) == 0x3; }
-  FreeChunk* next()   const { return _next; }
-  FreeChunk* prev()   const { return (FreeChunk*)(((intptr_t)_prev) & ~(0x3)); }
-  debug_only(void* prev_addr() const { return (void*)&_prev; })
-
-  void linkAfter(FreeChunk* ptr) {
-    linkNext(ptr);
-    if (ptr != NULL) ptr->linkPrev(this);
-  }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
-  void linkNext(FreeChunk* ptr) { _next = ptr; }
-  void linkPrev(FreeChunk* ptr) { _prev = (FreeChunk*)((intptr_t)ptr | 0x1); }
-  void clearPrev()              { _prev = NULL; }
-  void clearNext()              { _next = NULL; }
-  void dontCoalesce()      {
-    // the block should be free
-    assert(isFree(), "Should look like a free block");
-    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
-  }
-  void markFree()    { _prev = (FreeChunk*)((intptr_t)_prev | 0x1);    }
-  void markNotFree() { _prev = NULL; }
-
-  size_t size()           const { return _size; }
-  void setSize(size_t size)     { _size = size; }
-
-  // For volatile reads:
-  size_t* size_addr()           { return &_size; }
-
-  // Return the address past the end of this chunk
-  HeapWord* end() const { return ((HeapWord*) this) + _size; }
-
-  // debugging
-  void verify()             const PRODUCT_RETURN;
-  void verifyList()         const PRODUCT_RETURN;
-  void mangleAllocated(size_t size) PRODUCT_RETURN;
-  void mangleFreed(size_t size)     PRODUCT_RETURN; 
-};
-
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
-  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
-
-// A FreeBlockDictionary is an abstract superclass that will allow
-// a number of alternative implementations in the future.
-class FreeBlockDictionary: public CHeapObj {
- public:
-  enum Dither {
-    atLeast,
-    exactly,
-    roughly
-  };
-  enum DictionaryChoice {
-    dictionaryBinaryTree = 0,
-    dictionarySplayTree  = 1,
-    dictionarySkipList   = 2
-  };
-
- private:
-  NOT_PRODUCT(Mutex* _lock;)
-
- public:
-  virtual void       removeChunk(FreeChunk* fc) = 0;
-  virtual FreeChunk* getChunk(size_t size, Dither dither = atLeast) = 0;
-  virtual void       returnChunk(FreeChunk* chunk) = 0;
-  virtual size_t     totalChunkSize(debug_only(const Mutex* lock)) const = 0;
-  virtual size_t     maxChunkSize()   const = 0;
-  virtual size_t     minSize()        const = 0;
-  // Reset the dictionary to the initial conditions for a single
-  // block.
-  virtual void	     reset(HeapWord* addr, size_t size) = 0;
-  virtual void	     reset() = 0;
-
-  virtual void       dictCensusUpdate(size_t size, bool split, bool birth) = 0;
-  virtual bool       coalDictOverPopulated(size_t size) = 0;
-  virtual void       beginSweepDictCensus(double coalSurplusPercent,
-                       float sweep_current, float sweep_ewstimate) = 0;
-  virtual void       endSweepDictCensus(double splitSurplusPercent) = 0;
-  virtual FreeChunk* findLargestDict() const = 0;
-  // verify that the given chunk is in the dictionary.
-  virtual bool verifyChunkInFreeLists(FreeChunk* tc) const = 0;
-
-  // Sigma_{all_free_blocks} (block_size^2)
-  virtual double sum_of_squared_block_sizes() const = 0;
-
-  virtual FreeChunk* find_chunk_ends_at(HeapWord* target) const = 0;
-  virtual void inc_totalSize(size_t v) = 0;
-  virtual void dec_totalSize(size_t v) = 0;
-
-  NOT_PRODUCT (
-    virtual size_t   sumDictReturnedBytes() = 0;
-    virtual void     initializeDictReturnedBytes() = 0;
-    virtual size_t   totalCount() = 0;
-  )
-
-  virtual void       reportStatistics() const {
-    gclog_or_tty->print("No statistics available");
-  }
-
-  virtual void 	     printDictCensus() const = 0;
-
-  virtual void       verify()         const = 0;
-
-  Mutex* par_lock()                const PRODUCT_RETURN0;
-  void   set_par_lock(Mutex* lock)       PRODUCT_RETURN;
-  void   verify_par_locked()       const PRODUCT_RETURN;
-};
--- a/hotspot/src/share/vm/memory/freeChunk.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)freeChunk.cpp	1.16 07/05/05 17:05:47 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_freeChunk.cpp.incl"
-
-#ifndef PRODUCT
-
-#define baadbabeHeapWord badHeapWordVal
-#define deadbeefHeapWord 0xdeadbeef
-
-size_t const FreeChunk::header_size() {
-  return sizeof(FreeChunk)/HeapWordSize;
-}
-
-void FreeChunk::mangleAllocated(size_t size) {
-  // mangle all but the header of a just-allocated block
-  // of storage
-  assert(size >= MinChunkSize, "smallest size of object");
-  // we can't assert that _size == size because this may be an
-  // allocation out of a linear allocation block
-  assert(sizeof(FreeChunk) % HeapWordSize == 0,
-         "shouldn't write beyond chunk");
-  HeapWord* addr = (HeapWord*)this;
-  size_t hdr = header_size();
-  Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord);
-}
-
-void FreeChunk::mangleFreed(size_t size) {
-  assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns");
-  // mangle all but the header of a just-freed block of storage
-  // just prior to passing it to the storage dictionary
-  assert(size >= MinChunkSize, "smallest size of object");
-  assert(size == _size, "just checking");
-  HeapWord* addr = (HeapWord*)this;
-  size_t hdr = header_size();
-  Copy::fill_to_words(addr + hdr, size - hdr, deadbeefHeapWord);
-}
-
-void FreeChunk::verifyList() const {
-  FreeChunk* nextFC = next();
-  if (nextFC != NULL) {
-    assert(this == nextFC->prev(), "broken chain");
-    assert(size() == nextFC->size(), "wrong size");
-    nextFC->verifyList();
-  }
-}
-#endif
--- a/hotspot/src/share/vm/memory/freeList.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,307 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)freeList.cpp	1.31 07/05/05 17:05:48 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_freeList.cpp.incl"
-
-// Free list.  A FreeList is used to access a linked list of chunks
-// of space in the heap.  The head and tail are maintained so that
-// items can be (as in the current implementation) added at the 
-// at the tail of the list and removed from the head of the list to
-// maintain a FIFO queue.
-
-FreeList::FreeList() :
-  _head(NULL), _tail(NULL)
-#ifdef ASSERT
-  , _protecting_lock(NULL)
-#endif
-{
-  _size		= 0;
-  _count	= 0;
-  _hint		= 0;
-  init_statistics();
-}
-
-FreeList::FreeList(FreeChunk* fc) :
-  _head(fc), _tail(fc)
-#ifdef ASSERT
-  , _protecting_lock(NULL)
-#endif
-{
-  _size		= fc->size();
-  _count	= 1;
-  _hint		= 0;
-  init_statistics();
-#ifndef PRODUCT
-  _allocation_stats.set_returnedBytes(size() * HeapWordSize);
-#endif
-}
-
-FreeList::FreeList(HeapWord* addr, size_t size) :
-  _head((FreeChunk*) addr), _tail((FreeChunk*) addr)
-#ifdef ASSERT
-  , _protecting_lock(NULL)
-#endif
-{
-  assert(size > sizeof(FreeChunk), "size is too small");
-  head()->setSize(size);
-  _size		= size;
-  _count	= 1;
-  init_statistics();
-#ifndef PRODUCT
-  _allocation_stats.set_returnedBytes(_size * HeapWordSize);
-#endif
-}
-
-void FreeList::reset(size_t hint) {
-  set_count(0);
-  set_head(NULL);
-  set_tail(NULL);
-  set_hint(hint);
-}
-
-void FreeList::init_statistics() {
-  _allocation_stats.initialize();
-}
-
-FreeChunk* FreeList::getChunkAtHead() {
-  assert_proper_lock_protection();
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  FreeChunk* fc = head();
-  if (fc != NULL) {
-    FreeChunk* nextFC = fc->next();
-    if (nextFC != NULL) {
-      // The chunk fc being removed has a "next".  Set the "next" to the
-      // "prev" of fc.
-      nextFC->linkPrev(NULL);
-    } else { // removed tail of list
-      link_tail(NULL);
-    }
-    link_head(nextFC);
-    decrement_count();
-  }
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  return fc;
-}
-
-
-void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
-  assert_proper_lock_protection();
-  assert(fl->count() == 0, "Precondition");
-  if (count() > 0) {
-    int k = 1;
-    fl->set_head(head()); n--;
-    FreeChunk* tl = head();
-    while (tl->next() != NULL && n > 0) {
-      tl = tl->next(); n--; k++;
-    }
-    assert(tl != NULL, "Loop Inv.");
-    
-    // First, fix up the list we took from.
-    FreeChunk* new_head = tl->next();
-    set_head(new_head);
-    set_count(count() - k);
-    if (new_head == NULL) {
-      set_tail(NULL);
-    } else {
-      new_head->linkPrev(NULL);
-    }
-    // Now we can fix up the tail.
-    tl->linkNext(NULL);
-    // And return the result.
-    fl->set_tail(tl);
-    fl->set_count(k);
-  }
-}
-
-// Remove this chunk from the list
-void FreeList::removeChunk(FreeChunk*fc) {
-   assert_proper_lock_protection();
-   assert(head() != NULL, "Remove from empty list");
-   assert(fc != NULL, "Remove a NULL chunk");
-   assert(size() == fc->size(), "Wrong list");
-   assert(head() == NULL || head()->prev() == NULL, "list invariant");
-   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-
-   FreeChunk* prevFC = fc->prev();
-   FreeChunk* nextFC = fc->next();
-   if (nextFC != NULL) {
-     // The chunk fc being removed has a "next".  Set the "next" to the
-     // "prev" of fc.
-     nextFC->linkPrev(prevFC);
-   } else { // removed tail of list
-     link_tail(prevFC);
-   }
-   if (prevFC == NULL) { // removed head of list
-     link_head(nextFC);
-     assert(nextFC == NULL || nextFC->prev() == NULL, 
-       "Prev of head should be NULL");
-   } else {
-     prevFC->linkNext(nextFC);
-     assert(tail() != prevFC || prevFC->next() == NULL,
-       "Next of tail should be NULL");
-   }
-   decrement_count();
-#define TRAP_CODE 1
-#if TRAP_CODE
-   if (head() == NULL) {
-     guarantee(tail() == NULL, "INVARIANT");
-     guarantee(count() == 0, "INVARIANT");
-   }
-#endif
-   // clear next and prev fields of fc, debug only
-   NOT_PRODUCT(
-     fc->linkPrev(NULL);
-     fc->linkNext(NULL);
-   )
-   assert(fc->isFree(), "Should still be a free chunk");
-   assert(head() == NULL || head()->prev() == NULL, "list invariant");
-   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-   assert(head() == NULL || head()->size() == size(), "wrong item on list");
-   assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
-}
-
-// Add this chunk at the head of the list.
-void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
-  assert_proper_lock_protection();
-  assert(chunk != NULL, "insert a NULL chunk");
-  assert(size() == chunk->size(), "Wrong size");
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  
-  FreeChunk* oldHead = head();
-  assert(chunk != oldHead, "double insertion");
-  chunk->linkAfter(oldHead);
-  link_head(chunk);
-  if (oldHead == NULL) { // only chunk in list
-    assert(tail() == NULL, "inconsistent FreeList");
-    link_tail(chunk);
-  }
-  increment_count(); // of # of chunks in list
-  DEBUG_ONLY(
-    if (record_return) {
-      increment_returnedBytes_by(size()*HeapWordSize);
-    }
-  )
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  assert(head() == NULL || head()->size() == size(), "wrong item on list");
-  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
-}
-
-void FreeList::returnChunkAtHead(FreeChunk* chunk) {
-  assert_proper_lock_protection();
-  returnChunkAtHead(chunk, true);
-}
-
-// Add this chunk at the tail of the list.
-void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
-  assert_proper_lock_protection();
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  assert(chunk != NULL, "insert a NULL chunk");
-  assert(size() == chunk->size(), "wrong size");
-
-  FreeChunk* oldTail = tail();
-  assert(chunk != oldTail, "double insertion");
-  if (oldTail != NULL) {
-    oldTail->linkAfter(chunk);
-  } else { // only chunk in list
-    assert(head() == NULL, "inconsistent FreeList");
-    link_head(chunk);
-  }
-  link_tail(chunk);
-  increment_count();  // of # of chunks in list
-  DEBUG_ONLY(
-    if (record_return) {
-      increment_returnedBytes_by(size()*HeapWordSize);
-    }
-  )
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  assert(head() == NULL || head()->size() == size(), "wrong item on list");
-  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
-}
-
-void FreeList::returnChunkAtTail(FreeChunk* chunk) {
-  returnChunkAtTail(chunk, true);
-}
-
-void FreeList::prepend(FreeList* fl) {
-  assert_proper_lock_protection();
-  if (fl->count() > 0) {
-    if (count() == 0) {
-      set_head(fl->head());
-      set_tail(fl->tail());
-      set_count(fl->count());
-    } else {
-      // Both are non-empty.
-      FreeChunk* fl_tail = fl->tail();
-      FreeChunk* this_head = head();
-      assert(fl_tail->next() == NULL, "Well-formedness of fl");
-      fl_tail->linkNext(this_head);
-      this_head->linkPrev(fl_tail);
-      set_head(fl->head());
-      set_count(count() + fl->count());
-    }
-    fl->set_head(NULL);
-    fl->set_tail(NULL);
-    fl->set_count(0);
-  }
-}
-
-// verifyChunkInFreeLists() is used to verify that an item is in this free list.
-// It is used as a debugging aid.
-bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
-  // This is an internal consistency check, not part of the check that the
-  // chunk is in the free lists.
-  guarantee(fc->size() == size(), "Wrong list is being searched");
-  FreeChunk* curFC = head();
-  while (curFC) {
-    // This is an internal consistency check.
-    guarantee(size() == curFC->size(), "Chunk is in wrong list.");
-    if (fc == curFC) {
-      return true;
-    }
-    curFC = curFC->next();
-  }
-  return false;
-}
-
-#ifndef PRODUCT
-void FreeList::assert_proper_lock_protection_work() const {
-#ifdef ASSERT
-  if (_protecting_lock != NULL &&
-      SharedHeap::heap()->n_par_threads() > 0) {
-    // Should become an assert.
-    guarantee(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
-  }
-#endif
-}
-#endif
--- a/hotspot/src/share/vm/memory/freeList.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,304 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)freeList.hpp	1.31 07/05/05 17:05:48 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class CompactibleFreeListSpace;
-
-// A class for maintaining a free list of FreeChunk's.  The FreeList
-// maintains a the structure of the list (head, tail, etc.) plus
-// statistics for allocations from the list.  The links between items
-// are not part of FreeList.  The statistics are
-// used to make decisions about coalescing FreeChunk's when they
-// are swept during collection.
-//
-// See the corresponding .cpp file for a description of the specifics
-// for that implementation.
-
-class Mutex;
-
-class FreeList VALUE_OBJ_CLASS_SPEC {
-  friend class CompactibleFreeListSpace;
-  FreeChunk*	_head;		// List of free chunks
-  FreeChunk*	_tail;		// Tail of list of free chunks
-  size_t	_size;		// Size in Heap words of each chunks
-  ssize_t	_count;		// Number of entries in list
-  size_t        _hint;		// next larger size list with a positive surplus
-
-  AllocationStats _allocation_stats;		// statistics for smart allocation
-
-#ifdef ASSERT
-  Mutex*	_protecting_lock;
-#endif
-
-  // Asserts false if the protecting lock (if any) is not held.
-  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
-  void assert_proper_lock_protection() const {
-#ifdef ASSERT
-    if (_protecting_lock != NULL)
-      assert_proper_lock_protection_work();
-#endif
-  }
-
-  // Initialize the allocation statistics.
- protected:
-  void init_statistics();
-  void set_count(ssize_t v) { _count = v;}
-  void increment_count() { _count++; }
-  void decrement_count() {
-    _count--;
-    assert(_count >= 0, "Count should not be negative"); }
-
- public:
-  // Constructor
-  // Construct a list without any entries.
-  FreeList();
-  // Construct a list with "fc" as the first (and lone) entry in the list.
-  FreeList(FreeChunk* fc);
-  // Construct a list which will have a FreeChunk at address "addr" and
-  // of size "size" as the first (and lone) entry in the list.
-  FreeList(HeapWord* addr, size_t size);
-
-  // Reset the head, tail, hint, and count of a free list.
-  void reset(size_t hint);
-
-  // Declare the current free list to be protected by the given lock.
-#ifdef ASSERT
-  void set_protecting_lock(Mutex* protecting_lock) {
-    _protecting_lock = protecting_lock;
-  }
-#endif
-
-  // Accessors.
-  FreeChunk* head() const {
-    assert_proper_lock_protection();
-    return _head;
-  }
-  void set_head(FreeChunk* v) { 
-    assert_proper_lock_protection();
-    _head = v; 
-    assert(!_head || _head->size() == _size, "bad chunk size"); 
-  }
-  // Set the head of the list and set the prev field of non-null
-  // values to NULL.
-  void link_head(FreeChunk* v) {
-    assert_proper_lock_protection();
-    set_head(v); 
-    // If this method is not used (just set the head instead),
-    // this check can be avoided.
-    if (v != NULL) {
-      v->linkPrev(NULL);
-    }
-  }
-
-  FreeChunk* tail() const {
-    assert_proper_lock_protection();
-    return _tail;
-  }
-  void set_tail(FreeChunk* v) { 
-    assert_proper_lock_protection();
-    _tail = v; 
-    assert(!_tail || _tail->size() == _size, "bad chunk size");
-  }
-  // Set the tail of the list and set the next field of non-null
-  // values to NULL.
-  void link_tail(FreeChunk* v) {
-    assert_proper_lock_protection();
-    set_tail(v); 
-    if (v != NULL) {
-      v->clearNext();
-    }
-  }
-
-  // No locking checks in read-accessors: lock-free reads (only) are benign.
-  // Readers are expected to have the lock if they are doing work that
-  // requires atomicity guarantees in sections of code.
-  size_t size() const {
-    return _size;
-  }
-  void set_size(size_t v) {
-    assert_proper_lock_protection();
-    _size = v;
-  }
-  ssize_t count() const {
-    return _count;
-  }
-  size_t hint() const {
-    return _hint;
-  }
-  void set_hint(size_t v) {
-    assert_proper_lock_protection();
-    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
-  }
-
-  // Accessors for statistics
-  AllocationStats* allocation_stats() {
-    assert_proper_lock_protection();
-    return &_allocation_stats;
-  }
-
-  ssize_t desired() const {
-    return _allocation_stats.desired();
-  }
-  void compute_desired(float inter_sweep_current,
-                       float inter_sweep_estimate) {
-    assert_proper_lock_protection();
-    _allocation_stats.compute_desired(_count,
-                                      inter_sweep_current,
-                                      inter_sweep_estimate);
-  }
-  ssize_t coalDesired() const {
-    return _allocation_stats.coalDesired();
-  }
-  void set_coalDesired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coalDesired(v);
-  }
-
-  ssize_t surplus() const {
-    return _allocation_stats.surplus();
-  }
-  void set_surplus(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_surplus(v);
-  }
-  void increment_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_surplus();
-  }
-  void decrement_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.decrement_surplus();
-  }
-
-  ssize_t bfrSurp() const {
-    return _allocation_stats.bfrSurp();
-  }
-  void set_bfrSurp(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_bfrSurp(v);
-  }
-  ssize_t prevSweep() const {
-    return _allocation_stats.prevSweep();
-  }
-  void set_prevSweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_prevSweep(v);
-  }
-  ssize_t beforeSweep() const {
-    return _allocation_stats.beforeSweep();
-  }
-  void set_beforeSweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_beforeSweep(v);
-  }
-
-  ssize_t coalBirths() const {
-    return _allocation_stats.coalBirths();
-  }
-  void set_coalBirths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coalBirths(v);
-  }
-  void increment_coalBirths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coalBirths();
-  }
-
-  ssize_t coalDeaths() const {
-    return _allocation_stats.coalDeaths();
-  }
-  void set_coalDeaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coalDeaths(v);
-  }
-  void increment_coalDeaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coalDeaths();
-  }
-
-  ssize_t splitBirths() const {
-    return _allocation_stats.splitBirths();
-  }
-  void set_splitBirths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_splitBirths(v);
-  }
-  void increment_splitBirths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_splitBirths();
-  }
-
-  ssize_t splitDeaths() const {
-    return _allocation_stats.splitDeaths();
-  }
-  void set_splitDeaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_splitDeaths(v);
-  }
-  void increment_splitDeaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_splitDeaths();
-  }
-
-  NOT_PRODUCT(
-    // For debugging.  The "_returnedBytes" in all the lists are summed
-    // and compared with the total number of bytes swept during a 
-    // collection.
-    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
-    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
-    void increment_returnedBytes_by(size_t v) { 
-      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v); 
-    }
-  )
-
-  // Unlink head of list and return it.  Returns NULL if
-  // the list is empty.
-  FreeChunk* getChunkAtHead();
-
-  // Remove the first "n" or "count", whichever is smaller, chunks from the 
-  // list, setting "fl", which is required to be empty, to point to them.
-  void getFirstNChunksFromList(size_t n, FreeList* fl);
-
-  // Unlink this chunk from it's free list
-  void removeChunk(FreeChunk* fc);
-
-  // Add this chunk to this free list.
-  void returnChunkAtHead(FreeChunk* fc);
-  void returnChunkAtTail(FreeChunk* fc);
-
-  // Similar to returnChunk* but also records some diagnostic
-  // information.
-  void returnChunkAtHead(FreeChunk* fc, bool record_return);
-  void returnChunkAtTail(FreeChunk* fc, bool record_return);
-
-  // Prepend "fl" (whose size is required to be the same as that of "this")
-  // to the front of "this" list.
-  void prepend(FreeList* fl);
-
-  // Verify that the chunk is in the list.
-  // found.  Return NULL if "fc" is not found.
-  bool verifyChunkInFreeLists(FreeChunk* fc) const;
-};
--- a/hotspot/src/share/vm/memory/gcLocker.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/gcLocker.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)gcLocker.cpp	1.51 07/05/05 17:05:48 JVM"
+#pragma ident "@(#)gcLocker.cpp	1.52 07/05/17 15:54:45 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -62,13 +62,8 @@
     // We're the last thread out. Cause a GC to occur.
     // GC will also check is_active, so this check is not
     // strictly needed. It's added here to make it clear that
-#ifdef JVMPI_SUPPORT
-    // the GC will NOT be performed if JVMPI (or any other caller
-    // of GC_locker::lock()) still needs GC locked.
-#else // !JVMPI_SUPPORT
     // the GC will NOT be performed if any other caller
     // of GC_locker::lock() still needs GC locked.
-#endif // JVMPI_SUPPORT
     if (!is_active()) {
       _doing_gc = true;
       {
--- a/hotspot/src/share/vm/memory/gcLocker.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/gcLocker.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)gcLocker.hpp	1.59 07/05/05 17:05:48 JVM"
+#pragma ident "@(#)gcLocker.hpp	1.60 07/05/17 15:54:47 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -109,16 +109,6 @@
   //
   // JNI critical regions are the only participants in this scheme
   // because they are, by spec, well bounded while in a critical region.
-#ifdef JVMPI_SUPPORT
-  // JVMPI is the other GC_locker user, and they are not necessarily
-  // well bounded, as they lock GC while interacting with an external
-  // agent.
-  //
-  // If excessive OutOfMemory errors occur during JVMPI execution
-  // because of the GC lock being held too long, it's probably more
-  // appropriate to simply increase the heap size, rather than attempt
-  // to block threads.
-#endif // JVMPI_SUPPORT
   //
   // Each of the following two method is split into a fast path and a slow
   // path. JNICritical_lock is only grabbed in the slow path.
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)genCollectedHeap.cpp	1.186 07/05/05 17:05:46 JVM"
+#pragma ident "@(#)genCollectedHeap.cpp	1.187 07/05/17 15:54:50 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -435,12 +435,7 @@
     }
   }
   
-#ifdef JVMPI_SUPPORT
-  { // Call to jvmpi::post_class_unload_events must occur outside of active GC
-#else // !JVMPI_SUPPORT
   {
-#endif // JVMPI_SUPPORT
-
     FlagSetting fl(_is_gc_active, true);
 
     bool complete = full && (max_level == (n_gens()-1));
@@ -624,14 +619,6 @@
     tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
     vm_exit(-1);
   }
-
-#ifdef JVMPI_SUPPORT
-  // Information about classes unloaded by GC may have been saved by
-  // SystemDictionary::follow_roots_phase2(). We post CLASS_UNLOAD
-  // events from here because we can lock GC. We always call this
-  // function to make sure that the saved memory is released.
-  jvmpi::post_class_unload_events();
-#endif // JVMPI_SUPPORT
 }
     
 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
@@ -1211,22 +1198,6 @@
   gclog_or_tty->print("]");
 }
 
-#ifdef JVMPI_SUPPORT
-int GenCollectedHeap::addr_to_arena_id(void* addr) {
-  int res = 1;
-  for (int i = 0; i < _n_gens; i++) {
-    int loc = _gens[i]->addr_to_arena_id(addr);
-    // Non-negative value means contained address; negative indicates
-    // how many arena id's were in the generation.
-    if (loc >= 0) return res + loc;
-    else res = res - loc;
-  }
-  if (perm_gen()->is_in_reserved(addr)) return res;
-  // Otherwise...
-  return 0;
-}
-#endif // JVMPI_SUPPORT
-
 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
  private:
   bool _full;
@@ -1240,13 +1211,6 @@
 void GenCollectedHeap::gc_prologue(bool full) {
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI notification
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_GC_START)) {
-    jvmpi::post_gc_start_event();
-  }
-#endif // JVMPI_SUPPORT
-  
   always_do_update_barrier = false;
   // Fill TLAB's and such
   CollectedHeap::accumulate_statistics_all_tlabs();
@@ -1282,13 +1246,6 @@
   // flag again if the condition persists despite the collection.
   clear_incremental_collection_will_fail();
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_GC_FINISH)) {
-    jvmpi::post_gc_finish_event(used(), capacity());
-  }
-  // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
-  // is set.
-#endif // JVMPI_SUPPORT
 #ifdef COMPILER2
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)genCollectedHeap.hpp	1.102 07/05/05 17:05:50 JVM"
+#pragma ident "@(#)genCollectedHeap.hpp	1.103 07/05/17 15:54:53 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -320,10 +320,6 @@
   void print_heap_change(size_t prev_used) const;
   void print_perm_heap_change(size_t perm_prev_used) const;
 
-#ifdef JVMPI_SUPPORT
-  int addr_to_arena_id(void* addr);
-#endif // JVMPI_SUPPORT
-
   // The functions below are helper functions that a subclass of
   // "CollectedHeap" can use in the implementation of its virtual
   // functions.
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)genMarkSweep.cpp	1.39 07/05/05 17:05:49 JVM"
+#pragma ident "@(#)genMarkSweep.cpp	1.40 07/05/17 15:54:55 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -66,13 +66,6 @@
 
   mark_sweep_phase1(level, clear_all_softrefs);
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_OBJECT_FREE)) {
-    JVMPI_Object_Free clo;
-    gch->object_iterate(&clo);
-  }
-#endif // JVMPI_SUPPORT
-
   mark_sweep_phase2();
 
   // Don't add any more derived pointers during phase3
--- a/hotspot/src/share/vm/memory/genOopClosures.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/genOopClosures.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)genOopClosures.hpp	1.62 07/05/05 17:05:50 JVM"
+#pragma ident "@(#)genOopClosures.hpp	1.63 07/05/17 15:54:57 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -157,12 +157,7 @@
   HeapWord* _boundary;
   void do_oop_work(oop* p,
 			  bool gc_barrier,
-#ifdef JVMPI_SUPPORT
-			  bool root_scan,
-			  bool jvmpi_slow_alloc);
-#else // !JVMPI_SUPPORT
 			  bool root_scan);
-#endif // JVMPI_SUPPORT
 
   void par_do_barrier(oop* p);
 
@@ -172,13 +167,8 @@
 
 class ParScanWithBarrierClosure: public ParScanClosure {
 public:
-#ifdef JVMPI_SUPPORT
-  void do_oop(oop* p)    { do_oop_work(p, true, false, false); }
-  void do_oop_nv(oop* p) { do_oop_work(p, true, false, false); }
-#else // !JVMPI_SUPPORT
   void do_oop(oop* p)    { do_oop_work(p, true, false); }
   void do_oop_nv(oop* p) { do_oop_work(p, true, false); }
-#endif // JVMPI_SUPPORT
   ParScanWithBarrierClosure(ParNewGeneration* g,
 			    ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
@@ -189,13 +179,8 @@
   ParScanWithoutBarrierClosure(ParNewGeneration* g,
 			       ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
-#ifdef JVMPI_SUPPORT
-  void do_oop(oop* p)    { do_oop_work(p, false, false, false); }
-  void do_oop_nv(oop* p) { do_oop_work(p, false, false, false); }
-#else // !JVMPI_SUPPORT
   void do_oop(oop* p)    { do_oop_work(p, false, false); }
   void do_oop_nv(oop* p) { do_oop_work(p, false, false); }
-#endif // JVMPI_SUPPORT
 };
 
 class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
@@ -203,11 +188,7 @@
   ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
 				       ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
-#ifdef JVMPI_SUPPORT
-  void do_oop(oop* p) { do_oop_work(p, true, true, false); }
-#else // !JVMPI_SUPPORT
   void do_oop(oop* p) { do_oop_work(p, true, true); }
-#endif // JVMPI_SUPPORT
 };
 
 class ParRootScanWithoutBarrierClosure: public ParScanClosure {
@@ -215,11 +196,7 @@
   ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
 				   ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
-#ifdef JVMPI_SUPPORT
-  void do_oop(oop* p) { do_oop_work(p, false, true, false); }
-#else // !JVMPI_SUPPORT
   void do_oop(oop* p) { do_oop_work(p, false, true); }
-#endif // JVMPI_SUPPORT
 };
 
 class ParScanWeakRefClosure: public ScanWeakRefClosure {
@@ -347,6 +324,7 @@
   CMSMarkStack*    _mark_stack;
   CMSMarkStack*    _revisit_stack;
   bool             _concurrent_precleaning;
+  bool     const   _should_remember_klasses;
  public:
   PushAndMarkClosure(CMSCollector* collector,
                      MemRegion span,
@@ -355,17 +333,7 @@
                      CMSBitMap* mod_union_table,
                      CMSMarkStack*  mark_stack,
                      CMSMarkStack*  revisit_stack,
-                     bool           concurrent_precleaning):
-    OopClosure(rp),
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mod_union_table(mod_union_table),
-    _mark_stack(mark_stack),
-    _revisit_stack(revisit_stack),
-    _concurrent_precleaning(concurrent_precleaning) {
-    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
-  }
+                     bool           concurrent_precleaning);
 
   void do_oop(oop* p);    
   void do_oop_nv(oop* p)  { PushAndMarkClosure::do_oop(p); }
@@ -374,7 +342,7 @@
     return Prefetch::do_read;
   }
   const bool should_remember_klasses() const {
-    return CMSClassUnloadingEnabled;
+    return _should_remember_klasses;
   }
   void remember_klass(Klass* k);
 };
@@ -391,21 +359,14 @@
   CMSBitMap*       _bit_map;
   OopTaskQueue*    _work_queue;
   CMSMarkStack*    _revisit_stack;
+  bool     const   _should_remember_klasses;
  public:
   Par_PushAndMarkClosure(CMSCollector* collector,
                          MemRegion span,
                          ReferenceProcessor* rp,
                          CMSBitMap* bit_map,
                          OopTaskQueue* work_queue,
-                         CMSMarkStack* revisit_stack):
-    OopClosure(rp),
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue),
-    _revisit_stack(revisit_stack) {
-    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
-  }
+                         CMSMarkStack* revisit_stack);
 
   void do_oop(oop* p);
   void do_oop_nv(oop* p)  { Par_PushAndMarkClosure::do_oop(p); }
@@ -414,7 +375,7 @@
     return Prefetch::do_read;
   }
   const bool should_remember_klasses() const {
-    return CMSClassUnloadingEnabled;
+    return _should_remember_klasses;
   }
   void remember_klass(Klass* k);
 };
@@ -496,6 +457,7 @@
   CMSMarkStack*    _revisitStack;
   HeapWord* const  _finger;
   MarkFromRootsClosure* const _parent;
+  bool                  const _should_remember_klasses;
  public:
   PushOrMarkClosure(CMSCollector* cms_collector,
                     MemRegion span,
@@ -507,7 +469,7 @@
   void do_oop(oop* p);
   void do_oop_nv(oop* p)  { PushOrMarkClosure::do_oop(p); }
   const bool should_remember_klasses() const {
-    return CMSClassUnloadingEnabled;
+    return _should_remember_klasses;
   }
   void remember_klass(Klass* k);
   // Deal with a stack overflow condition
@@ -531,6 +493,7 @@
   HeapWord*  const _finger;
   HeapWord** const _global_finger_addr;
   Par_MarkFromRootsClosure* const _parent;
+  bool       const _should_remember_klasses;
  public:
   Par_PushOrMarkClosure(CMSCollector* cms_collector,
                     MemRegion span,
@@ -544,7 +507,7 @@
   void do_oop(oop* p);
   void do_oop_nv(oop* p)  { Par_PushOrMarkClosure::do_oop(p); }
   const bool should_remember_klasses() const {
-    return CMSClassUnloadingEnabled;
+    return _should_remember_klasses;
   }
   void remember_klass(Klass* k);
   // Deal with a stack overflow condition
--- a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)genOopClosures.inline.hpp	1.38 07/05/05 17:05:50 JVM"
+#pragma ident "@(#)genOopClosures.inline.hpp	1.39 07/05/17 15:54:59 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -137,12 +137,7 @@
     } else {
       size_t obj_sz = obj->size_given_klass(objK->klass_part()); 
       *p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
-#ifdef JVMPI_SUPPORT
-                                                           obj, obj_sz, m,
-                                                           false/*jvmpi_slow_alloc*/);
-#else // !JVMPI_SUPPORT
                                                            obj, obj_sz, m);
-#endif // JVMPI_SUPPORT
     }
   }
 }
@@ -164,12 +159,7 @@
 
 inline void ParScanClosure::do_oop_work(oop* p,
                                         bool gc_barrier,
-#ifdef JVMPI_SUPPORT
-                                        bool root_scan,
-                                        bool jvmpi_slow_alloc) {
-#else // !JVMPI_SUPPORT
                                         bool root_scan) {
-#endif // JVMPI_SUPPORT
   oop obj = *p;
   assert((!Universe::heap()->is_in_reserved(p) ||
 	  generation()->is_in_reserved(p))
@@ -191,12 +181,7 @@
 	*p = ParNewGeneration::real_forwardee(obj);
       } else {
         size_t obj_sz = obj->size_given_klass(objK->klass_part()); 
-#ifdef JVMPI_SUPPORT
-        *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m, 
-					jvmpi_slow_alloc);
-#else // !JVMPI_SUPPORT
         *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-#endif // JVMPI_SUPPORT
 	if (root_scan) {
 	  // This may have pushed an object.  If we have a root
 	  // category with a lot of roots, can't let the queue get too
--- a/hotspot/src/share/vm/memory/generation.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/generation.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)generation.hpp	1.194 07/05/05 17:05:51 JVM"
+#pragma ident "@(#)generation.hpp	1.195 07/05/17 15:55:02 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -586,13 +586,6 @@
   // Performance Counter support
   virtual void update_counters() = 0;
   virtual CollectorCounters* counters() { return _gc_counters; }
-
-#ifdef JVMPI_SUPPORT
-  // If the current generation contains "addr", return the 0-based offset
-  // of the "arena" within the generation that contains "addr".  Otherwise,
-  // return the negative of the number of arenas in the generation.
-  virtual int addr_to_arena_id(void* addr) { return -1; }
-#endif // JVMPI_SUPPORT
 };
 
 // Class CardGeneration is a generation that is covered by a card table,
--- a/hotspot/src/share/vm/memory/javaClasses.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2518 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)javaClasses.cpp	1.246 07/05/05 17:05:52 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_javaClasses.cpp.incl"
-
-// Helpful macro for computing field offsets at run time rather than hardcoding them
-#define COMPUTE_OFFSET(klass_name_as_C_str, dest_offset, klass_oop, name_symbol, signature_symbol) \
-{                                                                                                  \
-  fieldDescriptor fd;                                                                              \
-  instanceKlass* ik = instanceKlass::cast(klass_oop);                                              \
-  if (!ik->find_local_field(name_symbol, signature_symbol, &fd)) {                                 \
-    fatal("Invalid layout of " klass_name_as_C_str);                                               \
-  }                                                                                                \
-  dest_offset = fd.offset();                                                                       \
-}
-
-// Same as above but for "optional" offsets that might not be present in certain JDK versions
-#define COMPUTE_OPTIONAL_OFFSET(klass_name_as_C_str, dest_offset, klass_oop, name_symbol, signature_symbol) \
-{                                                                                                  \
-  fieldDescriptor fd;                                                                              \
-  instanceKlass* ik = instanceKlass::cast(klass_oop);                                              \
-  if (ik->find_local_field(name_symbol, signature_symbol, &fd)) {                                  \
-    dest_offset = fd.offset();                                                                     \
-  }                                                                                                \
-}
-
-Handle java_lang_String::basic_create(int length, bool tenured, TRAPS) {
-  // Create the String object first, so there's a chance that the String
-  // and the char array it points to end up in the same cache line.
-  oop obj;
-  if (tenured) {
-    obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_permanent_instance(CHECK_NH);
-  } else {
-    obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_instance(CHECK_NH);
-  }
-
-  // Create the char array.  The String object must be handlized here
-  // because GC can happen as a result of the allocation attempt.
-  Handle h_obj(THREAD, obj);
-  typeArrayOop buffer;
-  if (tenured) {
-    buffer = oopFactory::new_permanent_charArray(length, CHECK_NH);
-  } else {
-    buffer = oopFactory::new_charArray(length, CHECK_NH);
-  }
-
-  // Point the String at the char array
-  obj = h_obj();
-  set_value(obj, buffer);
-  // No need to zero the offset, allocation zero'ed the entire String object
-  assert(offset(obj) == 0, "initial String offset should be zero");
-//set_offset(obj, 0);
-  set_count(obj, length);
-
-  return h_obj;
-}
-
-Handle java_lang_String::basic_create_from_unicode(jchar* unicode, int length, bool tenured, TRAPS) {
-  Handle h_obj = basic_create(length, tenured, CHECK_NH);
-  typeArrayOop buffer = value(h_obj());
-  for (int index = 0; index < length; index++) {
-    buffer->char_at_put(index, unicode[index]);
-  }
-  return h_obj;
-}
-
-Handle java_lang_String::create_from_unicode(jchar* unicode, int length, TRAPS) {
-  return basic_create_from_unicode(unicode, length, false, CHECK_NH);
-}
-
-Handle java_lang_String::create_tenured_from_unicode(jchar* unicode, int length, TRAPS) {
-  return basic_create_from_unicode(unicode, length, true, CHECK_NH);
-}
-
-oop java_lang_String::create_oop_from_unicode(jchar* unicode, int length, TRAPS) {
-  Handle h_obj = basic_create_from_unicode(unicode, length, false, CHECK_0);
-  return h_obj();
-}
-
-Handle java_lang_String::create_from_str(const char* utf8_str, TRAPS) {
-  if (utf8_str == NULL) {
-    return Handle();
-  }
-  int length = UTF8::unicode_length(utf8_str);
-  Handle h_obj = basic_create(length, false, CHECK_NH);
-  if (length > 0) {
-    UTF8::convert_to_unicode(utf8_str, value(h_obj())->char_at_addr(0), length);
-  }
-  return h_obj;
-}
-
-oop java_lang_String::create_oop_from_str(const char* utf8_str, TRAPS) {
-  Handle h_obj = create_from_str(utf8_str, CHECK_0);
-  return h_obj();
-}
-
-Handle java_lang_String::create_from_symbol(symbolHandle symbol, TRAPS) {
-  int length = UTF8::unicode_length((char*)symbol->bytes(), symbol->utf8_length());
-  Handle h_obj = basic_create(length, false, CHECK_NH);
-  if (length > 0) {
-    UTF8::convert_to_unicode((char*)symbol->bytes(), value(h_obj())->char_at_addr(0), length);
-  }
-  return h_obj;
-}
-
-// Converts a C string to a Java String based on current encoding
-Handle java_lang_String::create_from_platform_dependent_str(const char* str, TRAPS) {
-  assert(str != NULL, "bad arguments");
-
-  typedef jstring (*to_java_string_fn_t)(JNIEnv*, const char *);
-  static to_java_string_fn_t _to_java_string_fn = NULL;
-
-  if (_to_java_string_fn == NULL) {
-    void *lib_handle = os::native_java_library();
-    _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, hpi::dll_lookup(lib_handle, "NewStringPlatform"));
-    if (_to_java_string_fn == NULL) {
-      fatal("NewStringPlatform missing");
-    }
-  }
-
-  jstring js = NULL;
-  { JavaThread* thread = (JavaThread*)THREAD;
-    assert(thread->is_Java_thread(), "must be java thread");
-    ThreadToNativeFromVM ttn(thread);
-    HandleMark hm(thread);    
-    js = (_to_java_string_fn)(thread->jni_environment(), str);
-  }
-  return Handle(THREAD, JNIHandles::resolve(js));
-}
-
-Handle java_lang_String::char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS) {
-  oop          obj    = java_string();
-  // Typical usage is to convert all '/' to '.' in string.
-  typeArrayOop value  = java_lang_String::value(obj);
-  int          offset = java_lang_String::offset(obj);
-  int          length = java_lang_String::length(obj);
-
-  // First check if any from_char exist
-  int index; // Declared outside, used later
-  for (index = 0; index < length; index++) {
-    if (value->char_at(index + offset) == from_char) {
-      break;
-    }
-  }
-  if (index == length) {
-    // No from_char, so do not copy.
-    return java_string;
-  }
-
-  // Create new UNICODE buffer. Must handlize value because GC
-  // may happen during String and char array creation.
-  typeArrayHandle h_value(THREAD, value);
-  Handle string = basic_create(length, false, CHECK_NH);
-
-  typeArrayOop from_buffer = h_value();
-  typeArrayOop to_buffer   = java_lang_String::value(string());
-
-  // Copy contents
-  for (index = 0; index < length; index++) {
-    jchar c = from_buffer->char_at(index + offset);
-    if (c == from_char) {
-      c = to_char;
-    }
-    to_buffer->char_at_put(index, c);
-  }  
-  return string;
-}
-
-jchar* java_lang_String::as_unicode_string(oop java_string, int& length) {
-  typeArrayOop value  = java_lang_String::value(java_string);
-  int          offset = java_lang_String::offset(java_string);
-               length = java_lang_String::length(java_string);
-
-  jchar* result = NEW_RESOURCE_ARRAY(jchar, length);
-  for (int index = 0; index < length; index++) {
-    result[index] = value->char_at(index + offset);
-  }
-  return result;
-}
-
-symbolHandle java_lang_String::as_symbol(Handle java_string, TRAPS) {
-  oop          obj    = java_string();
-  typeArrayOop value  = java_lang_String::value(obj);
-  int          offset = java_lang_String::offset(obj);
-  int          length = java_lang_String::length(obj);
-
-  ResourceMark rm(THREAD);
-  symbolHandle result;
-
-  if (length > 0) {
-    int utf8_length = UNICODE::utf8_length(value->char_at_addr(offset), length);
-    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
-    UNICODE::convert_to_utf8(value->char_at_addr(offset), length, chars);
-    // Allocate the symbol
-    result = oopFactory::new_symbol_handle(chars, utf8_length, CHECK_(symbolHandle()));  
-  } else {
-    result = oopFactory::new_symbol_handle("", 0, CHECK_(symbolHandle()));  
-  }
-  return result;
-}
-
-int java_lang_String::utf8_length(oop java_string) {
-  typeArrayOop value  = java_lang_String::value(java_string);
-  int          offset = java_lang_String::offset(java_string);
-  int          length = java_lang_String::length(java_string);
-  jchar* position = (length == 0) ? NULL : value->char_at_addr(offset);
-  return UNICODE::utf8_length(position, length);
-}
-
-char* java_lang_String::as_utf8_string(oop java_string) {
-  typeArrayOop value  = java_lang_String::value(java_string);
-  int          offset = java_lang_String::offset(java_string);
-  int          length = java_lang_String::length(java_string);
-  jchar* position = (length == 0) ? NULL : value->char_at_addr(offset);
-  return UNICODE::as_utf8(position, length);
-}
-
-char* java_lang_String::as_utf8_string(oop java_string, int start, int len) {
-  typeArrayOop value  = java_lang_String::value(java_string);
-  int          offset = java_lang_String::offset(java_string);
-  int          length = java_lang_String::length(java_string);
-  assert(start + len <= length, "just checking");
-  jchar* position = value->char_at_addr(offset + start);
-  return UNICODE::as_utf8(position, len);
-}
-
-bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
-  assert(SharedSkipVerify ||
-         java_string->klass() == SystemDictionary::string_klass(),
-         "must be java_string");
-  typeArrayOop value  = java_lang_String::value(java_string);
-  int          offset = java_lang_String::offset(java_string);
-  int          length = java_lang_String::length(java_string);
-  if (length != len) {
-    return false;
-  }
-  for (int i = 0; i < len; i++) {
-    if (value->char_at(i + offset) != chars[i]) {
-      return false;
-    }
-  }
-  return true;
-}
-
-void java_lang_String::print(Handle java_string, outputStream* st) {
-  oop          obj    = java_string();
-  assert(obj->klass() == SystemDictionary::string_klass(), "must be java_string");
-  typeArrayOop value  = java_lang_String::value(obj);
-  int          offset = java_lang_String::offset(obj);
-  int          length = java_lang_String::length(obj);
-
-  int end = MIN2(length, 100); 
-  if (value == NULL) {
-    // This can happen if, e.g., printing a String
-    // object before its initializer has been called
-    st->print_cr("NULL");
-  } else {
-    st->print("\"");
-    for (int index = 0; index < length; index++) {
-      st->print("%c", value->char_at(index + offset));
-    }
-    st->print("\"");
-  }
-}
-
-
-oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
-  assert(k->java_mirror() == NULL, "should only assign mirror once");
-  // Use this moment of initialization to cache modifier_flags also,
-  // to support Class.getModifiers().  Instance classes recalculate
-  // the cached flags after the class file is parsed, but before the
-  // class is put into the system dictionary.
-  int computed_modifiers = k->compute_modifier_flags(CHECK_0);
-  k->set_modifier_flags(computed_modifiers);
-  if (SystemDictionary::class_klass_loaded()) {
-    // Allocate mirror (java.lang.Class instance)
-    Handle mirror = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
-    // Setup indirections
-    mirror->obj_field_put(klass_offset,  k());
-    k->set_java_mirror(mirror());
-    // It might also have a component mirror.  This mirror must already exist.
-    if (k->oop_is_javaArray()) {
-      Handle comp_mirror;
-      if (k->oop_is_typeArray()) {
-        BasicType type = typeArrayKlass::cast(k->as_klassOop())->element_type();
-        comp_mirror = SystemDictionary::java_mirror(type);
-        assert(comp_mirror.not_null(), "must have primitive mirror");
-      } else if (k->oop_is_objArray()) {
-        klassOop element_klass = objArrayKlass::cast(k->as_klassOop())->element_klass();
-        if (element_klass != NULL
-            && (Klass::cast(element_klass)->oop_is_instance() ||
-                Klass::cast(element_klass)->oop_is_javaArray())) {
-          comp_mirror = Klass::cast(element_klass)->java_mirror();
-          assert(comp_mirror.not_null(), "must have element mirror");
-        }
-        // else some object array internal to the VM, like systemObjArrayKlassObj
-      }
-      if (comp_mirror.not_null()) {
-        // Two-way link between the array klass and its component mirror:
-        arrayKlass::cast(k->as_klassOop())->set_component_mirror(comp_mirror());
-        set_array_klass(comp_mirror(), k->as_klassOop());
-      }
-    }
-    return mirror();
-  } else {
-    return NULL;
-  }
-}
-
-
-oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
-  // This should be improved by adding a field at the Java level or by
-  // introducing a new VM klass (see comment in ClassFileParser)
-  oop java_class = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
-  if (type != T_VOID) {
-    klassOop aklass = Universe::typeArrayKlassObj(type);
-    assert(aklass != NULL, "correct bootstrap");
-    set_array_klass(java_class, aklass);
-  }
-  return java_class;
-}
-
-
-klassOop java_lang_Class::as_klassOop(oop java_class) {
-  //%note memory_2
-  klassOop k = klassOop(java_class->obj_field(klass_offset));
-  assert(k == NULL || k->is_klass(), "type check");
-  return k;
-}
-
-
-klassOop java_lang_Class::array_klass(oop java_class) {
-  klassOop k = klassOop(java_class->obj_field(array_klass_offset));
-  assert(k == NULL || k->is_klass() && Klass::cast(k)->oop_is_javaArray(), "should be array klass");
-  return k;
-}
-
-
-void java_lang_Class::set_array_klass(oop java_class, klassOop klass) {
-  assert(klass->is_klass() && Klass::cast(klass)->oop_is_javaArray(), "should be array klass");
-  java_class->obj_field_put(array_klass_offset, klass);
-}
-
-
-methodOop java_lang_Class::resolved_constructor(oop java_class) {
-  oop constructor = java_class->obj_field(resolved_constructor_offset);
-  assert(constructor == NULL || constructor->is_method(), "should be method");
-  return methodOop(constructor);
-}
-
-
-void java_lang_Class::set_resolved_constructor(oop java_class, methodOop constructor) {
-  assert(constructor->is_method(), "should be method");
-  java_class->obj_field_put(resolved_constructor_offset, constructor);
-}
-
-
-bool java_lang_Class::is_primitive(oop java_class) {
-  klassOop k = klassOop(java_class->obj_field(klass_offset)); 
-  return k == NULL;
-}
-
-
-BasicType java_lang_Class::primitive_type(oop java_class) {
-  assert(java_lang_Class::is_primitive(java_class), "just checking");
-  klassOop ak = klassOop(java_class->obj_field(array_klass_offset));
-  BasicType type = T_VOID;
-  if (ak != NULL) {
-    // Note: create_basic_type_mirror above initializes ak to a non-null value.
-    type = arrayKlass::cast(ak)->element_type();
-  } else {
-    assert(java_class == SystemDictionary::void_mirror(), "only valid non-array primitive");
-  }
-  assert(SystemDictionary::java_mirror(type) == java_class, "must be consistent");
-  return type;
-}
-
-
-oop java_lang_Class::primitive_mirror(BasicType t) {
-  oop mirror = SystemDictionary::java_mirror(t);
-  assert(mirror != NULL && mirror->is_a(SystemDictionary::class_klass()), "must be a Class");
-  assert(java_lang_Class::is_primitive(mirror), "must be primitive");
-  return mirror;
-}
-
-bool java_lang_Class::offsets_computed = false;
-int  java_lang_Class::classRedefinedCount_offset = -1;
-
-void java_lang_Class::compute_offsets() {
-  assert(!offsets_computed, "offsets should be initialized only once");
-  offsets_computed = true;
-
-  klassOop k = SystemDictionary::class_klass();
-  // The classRedefinedCount field is only present starting in 1.5,
-  // so don't go fatal. 
-  COMPUTE_OPTIONAL_OFFSET("java.lang.Class", classRedefinedCount_offset,
-    k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
-}
-
-int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
-  if (!JDK_Version::is_gte_jdk15x_version()
-      || classRedefinedCount_offset == -1) {
-    // The classRedefinedCount field is only present starting in 1.5.
-    // If we don't have an offset for it then just return -1 as a marker.
-    return -1;
-  }
-
-  return the_class_mirror->int_field(classRedefinedCount_offset);
-}
-
-void java_lang_Class::set_classRedefinedCount(oop the_class_mirror, int value) {
-  if (!JDK_Version::is_gte_jdk15x_version()
-      || classRedefinedCount_offset == -1) {
-    // The classRedefinedCount field is only present starting in 1.5.
-    // If we don't have an offset for it then nothing to set.
-    return;
-  }
-
-  the_class_mirror->int_field_put(classRedefinedCount_offset, value);
-}
-
-
-// Note: JDK1.1 and before had a privateInfo_offset field which was used for the
-//       platform thread structure, and a eetop offset which was used for thread
-//       local storage (and unused by the HotSpot VM). In JDK1.2 the two structures 
-//       merged, so in the HotSpot VM we just use the eetop field for the thread 
-//       instead of the privateInfo_offset.
-//
-// Note: The stackSize field is only present starting in 1.4.
-
-int java_lang_Thread::_name_offset = 0;
-int java_lang_Thread::_group_offset = 0;
-int java_lang_Thread::_contextClassLoader_offset = 0;
-int java_lang_Thread::_inheritedAccessControlContext_offset = 0;
-int java_lang_Thread::_priority_offset = 0;
-int java_lang_Thread::_eetop_offset = 0;
-int java_lang_Thread::_daemon_offset = 0;
-int java_lang_Thread::_stillborn_offset = 0;
-int java_lang_Thread::_stackSize_offset = 0;
-int java_lang_Thread::_tid_offset = 0;
-int java_lang_Thread::_thread_status_offset = 0;
-int java_lang_Thread::_park_blocker_offset = 0;
-int java_lang_Thread::_park_event_offset = 0 ; 
-
-
-void java_lang_Thread::compute_offsets() {
-  assert(_group_offset == 0, "offsets should be initialized only once");
-
-  klassOop k = SystemDictionary::thread_klass();
-  COMPUTE_OFFSET("java.lang.Thread", _name_offset,      k, vmSymbols::name_name(),      vmSymbols::char_array_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _group_offset,     k, vmSymbols::group_name(),     vmSymbols::threadgroup_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), vmSymbols::accesscontrolcontext_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _priority_offset,  k, vmSymbols::priority_name(),  vmSymbols::int_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _daemon_offset,    k, vmSymbols::daemon_name(),    vmSymbols::bool_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _eetop_offset,     k, vmSymbols::eetop_name(),     vmSymbols::long_signature());
-  COMPUTE_OFFSET("java.lang.Thread", _stillborn_offset, k, vmSymbols::stillborn_name(), vmSymbols::bool_signature());
-  // The stackSize field is only present starting in 1.4, so don't go fatal. 
-  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _stackSize_offset, k, vmSymbols::stackSize_name(), vmSymbols::long_signature());
-  // The tid and thread_status fields are only present starting in 1.5, so don't go fatal. 
-  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _tid_offset, k, vmSymbols::thread_id_name(), vmSymbols::long_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _thread_status_offset, k, vmSymbols::thread_status_name(), vmSymbols::int_signature());
-  // The parkBlocker field is only present starting in 1.6, so don't go fatal. 
-  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_blocker_offset, k, vmSymbols::park_blocker_name(), vmSymbols::object_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.Thread", _park_event_offset, k, vmSymbols::park_event_name(),
- vmSymbols::long_signature());
-}
-
-
-JavaThread* java_lang_Thread::thread(oop java_thread) {
-  return (JavaThread*) java_thread->obj_field(_eetop_offset);
-}
-
-
-void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
-  // We are storing a JavaThread* (malloc'ed data) into a long field in the thread 
-  // object. The store has to be 64-bit wide so we use a pointer store, but we 
-  // cannot call oopDesc::obj_field_put since it includes a write barrier!
-  oop* addr = java_thread->obj_field_addr(_eetop_offset);
-  *addr = (oop) thread;
-}
-
-
-typeArrayOop java_lang_Thread::name(oop java_thread) {
-  oop name = java_thread->obj_field(_name_offset);  
-  assert(name == NULL || (name->is_typeArray() && typeArrayKlass::cast(name->klass())->element_type() == T_CHAR), "just checking");
-  return typeArrayOop(name);
-}
-
-
-void java_lang_Thread::set_name(oop java_thread, typeArrayOop name) {
-  assert(java_thread->obj_field(_name_offset) == NULL, "name should be NULL");
-  java_thread->obj_field_put(_name_offset, name);
-}
-
-
-ThreadPriority java_lang_Thread::priority(oop java_thread) {
-  return (ThreadPriority)java_thread->int_field(_priority_offset);
-}
-
-
-void java_lang_Thread::set_priority(oop java_thread, ThreadPriority priority) {
-  java_thread->int_field_put(_priority_offset, priority);
-}
-
-
-oop java_lang_Thread::threadGroup(oop java_thread) {
-  return java_thread->obj_field(_group_offset);
-}
-
-
-bool java_lang_Thread::is_stillborn(oop java_thread) {
-  return java_thread->bool_field(_stillborn_offset) != 0;
-}
-
-
-// We never have reason to turn the stillborn bit off
-void java_lang_Thread::set_stillborn(oop java_thread) {
-  java_thread->bool_field_put(_stillborn_offset, true);
-}
-
-
-bool java_lang_Thread::is_alive(oop java_thread) {
-  JavaThread* thr = java_lang_Thread::thread(java_thread);
-  return (thr != NULL);
-}
-
-
-bool java_lang_Thread::is_daemon(oop java_thread) {
-  return java_thread->bool_field(_daemon_offset) != 0;
-}
-
-
-void java_lang_Thread::set_daemon(oop java_thread) {
-  java_thread->bool_field_put(_daemon_offset, true);
-}
-
-oop java_lang_Thread::context_class_loader(oop java_thread) {
-  return java_thread->obj_field(_contextClassLoader_offset);
-}
-
-oop java_lang_Thread::inherited_access_control_context(oop java_thread) {
-  return java_thread->obj_field(_inheritedAccessControlContext_offset);
-}
-
-
-jlong java_lang_Thread::stackSize(oop java_thread) {
-  // The stackSize field is only present starting in 1.4
-  if (_stackSize_offset > 0) {
-    assert(JDK_Version::is_gte_jdk14x_version(), "sanity check");
-    return java_thread->long_field(_stackSize_offset);
-  } else {
-    return 0;
-  }
-}
-
-// Write the thread status value to threadStatus field in java.lang.Thread java class.
-void java_lang_Thread::set_thread_status(oop java_thread,
-                                         java_lang_Thread::ThreadStatus status) {
-  assert(JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm");
-  // The threadStatus is only present starting in 1.5
-  if (_thread_status_offset > 0) {
-    java_thread->int_field_put(_thread_status_offset, status);
-  }
-}
-
-// Read thread status value from threadStatus field in java.lang.Thread java class.
-java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
-  assert(Thread::current()->is_VM_thread() ||
-         JavaThread::current()->thread_state() == _thread_in_vm,
-         "Java Thread is not running in vm");
-  // The threadStatus is only present starting in 1.5
-  if (_thread_status_offset > 0) {
-    return (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
-  } else {
-    // All we can easily figure out is if it is alive, but that is
-    // enough info for a valid unknown status.
-    // These aren't restricted to valid set ThreadStatus values, so
-    // use JVMTI values and cast.
-    JavaThread* thr = java_lang_Thread::thread(java_thread);
-    if (thr == NULL) {
-      // the thread hasn't run yet or is in the process of exiting
-      return NEW;
-    } 
-    return (java_lang_Thread::ThreadStatus)JVMTI_THREAD_STATE_ALIVE;
-  }
-}
-
-
-jlong java_lang_Thread::thread_id(oop java_thread) {
-  // The thread ID field is only present starting in 1.5
-  if (_tid_offset > 0) {
-    return java_thread->long_field(_tid_offset);
-  } else {
-    return 0;
-  }
-}
-
-oop java_lang_Thread::park_blocker(oop java_thread) {
-  assert(JDK_Version::supports_thread_park_blocker() && _park_blocker_offset != 0, 
-         "Must support parkBlocker field");
-
-  if (_park_blocker_offset > 0) {
-    return java_thread->obj_field(_park_blocker_offset);
-  }
-
-  return NULL;
-}
-
-jlong java_lang_Thread::park_event(oop java_thread) {
-  if (_park_event_offset > 0) {
-    return java_thread->long_field(_park_event_offset);
-  }
-  return 0;
-}
- 
-bool java_lang_Thread::set_park_event(oop java_thread, jlong ptr) {
-  if (_park_event_offset > 0) {
-    java_thread->long_field_put(_park_event_offset, ptr);
-    return true;
-  }
-  return false;
-}
-
-
-const char* java_lang_Thread::thread_status_name(oop java_thread) {
-  assert(JDK_Version::is_gte_jdk15x_version() && _thread_status_offset != 0, "Must have thread status");
-  ThreadStatus status = (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
-  switch (status) {
-    case NEW                      : return "NEW";
-    case RUNNABLE                 : return "RUNNABLE";
-    case SLEEPING                 : return "TIMED_WAITING (sleeping)";
-    case IN_OBJECT_WAIT           : return "WAITING (on object monitor)";
-    case IN_OBJECT_WAIT_TIMED     : return "TIMED_WAITING (on object monitor)";
-    case PARKED                   : return "WAITING (parking)";
-    case PARKED_TIMED             : return "TIMED_WAITING (parking)";
-    case BLOCKED_ON_MONITOR_ENTER : return "BLOCKED (on object monitor)";
-    case TERMINATED               : return "TERMINATED";
-    default                       : return "UNKNOWN";
-  };
-}
-int java_lang_ThreadGroup::_parent_offset = 0;
-int java_lang_ThreadGroup::_name_offset = 0;
-int java_lang_ThreadGroup::_threads_offset = 0;
-int java_lang_ThreadGroup::_groups_offset = 0;
-int java_lang_ThreadGroup::_maxPriority_offset = 0;
-int java_lang_ThreadGroup::_destroyed_offset = 0;
-int java_lang_ThreadGroup::_daemon_offset = 0;
-int java_lang_ThreadGroup::_vmAllowSuspension_offset = 0;
-int java_lang_ThreadGroup::_nthreads_offset = 0;
-int java_lang_ThreadGroup::_ngroups_offset = 0;
-
-oop  java_lang_ThreadGroup::parent(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return java_thread_group->obj_field(_parent_offset);
-}
-
-// ("name as oop" accessor is not necessary)
-
-typeArrayOop java_lang_ThreadGroup::name(oop java_thread_group) {
-  oop name = java_thread_group->obj_field(_name_offset);
-  // ThreadGroup.name can be null
-  return name == NULL ? (typeArrayOop)NULL : java_lang_String::value(name);
-}
-
-int java_lang_ThreadGroup::nthreads(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return java_thread_group->int_field(_nthreads_offset);
-}
-
-objArrayOop java_lang_ThreadGroup::threads(oop java_thread_group) {
-  oop threads = java_thread_group->obj_field(_threads_offset);
-  assert(threads != NULL, "threadgroups should have threads");
-  assert(threads->is_objArray(), "just checking"); // Todo: Add better type checking code
-  return objArrayOop(threads);
-}
-
-int java_lang_ThreadGroup::ngroups(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return java_thread_group->int_field(_ngroups_offset);
-}
-
-objArrayOop java_lang_ThreadGroup::groups(oop java_thread_group) {
-  oop groups = java_thread_group->obj_field(_groups_offset);
-  assert(groups == NULL || groups->is_objArray(), "just checking"); // Todo: Add better type checking code
-  return objArrayOop(groups);
-}
-
-ThreadPriority java_lang_ThreadGroup::maxPriority(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return (ThreadPriority) java_thread_group->int_field(_maxPriority_offset);
-}
-
-bool java_lang_ThreadGroup::is_destroyed(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return java_thread_group->bool_field(_destroyed_offset) != 0;
-}
-
-bool java_lang_ThreadGroup::is_daemon(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return java_thread_group->bool_field(_daemon_offset) != 0;
-}
-
-bool java_lang_ThreadGroup::is_vmAllowSuspension(oop java_thread_group) {
-  assert(java_thread_group->is_oop(), "thread group must be oop");
-  return java_thread_group->bool_field(_vmAllowSuspension_offset) != 0;
-}
-
-void java_lang_ThreadGroup::compute_offsets() {
-  assert(_parent_offset == 0, "offsets should be initialized only once");
-
-  klassOop k = SystemDictionary::threadGroup_klass();
-
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _parent_offset,      k, vmSymbols::parent_name(),      vmSymbols::threadgroup_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _name_offset,        k, vmSymbols::name_name(),        vmSymbols::string_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _threads_offset,     k, vmSymbols::threads_name(),     vmSymbols::thread_array_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _groups_offset,      k, vmSymbols::groups_name(),      vmSymbols::threadgroup_array_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _maxPriority_offset, k, vmSymbols::maxPriority_name(), vmSymbols::int_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _destroyed_offset,   k, vmSymbols::destroyed_name(),   vmSymbols::bool_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _daemon_offset,      k, vmSymbols::daemon_name(),      vmSymbols::bool_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _vmAllowSuspension_offset, k, vmSymbols::vmAllowSuspension_name(), vmSymbols::bool_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _nthreads_offset,    k, vmSymbols::nthreads_name(),    vmSymbols::int_signature());
-  COMPUTE_OFFSET("java.lang.ThreadGroup", _ngroups_offset,     k, vmSymbols::ngroups_name(),     vmSymbols::int_signature());
-}
-
-oop java_lang_Throwable::backtrace(oop throwable) {
-  return throwable->obj_field_acquire(backtrace_offset);
-}
-
-
-void java_lang_Throwable::set_backtrace(oop throwable, oop value) {
-  throwable->release_obj_field_put(backtrace_offset, value);
-}
-
-
-oop java_lang_Throwable::message(oop throwable) {
-  return throwable->obj_field(detailMessage_offset);
-}
-
-
-oop java_lang_Throwable::message(Handle throwable) {
-  return throwable->obj_field(detailMessage_offset);
-}
-
-
-void java_lang_Throwable::set_message(oop throwable, oop value) {
-  throwable->obj_field_put(detailMessage_offset, value);
-}
-
-
-void java_lang_Throwable::clear_stacktrace(oop throwable) {
-  assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
-  throwable->obj_field_put(stackTrace_offset, NULL);
-}
-
-
-void java_lang_Throwable::print(oop throwable, outputStream* st) {
-  ResourceMark rm;
-  klassOop k = throwable->klass();
-  assert(k != NULL, "just checking");
-  st->print("%s", instanceKlass::cast(k)->external_name());
-  oop msg = message(throwable);
-  if (msg != NULL) {
-    st->print(": %s", java_lang_String::as_utf8_string(msg));
-  }
-}
-
-
-void java_lang_Throwable::print(Handle throwable, outputStream* st) {
-  ResourceMark rm;
-  klassOop k = throwable->klass();
-  assert(k != NULL, "just checking");
-  st->print("%s", instanceKlass::cast(k)->external_name());
-  oop msg = message(throwable);
-  if (msg != NULL) {
-    st->print(": %s", java_lang_String::as_utf8_string(msg));
-  }
-}
-
-// Print stack trace element to resource allocated buffer
-char* java_lang_Throwable::print_stack_element_to_buffer(methodOop method, int bci) { 
-  // Get strings and string lengths
-  instanceKlass* klass = instanceKlass::cast(method->method_holder());
-  const char* klass_name  = klass->external_name();
-  int buf_len = (int)strlen(klass_name);
-  char* source_file_name;
-  if (klass->source_file_name() == NULL) {
-    source_file_name = NULL;
-  } else {
-    source_file_name = klass->source_file_name()->as_C_string();
-    buf_len += (int)strlen(source_file_name);
-  }
-  char* method_name = method->name()->as_C_string();
-  buf_len += (int)strlen(method_name);
-
-  // Allocate temporary buffer with extra space for formatting and line number
-  char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
-
-  // Print stack trace line in buffer
-  sprintf(buf, "\tat %s.%s", klass_name, method_name);
-  if (method->is_native()) {
-    strcat(buf, "(Native Method)");
-  } else {    
-    int line_number = method->line_number_from_bci(bci);
-    if (source_file_name != NULL && (line_number != -1)) {
-      // Sourcename and linenumber
-      sprintf(buf + (int)strlen(buf), "(%s:%d)", source_file_name, line_number);
-    } else if (source_file_name != NULL) {
-      // Just sourcename
-      sprintf(buf + (int)strlen(buf), "(%s)", source_file_name);      
-    } else {
-      // Neither soucename and linenumber
-      sprintf(buf + (int)strlen(buf), "(Unknown Source)");
-    }
-    nmethod* nm = method->code();
-    if (WizardMode && nm != NULL) {
-      sprintf(buf + (int)strlen(buf), "(nmethod %#x)", nm);
-    }
-  }
-
-  return buf;
-}
-
-
-void java_lang_Throwable::print_stack_element(Handle stream, methodOop method, int bci) {  
-  ResourceMark rm;
-  char* buf = print_stack_element_to_buffer(method, bci);
-  print_to_stream(stream, buf);
-}
-
-void java_lang_Throwable::print_stack_element(outputStream *st, methodOop method, int bci) {  
-  ResourceMark rm;
-  char* buf = print_stack_element_to_buffer(method, bci);
-  st->print_cr("%s", buf);
-}
-
-void java_lang_Throwable::print_to_stream(Handle stream, const char* str) {
-  if (stream.is_null()) {
-    tty->print_cr("%s", str);
-  } else {
-    EXCEPTION_MARK;
-    JavaValue result(T_VOID);
-    Handle arg (THREAD, oopFactory::new_charArray(str, THREAD));
-    if (!HAS_PENDING_EXCEPTION) {
-      JavaCalls::call_virtual(&result, 
-                              stream, 
-                              KlassHandle(THREAD, stream->klass()),
-                              vmSymbolHandles::println_name(), 
-                              vmSymbolHandles::char_array_void_signature(), 
-                              arg, 
-                              THREAD);
-    }
-    // Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
-    if (HAS_PENDING_EXCEPTION) CLEAR_PENDING_EXCEPTION;
-  }
-
-}
-
-
-const char* java_lang_Throwable::no_stack_trace_message() {
-  return "\t<<no stack trace available>>";
-}
-
-
-// Currently used only for exceptions occurring during startup
-void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
-  Thread *THREAD = Thread::current();
-  Handle h_throwable(THREAD, throwable);
-  while (h_throwable.not_null()) {
-    objArrayHandle result (THREAD, objArrayOop(backtrace(h_throwable())));
-    if (result.is_null()) {
-      st->print_cr(no_stack_trace_message());
-      return;
-    }
-  
-    while (result.not_null()) {
-      objArrayHandle methods (THREAD,
-                              objArrayOop(result->obj_at(trace_methods_offset)));
-      typeArrayHandle bcis (THREAD, 
-                            typeArrayOop(result->obj_at(trace_bcis_offset)));
-
-      if (methods.is_null() || bcis.is_null()) {
-        st->print_cr(no_stack_trace_message());
-        return;
-      }
-
-      int length = methods()->length();
-      for (int index = 0; index < length; index++) {
-        methodOop method = methodOop(methods()->obj_at(index));
-        if (method == NULL) goto handle_cause;
-        int bci = bcis->ushort_at(index);
-        print_stack_element(st, method, bci);
-      }
-      result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
-    }
-  handle_cause:
-    {
-      EXCEPTION_MARK;
-      JavaValue result(T_OBJECT);
-      JavaCalls::call_virtual(&result,
-                              h_throwable,
-                              KlassHandle(THREAD, h_throwable->klass()),
-                              vmSymbolHandles::getCause_name(),
-                              vmSymbolHandles::void_throwable_signature(),
-                              THREAD);
-      // Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
-      if (HAS_PENDING_EXCEPTION) {
-        CLEAR_PENDING_EXCEPTION;
-        h_throwable = Handle();
-      } else {
-        h_throwable = Handle(THREAD, (oop) result.get_jobject());
-        if (h_throwable.not_null()) {
-          st->print("Caused by: ");
-          print(h_throwable, st); 
-          st->cr();
-        }
-      }
-    }
-  }
-}
-
-
-void java_lang_Throwable::print_stack_trace(oop throwable, oop print_stream) {
-  // Note: this is no longer used in Merlin, but we support it for compatibility.
-  Thread *thread = Thread::current();
-  Handle stream(thread, print_stream);
-  objArrayHandle result (thread, objArrayOop(backtrace(throwable)));
-  if (result.is_null()) {
-    print_to_stream(stream, no_stack_trace_message());
-    return;
-  }
-  
-  while (result.not_null()) {
-    objArrayHandle methods (thread,
-                            objArrayOop(result->obj_at(trace_methods_offset)));
-    typeArrayHandle bcis (thread, 
-                          typeArrayOop(result->obj_at(trace_bcis_offset)));
-
-    if (methods.is_null() || bcis.is_null()) {
-      print_to_stream(stream, no_stack_trace_message());
-      return;
-    }
-
-    int length = methods()->length();
-    for (int index = 0; index < length; index++) {
-      methodOop method = methodOop(methods()->obj_at(index));
-      if (method == NULL) return;
-      int bci = bcis->ushort_at(index);
-      print_stack_element(stream, method, bci);
-    }
-    result = objArrayHandle(thread, objArrayOop(result->obj_at(trace_next_offset)));
-  }
-}
-
-// This class provides a simple wrapper over the internal structure of
-// exception backtrace to insulate users of the backtrace from needing
-// to know what it looks like.
-class BacktraceBuilder: public StackObj {
- private:
-  Handle          _backtrace;
-  objArrayOop     _head;
-  objArrayOop     _methods;
-  typeArrayOop    _bcis;
-  int             _index;
-  bool            _dirty;
-  bool            _done;
-  No_Safepoint_Verifier _nsv;
-
- public:
-
-  enum {
-    trace_methods_offset = java_lang_Throwable::trace_methods_offset,
-    trace_bcis_offset    = java_lang_Throwable::trace_bcis_offset,
-    trace_next_offset    = java_lang_Throwable::trace_next_offset,
-    trace_size           = java_lang_Throwable::trace_size,
-    trace_chunk_size     = java_lang_Throwable::trace_chunk_size
-  };
-
-  // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
-    expand(CHECK);
-    _backtrace = _head;
-    _index = 0;
-    _dirty = false;
-    _done = false;
-  }
-
-  void flush() {
-    if (_dirty && _methods != NULL) {
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0),
-                                    _methods->length() * HeapWordsPerOop));
-      _dirty = false;
-    }
-  }
-
-  void expand(TRAPS) {
-    flush();
-
-    objArrayHandle old_head(THREAD, _head);
-    Pause_No_Safepoint_Verifier pnsv(&_nsv);
-
-    objArrayOop head = oopFactory::new_objectArray(trace_size, CHECK);
-    objArrayHandle new_head(THREAD, head);
-
-    objArrayOop methods = oopFactory::new_objectArray(trace_chunk_size, CHECK);
-    objArrayHandle new_methods(THREAD, methods);
-
-    typeArrayOop bcis = oopFactory::new_shortArray(trace_chunk_size, CHECK);
-    typeArrayHandle new_bcis(THREAD, bcis);
-
-    if (!old_head.is_null()) {
-      old_head->obj_at_put(trace_next_offset, new_head());
-    }
-    new_head->obj_at_put(trace_methods_offset, new_methods());
-    new_head->obj_at_put(trace_bcis_offset, new_bcis());
-
-    _head    = new_head();
-    _methods = new_methods();
-    _bcis    = new_bcis();
-    _index = 0;
-  }
-
-  oop backtrace() {
-    flush();
-    return _backtrace();
-  }
-
-  inline void push(methodOop method, short bci, TRAPS) {
-    if (_index >= trace_chunk_size) {
-      methodHandle mhandle(THREAD, method);
-      expand(CHECK);
-      method = mhandle();
-    }
-
-    // _methods->obj_at_put(_index, method);
-    *_methods->obj_at_addr(_index) = method;
-    _bcis->ushort_at_put(_index, bci);
-    _index++;
-    _dirty = true;
-  }
-
-  methodOop current_method() {
-    assert(_index >= 0 && _index < trace_chunk_size, "out of range");
-    return methodOop(_methods->obj_at(_index));
-  }
-
-  jushort current_bci() {
-    assert(_index >= 0 && _index < trace_chunk_size, "out of range");
-    return _bcis->ushort_at(_index);
-  }
-};
-
-
-void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) {
-  if (!StackTraceInThrowable) return;
-  ResourceMark rm(THREAD);
-
-  // Start out by clearing the backtrace for this object, in case the VM
-  // runs out of memory while allocating the stack trace
-  set_backtrace(throwable(), NULL);
-  if (JDK_Version::is_gte_jdk14x_version()) {
-    // New since 1.4, clear lazily constructed Java level stacktrace if
-    // refilling occurs
-    clear_stacktrace(throwable());
-  }
-
-  int max_depth = MaxJavaStackTraceDepth;
-  JavaThread* thread = (JavaThread*)THREAD;
-  BacktraceBuilder bt(CHECK);
-
-  // Instead of using vframe directly, this version of fill_in_stack_trace 
-  // basically handles everything by hand. This significantly improved the 
-  // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows.  
-  // See bug 6333838 for  more details.
-  // The "ASSERT" here is to verify this method generates the exactly same stack
-  // trace as utilizing vframe.
-#ifdef ASSERT 
-  vframeStream st(thread);
-  methodHandle st_method(THREAD, st.method());
-#endif
-  int total_count = 0;
-  RegisterMap map(thread, false);
-  int decode_offset = 0;
-  nmethod* nm = NULL;
-  bool skip_fillInStackTrace_check = false;
-  bool skip_throwableInit_check = false;
- 
-  for (frame fr = thread->last_frame(); max_depth != total_count;) {
-    methodOop method = NULL;
-    int bci = 0;
-    
-    // Compiled java method case.
-    if (decode_offset != 0) {
-      DebugInfoReadStream stream(nm, decode_offset);
-      decode_offset = stream.read_int();
-      method = (methodOop)nm->oop_at(stream.read_int());
-      bci = stream.read_bci();
-    } else {
-      if (fr.is_first_frame()) break;      
-      address pc = fr.pc();
-      if (AbstractInterpreter::contains(pc)) {
-        intptr_t bcx = fr.interpreter_frame_bcx();
-        method = fr.interpreter_frame_method();
-        bci =  fr.is_bci(bcx) ? bcx : method->bci_from((address)bcx);
-        fr = fr.sender(&map);
-      } else {
-        CodeBlob* cb = fr.cb();
-        // HMMM QQQ might be nice to have frame return nm as NULL if cb is non-NULL
-        // but non nmethod
-        fr = fr.sender(&map);
-        if (cb == NULL || !cb->is_nmethod()) {
-          continue;
-        }
-        nm = (nmethod*)cb;
-        if (nm->method()->is_native()) {
-          method = nm->method();
-          bci = 0;
-        } else {
-          PcDesc* pd = nm->pc_desc_at(pc);
-          decode_offset = pd->scope_decode_offset();
-          // if decode_offset is not equal to 0, it will execute the 
-          // "compiled java method case" at the beginning of the loop.
-          continue;
-        }
-      }
-    }  
-#ifdef ASSERT
-  assert(st_method() == method && st.bci() == bci,
-         "Wrong stack trace");
-  st.next();
-  // vframeStream::method isn't GC-safe so store off a copy
-  // of the methodOop in case we GC.
-  if (!st.at_end()) {
-    st_method = st.method();
-  }
-#endif
-    if (!skip_fillInStackTrace_check) {
-      // check "fillInStackTrace" only once, so we negate the flag
-      // after the first time check.
-      skip_fillInStackTrace_check = true;
-      if (method->name() == vmSymbols::fillInStackTrace_name()) {
-        continue;
-      }
-    }
-    // skip <init> methods of the exceptions klass. If there is <init> methods
-    // that belongs to a superclass of the exception  we are going to skipping
-    // them in stack trace. This is simlar to classic VM.
-    if (!skip_throwableInit_check) {
-      if (method->name() == vmSymbols::object_initializer_name() &&  
-          throwable->is_a(method->method_holder())) {
-        continue;
-      } else {
-        // if no "Throwable.init()" method found, we stop checking it next time.
-        skip_throwableInit_check = true;
-      }
-    }
-    bt.push(method, bci, CHECK);
-    total_count++;
-  }
-
-  // Put completed stack trace into throwable object
-  set_backtrace(throwable(), bt.backtrace());
-}
-
-void java_lang_Throwable::fill_in_stack_trace(Handle throwable) {
-  // No-op if stack trace is disabled
-  if (!StackTraceInThrowable) {
-    return;
-  }
- 
-  // Disable stack traces for some preallocated out of memory errors
-  if (!Universe::should_fill_in_stack_trace(throwable)) {
-    return;
-  }
- 
-  PRESERVE_EXCEPTION_MARK;
- 
-  JavaThread* thread = JavaThread::active();
-  fill_in_stack_trace(throwable, thread);
-  // ignore exceptions thrown during stack trace filling
-  CLEAR_PENDING_EXCEPTION;  
-}
-
-void java_lang_Throwable::allocate_backtrace(Handle throwable, TRAPS) {
-  // Allocate stack trace - backtrace is created but not filled in
-
-  // No-op if stack trace is disabled 
-  if (!StackTraceInThrowable) return;
-
-  objArrayOop h_oop = oopFactory::new_objectArray(trace_size, CHECK);
-  objArrayHandle backtrace  (THREAD, h_oop);
-  objArrayOop m_oop = oopFactory::new_objectArray(trace_chunk_size, CHECK);
-  objArrayHandle methods (THREAD, m_oop);
-  typeArrayOop b = oopFactory::new_shortArray(trace_chunk_size, CHECK);
-  typeArrayHandle bcis(THREAD, b);
-  
-  // backtrace has space for one chunk (next is NULL)
-  backtrace->obj_at_put(trace_methods_offset, methods());
-  backtrace->obj_at_put(trace_bcis_offset, bcis());
-  set_backtrace(throwable(), backtrace());
-}
-
-
-void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle throwable) {
-  // Fill in stack trace into preallocated backtrace (no GC)
-
-  // No-op if stack trace is disabled
-  if (!StackTraceInThrowable) return;
-
-  assert(throwable->is_a(SystemDictionary::throwable_klass()), "sanity check");
-
-  oop backtrace = java_lang_Throwable::backtrace(throwable());
-  assert(backtrace != NULL, "backtrace not preallocated");
-
-  oop m = objArrayOop(backtrace)->obj_at(trace_methods_offset);
-  objArrayOop methods = objArrayOop(m);
-  assert(methods != NULL && methods->length() > 0, "method array not preallocated");
-  
-  oop b = objArrayOop(backtrace)->obj_at(trace_bcis_offset);
-  typeArrayOop bcis = typeArrayOop(b);
-  assert(bcis != NULL, "bci array not preallocated");
-
-  assert(methods->length() == bcis->length(), "method and bci arrays should match");
-
-  JavaThread* thread = JavaThread::current();
-  ResourceMark rm(thread);
-  vframeStream st(thread); 
-
-  // Unlike fill_in_stack_trace we do not skip fillInStackTrace or throwable init 
-  // methods as preallocated errors aren't created by "java" code. 
-
-  // fill in as much stack trace as possible
-  int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth);
-  int chunk_count = 0;
-
-  for (;!st.at_end(); st.next()) {    
-    // add element
-    bcis->ushort_at_put(chunk_count, st.bci());
-    methods->obj_at_put(chunk_count, st.method());
-
-    chunk_count++;
-
-    // Bail-out for deep stacks
-    if (chunk_count >= max_chunks) break;
-  }
-}
-
-
-int java_lang_Throwable::get_stack_trace_depth(oop throwable, TRAPS) {
-  if (throwable == NULL) {
-    THROW_0(vmSymbols::java_lang_NullPointerException());
-  }
-  objArrayOop chunk = objArrayOop(backtrace(throwable));
-  int depth = 0;
-  if (chunk != NULL) {
-    // Iterate over chunks and count full ones
-    while (true) {
-      objArrayOop next = objArrayOop(chunk->obj_at(trace_next_offset));
-      if (next == NULL) break;
-      depth += trace_chunk_size;
-      chunk = next;
-    }
-    assert(chunk != NULL && chunk->obj_at(trace_next_offset) == NULL, "sanity check");
-    // Count element in remaining partial chunk
-    objArrayOop methods = objArrayOop(chunk->obj_at(trace_methods_offset));
-    typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
-    assert(methods != NULL && bcis != NULL, "sanity check");
-    for (int i = 0; i < methods->length(); i++) {
-      if (methods->obj_at(i) == NULL) break;
-      depth++;
-    }
-  }
-  return depth;
-}
-
-
-oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS) {
-  if (throwable == NULL) {
-    THROW_0(vmSymbols::java_lang_NullPointerException());
-  }
-  if (index < 0) {
-    THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
-  }
-  // Compute how many chunks to skip and index into actual chunk
-  objArrayOop chunk = objArrayOop(backtrace(throwable));
-  int skip_chunks = index / trace_chunk_size;
-  int chunk_index = index % trace_chunk_size;
-  while (chunk != NULL && skip_chunks > 0) {
-    chunk = objArrayOop(chunk->obj_at(trace_next_offset));
-	skip_chunks--;
-  }
-  if (chunk == NULL) {
-    THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
-  }
-  // Get method,bci from chunk
-  objArrayOop methods = objArrayOop(chunk->obj_at(trace_methods_offset));
-  typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
-  assert(methods != NULL && bcis != NULL, "sanity check");
-  methodHandle method(THREAD, methodOop(methods->obj_at(chunk_index)));
-  int bci = bcis->ushort_at(chunk_index);
-  // Chunk can be partial full
-  if (method.is_null()) {
-    THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
-  }
-
-  oop element = java_lang_StackTraceElement::create(method, bci, CHECK_0);
-  return element;
-}
-
-oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
-  // SystemDictionary::stackTraceElement_klass() will be null for pre-1.4 JDKs
-  assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
-
-  // Allocate java.lang.StackTraceElement instance
-  klassOop k = SystemDictionary::stackTraceElement_klass();
-  instanceKlassHandle ik (THREAD, k);
-  if (ik->should_be_initialized()) {
-    ik->initialize(CHECK_0);
-  }
-
-  Handle element = ik->allocate_instance_handle(CHECK_0);
-  // Fill in class name
-  ResourceMark rm(THREAD);
-  const char* str = instanceKlass::cast(method->method_holder())->external_name();
-  oop classname = StringTable::intern((char*) str, CHECK_0);
-  java_lang_StackTraceElement::set_declaringClass(element(), classname);
-  // Fill in method name
-  oop methodname = StringTable::intern(method->name(), CHECK_0);
-  java_lang_StackTraceElement::set_methodName(element(), methodname);
-  // Fill in source file name
-  symbolOop source = instanceKlass::cast(method->method_holder())->source_file_name();
-  oop filename = StringTable::intern(source, CHECK_0);
-  java_lang_StackTraceElement::set_fileName(element(), filename);
-  // File in source line number
-  int line_number;
-  if (method->is_native()) {
-    // Negative value different from -1 below, enabling Java code in 
-    // class java.lang.StackTraceElement to distinguish "native" from
-    // "no LineNumberTable".
-    line_number = -2;
-  } else {
-    // Returns -1 if no LineNumberTable, and otherwise actual line number
-    line_number = method->line_number_from_bci(bci);
-  }
-  java_lang_StackTraceElement::set_lineNumber(element(), line_number);
-
-  return element();
-}
-
-
-void java_lang_reflect_AccessibleObject::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_accessible_object_klass();
-  COMPUTE_OFFSET("java.lang.reflect.AccessibleObject", override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature());
-}
-
-jboolean java_lang_reflect_AccessibleObject::override(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return (jboolean) reflect->bool_field(override_offset);
-}
-
-void java_lang_reflect_AccessibleObject::set_override(oop reflect, jboolean value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  reflect->bool_field_put(override_offset, (int) value);
-}
-
-void java_lang_reflect_Method::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_method_klass();
-  COMPUTE_OFFSET("java.lang.reflect.Method", clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Method", name_offset,           k, vmSymbols::name_name(),           vmSymbols::string_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Method", returnType_offset,     k, vmSymbols::returnType_name(),     vmSymbols::class_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Method", parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Method", exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Method", slot_offset,           k, vmSymbols::slot_name(),           vmSymbols::int_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Method", modifiers_offset,      k, vmSymbols::modifiers_name(),      vmSymbols::int_signature());
-  // The generic signature and annotations fields are only present in 1.5
-  signature_offset = -1;
-  annotations_offset = -1;
-  parameter_annotations_offset = -1;
-  annotation_default_offset = -1;
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", signature_offset,             k, vmSymbols::signature_name(),             vmSymbols::string_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotations_offset,           k, vmSymbols::annotations_name(),           vmSymbols::byte_array_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Method", annotation_default_offset,    k, vmSymbols::annotation_default_name(),    vmSymbols::byte_array_signature());
-}
-
-Handle java_lang_reflect_Method::create(TRAPS) {  
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  klassOop klass = SystemDictionary::reflect_method_klass();
-  // This class is eagerly initialized during VM initialization, since we keep a refence
-  // to one of the methods
-  assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");  
-  return instanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
-}
-
-oop java_lang_reflect_Method::clazz(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->obj_field(clazz_offset);
-}
-
-void java_lang_reflect_Method::set_clazz(oop reflect, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-   reflect->obj_field_put(clazz_offset, value);
-}
-
-int java_lang_reflect_Method::slot(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->int_field(slot_offset);
-}
-
-void java_lang_reflect_Method::set_slot(oop reflect, int value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  reflect->int_field_put(slot_offset, value);
-}
-
-oop java_lang_reflect_Method::name(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return method->obj_field(name_offset);
-}
-
-void java_lang_reflect_Method::set_name(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  method->obj_field_put(name_offset, value);
-}
-
-oop java_lang_reflect_Method::return_type(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return method->obj_field(returnType_offset);
-}
-
-void java_lang_reflect_Method::set_return_type(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  method->obj_field_put(returnType_offset, value);
-}
-
-oop java_lang_reflect_Method::parameter_types(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return method->obj_field(parameterTypes_offset);
-}
-
-void java_lang_reflect_Method::set_parameter_types(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  method->obj_field_put(parameterTypes_offset, value);
-}
-
-oop java_lang_reflect_Method::exception_types(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return method->obj_field(exceptionTypes_offset);
-}
-
-void java_lang_reflect_Method::set_exception_types(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  method->obj_field_put(exceptionTypes_offset, value);
-}
-
-int java_lang_reflect_Method::modifiers(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return method->int_field(modifiers_offset);
-}
-
-void java_lang_reflect_Method::set_modifiers(oop method, int value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  method->int_field_put(modifiers_offset, value);
-}
-
-bool java_lang_reflect_Method::has_signature_field() {
-  return (signature_offset >= 0);
-}
-
-oop java_lang_reflect_Method::signature(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_signature_field(), "signature field must be present");
-  return method->obj_field(signature_offset);
-}
-
-void java_lang_reflect_Method::set_signature(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_signature_field(), "signature field must be present");
-  method->obj_field_put(signature_offset, value);
-}
-
-bool java_lang_reflect_Method::has_annotations_field() {
-  return (annotations_offset >= 0);
-}
-
-oop java_lang_reflect_Method::annotations(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotations_field(), "annotations field must be present");
-  return method->obj_field(annotations_offset);
-}
-
-void java_lang_reflect_Method::set_annotations(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotations_field(), "annotations field must be present");
-  method->obj_field_put(annotations_offset, value);
-}
-
-bool java_lang_reflect_Method::has_parameter_annotations_field() {
-  return (parameter_annotations_offset >= 0);
-}
-
-oop java_lang_reflect_Method::parameter_annotations(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
-  return method->obj_field(parameter_annotations_offset);
-}
-
-void java_lang_reflect_Method::set_parameter_annotations(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
-  method->obj_field_put(parameter_annotations_offset, value);
-}
-
-bool java_lang_reflect_Method::has_annotation_default_field() {
-  return (annotation_default_offset >= 0);
-}
-
-oop java_lang_reflect_Method::annotation_default(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotation_default_field(), "annotation default field must be present");
-  return method->obj_field(annotation_default_offset);
-}
-
-void java_lang_reflect_Method::set_annotation_default(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotation_default_field(), "annotation default field must be present");
-  method->obj_field_put(annotation_default_offset, value);
-}
-
-void java_lang_reflect_Constructor::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_constructor_klass();
-  COMPUTE_OFFSET("java.lang.reflect.Constructor", clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Constructor", parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Constructor", exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Constructor", slot_offset,           k, vmSymbols::slot_name(),           vmSymbols::int_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Constructor", modifiers_offset,      k, vmSymbols::modifiers_name(),      vmSymbols::int_signature());
-  // The generic signature and annotations fields are only present in 1.5
-  signature_offset = -1;
-  annotations_offset = -1;
-  parameter_annotations_offset = -1;
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", signature_offset,             k, vmSymbols::signature_name(),             vmSymbols::string_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", annotations_offset,           k, vmSymbols::annotations_name(),           vmSymbols::byte_array_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Constructor", parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
-}
-
-Handle java_lang_reflect_Constructor::create(TRAPS) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  symbolHandle name = vmSymbolHandles::java_lang_reflect_Constructor();
-  klassOop k = SystemDictionary::resolve_or_fail(name, true, CHECK_NH);
-  instanceKlassHandle klass (THREAD, k);
-  // Ensure it is initialized
-  klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
-}
-
-oop java_lang_reflect_Constructor::clazz(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->obj_field(clazz_offset);
-}
-
-void java_lang_reflect_Constructor::set_clazz(oop reflect, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-   reflect->obj_field_put(clazz_offset, value);
-}
-
-oop java_lang_reflect_Constructor::parameter_types(oop constructor) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return constructor->obj_field(parameterTypes_offset);
-}
-
-void java_lang_reflect_Constructor::set_parameter_types(oop constructor, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  constructor->obj_field_put(parameterTypes_offset, value);
-}
-
-oop java_lang_reflect_Constructor::exception_types(oop constructor) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return constructor->obj_field(exceptionTypes_offset);
-}
-
-void java_lang_reflect_Constructor::set_exception_types(oop constructor, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  constructor->obj_field_put(exceptionTypes_offset, value);
-}
-
-int java_lang_reflect_Constructor::slot(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->int_field(slot_offset);
-}
-
-void java_lang_reflect_Constructor::set_slot(oop reflect, int value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  reflect->int_field_put(slot_offset, value);
-}
-
-int java_lang_reflect_Constructor::modifiers(oop constructor) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return constructor->int_field(modifiers_offset);
-}
-
-void java_lang_reflect_Constructor::set_modifiers(oop constructor, int value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  constructor->int_field_put(modifiers_offset, value);
-}
-
-bool java_lang_reflect_Constructor::has_signature_field() {
-  return (signature_offset >= 0);
-}
-
-oop java_lang_reflect_Constructor::signature(oop constructor) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_signature_field(), "signature field must be present");
-  return constructor->obj_field(signature_offset);
-}
-
-void java_lang_reflect_Constructor::set_signature(oop constructor, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_signature_field(), "signature field must be present");
-  constructor->obj_field_put(signature_offset, value);
-}
-
-bool java_lang_reflect_Constructor::has_annotations_field() {
-  return (annotations_offset >= 0);
-}
-
-oop java_lang_reflect_Constructor::annotations(oop constructor) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotations_field(), "annotations field must be present");
-  return constructor->obj_field(annotations_offset);
-}
-
-void java_lang_reflect_Constructor::set_annotations(oop constructor, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotations_field(), "annotations field must be present");
-  constructor->obj_field_put(annotations_offset, value);
-}
-
-bool java_lang_reflect_Constructor::has_parameter_annotations_field() {
-  return (parameter_annotations_offset >= 0);
-}
-
-oop java_lang_reflect_Constructor::parameter_annotations(oop method) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
-  return method->obj_field(parameter_annotations_offset);
-}
-
-void java_lang_reflect_Constructor::set_parameter_annotations(oop method, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_parameter_annotations_field(), "parameter annotations field must be present");
-  method->obj_field_put(parameter_annotations_offset, value);
-}
-
-void java_lang_reflect_Field::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_field_klass();
-  COMPUTE_OFFSET("java.lang.reflect.Field", clazz_offset,     k, vmSymbols::clazz_name(),     vmSymbols::class_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Field", name_offset,      k, vmSymbols::name_name(),      vmSymbols::string_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Field", type_offset,      k, vmSymbols::type_name(),      vmSymbols::class_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Field", slot_offset,      k, vmSymbols::slot_name(),      vmSymbols::int_signature());
-  COMPUTE_OFFSET("java.lang.reflect.Field", modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature());
-  // The generic signature and annotations fields are only present in 1.5
-  signature_offset = -1;
-  annotations_offset = -1;
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature());
-  COMPUTE_OPTIONAL_OFFSET("java.lang.reflect.Field", annotations_offset,  k, vmSymbols::annotations_name(),  vmSymbols::byte_array_signature());
-}
-
-Handle java_lang_reflect_Field::create(TRAPS) {  
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  symbolHandle name = vmSymbolHandles::java_lang_reflect_Field();
-  klassOop k = SystemDictionary::resolve_or_fail(name, true, CHECK_NH);
-  instanceKlassHandle klass (THREAD, k);
-  // Ensure it is initialized
-  klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
-}
-
-oop java_lang_reflect_Field::clazz(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->obj_field(clazz_offset);
-}
-
-void java_lang_reflect_Field::set_clazz(oop reflect, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-   reflect->obj_field_put(clazz_offset, value);
-}
-
-oop java_lang_reflect_Field::name(oop field) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return field->obj_field(name_offset);
-}
-
-void java_lang_reflect_Field::set_name(oop field, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  field->obj_field_put(name_offset, value);
-}
-
-oop java_lang_reflect_Field::type(oop field) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return field->obj_field(type_offset);
-}
-
-void java_lang_reflect_Field::set_type(oop field, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  field->obj_field_put(type_offset, value);
-}
-
-int java_lang_reflect_Field::slot(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->int_field(slot_offset);
-}
-
-void java_lang_reflect_Field::set_slot(oop reflect, int value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  reflect->int_field_put(slot_offset, value);
-}
-
-int java_lang_reflect_Field::modifiers(oop field) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return field->int_field(modifiers_offset);
-}
-
-void java_lang_reflect_Field::set_modifiers(oop field, int value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  field->int_field_put(modifiers_offset, value);
-}
-
-bool java_lang_reflect_Field::has_signature_field() {
-  return (signature_offset >= 0);
-}
-
-oop java_lang_reflect_Field::signature(oop field) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_signature_field(), "signature field must be present");
-  return field->obj_field(signature_offset);
-}
-
-void java_lang_reflect_Field::set_signature(oop field, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_signature_field(), "signature field must be present");
-  field->obj_field_put(signature_offset, value);
-}
-
-bool java_lang_reflect_Field::has_annotations_field() {
-  return (annotations_offset >= 0);
-}
-
-oop java_lang_reflect_Field::annotations(oop field) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotations_field(), "annotations field must be present");
-  return field->obj_field(annotations_offset);
-}
-
-void java_lang_reflect_Field::set_annotations(oop field, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  assert(has_annotations_field(), "annotations field must be present");
-  field->obj_field_put(annotations_offset, value);
-}
-
-
-void sun_reflect_ConstantPool::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_constant_pool_klass();
-  // This null test can be removed post beta
-  if (k != NULL) {
-    COMPUTE_OFFSET("sun.reflect.ConstantPool", _cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature());
-  }
-}
-
-
-Handle sun_reflect_ConstantPool::create(TRAPS) {  
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  klassOop k = SystemDictionary::reflect_constant_pool_klass();
-  instanceKlassHandle klass (THREAD, k);
-  // Ensure it is initialized
-  klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
-}
-
-
-oop sun_reflect_ConstantPool::cp_oop(oop reflect) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  return reflect->obj_field(_cp_oop_offset);
-}
-
-
-void sun_reflect_ConstantPool::set_cp_oop(oop reflect, oop value) {
-  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
-  reflect->obj_field_put(_cp_oop_offset, value);
-}
-
-void sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
-  klassOop k = SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass();
-  // This null test can be removed post beta
-  if (k != NULL) {
-    COMPUTE_OFFSET("sun.reflect.UnsafeStaticFieldAccessorImpl", _base_offset, k,
-                   vmSymbols::base_name(), vmSymbols::object_signature());
-  }
-}
-
-oop java_lang_boxing_object::initialize_and_allocate(klassOop k, TRAPS) {
- instanceKlassHandle h (THREAD, k);
- if (!h->is_initialized()) h->initialize(CHECK_0);
- return h->allocate_instance(THREAD);
-}
-
-
-oop java_lang_boxing_object::create(BasicType type, jvalue* value, TRAPS) {
-  oop box;
-  switch (type) {
-    case T_BOOLEAN:
-      box = initialize_and_allocate(SystemDictionary::boolean_klass(), CHECK_0);
-      box->bool_field_put(value_offset, value->z);
-      break;
-    case T_CHAR:
-      box = initialize_and_allocate(SystemDictionary::char_klass(), CHECK_0);
-      box->char_field_put(value_offset, value->c);
-      break;
-    case T_FLOAT:
-      box = initialize_and_allocate(SystemDictionary::float_klass(), CHECK_0);
-      box->float_field_put(value_offset, value->f);
-      break;
-    case T_DOUBLE:
-      box = initialize_and_allocate(SystemDictionary::double_klass(), CHECK_0);
-      box->double_field_put(value_offset, value->d);
-      break;
-    case T_BYTE:
-      box = initialize_and_allocate(SystemDictionary::byte_klass(), CHECK_0);
-      box->byte_field_put(value_offset, value->b);
-      break;
-    case T_SHORT:
-      box = initialize_and_allocate(SystemDictionary::short_klass(), CHECK_0);
-      box->short_field_put(value_offset, value->s);
-      break;
-    case T_INT:
-      box = initialize_and_allocate(SystemDictionary::int_klass(), CHECK_0);
-      box->int_field_put(value_offset, value->i);
-      break;
-    case T_LONG:
-      box = initialize_and_allocate(SystemDictionary::long_klass(), CHECK_0);
-      box->long_field_put(value_offset, value->j);
-      break;
-    default:
-      return NULL;
-  }
-  return box;
-}
-
-
-BasicType java_lang_boxing_object::get_value(oop box, jvalue* value) {
-  klassOop k = box->klass();
-  if (k == SystemDictionary::boolean_klass()) {
-    value->z = box->bool_field(value_offset);
-    return T_BOOLEAN;
-  }
-  if (k == SystemDictionary::char_klass()) {
-    value->c = box->char_field(value_offset);
-    return T_CHAR;
-  }
-  if (k == SystemDictionary::float_klass()) {
-    value->f = box->float_field(value_offset);
-    return T_FLOAT;
-  }
-  if (k == SystemDictionary::double_klass()) {
-    value->d = box->double_field(value_offset);
-    return T_DOUBLE;
-  }
-  if (k == SystemDictionary::byte_klass()) {
-    value->b = box->byte_field(value_offset);
-    return T_BYTE;
-  }
-  if (k == SystemDictionary::short_klass()) {
-    value->s = box->short_field(value_offset);
-    return T_SHORT;
-  }
-  if (k == SystemDictionary::int_klass()) {
-    value->i = box->int_field(value_offset);
-    return T_INT;
-  }
-  if (k == SystemDictionary::long_klass()) {
-    value->j = box->long_field(value_offset);
-    return T_LONG;
-  }
-  return T_ILLEGAL;
-}
-
-
-BasicType java_lang_boxing_object::set_value(oop box, jvalue* value) {
-  klassOop k = box->klass();
-  if (k == SystemDictionary::boolean_klass()) {
-    box->bool_field_put(value_offset, value->z);
-    return T_BOOLEAN;
-  }
-  if (k == SystemDictionary::char_klass()) {
-    box->char_field_put(value_offset, value->c);
-    return T_CHAR;
-  }
-  if (k == SystemDictionary::float_klass()) {
-    box->float_field_put(value_offset, value->f);
-    return T_FLOAT;
-  }
-  if (k == SystemDictionary::double_klass()) {
-    box->double_field_put(value_offset, value->d);
-    return T_DOUBLE;
-  }
-  if (k == SystemDictionary::byte_klass()) {
-    box->byte_field_put(value_offset, value->b);
-    return T_BYTE;
-  }
-  if (k == SystemDictionary::short_klass()) {
-    box->short_field_put(value_offset, value->s);
-    return T_SHORT;
-  }
-  if (k == SystemDictionary::int_klass()) {
-    box->int_field_put(value_offset, value->i);
-    return T_INT;
-  }
-  if (k == SystemDictionary::long_klass()) {
-    box->long_field_put(value_offset, value->j);
-    return T_LONG;
-  }
-  return T_ILLEGAL;
-}
-
-
-// Support for java_lang_ref_Reference
-
-void java_lang_ref_Reference::set_referent(oop ref, oop value) {
-  ref->obj_field_put(referent_offset, value);
-}
-
-oop* java_lang_ref_Reference::referent_addr(oop ref) {
-  return ref->obj_field_addr(referent_offset);
-}
-
-void java_lang_ref_Reference::set_next(oop ref, oop value) {
-  ref->obj_field_put(next_offset, value);
-}
-
-oop* java_lang_ref_Reference::next_addr(oop ref) {
-  return ref->obj_field_addr(next_offset);
-}
-
-void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
-  ref->obj_field_put(discovered_offset, value);
-}
-
-oop* java_lang_ref_Reference::discovered_addr(oop ref) {
-  return ref->obj_field_addr(discovered_offset);
-}
-
-oop* java_lang_ref_Reference::pending_list_lock_addr() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
-  return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset);
-}
-
-oop* java_lang_ref_Reference::pending_list_addr() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
-  return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset);
-}
-
-
-// Support for java_lang_ref_SoftReference
-
-jlong java_lang_ref_SoftReference::timestamp(oop ref) {
-  return ref->long_field(timestamp_offset);
-}
-
-jlong java_lang_ref_SoftReference::clock() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
-  int offset = ik->offset_of_static_fields() + static_clock_offset;
-
-  return SystemDictionary::soft_reference_klass()->long_field(offset);
-}
-
-void java_lang_ref_SoftReference::set_clock(jlong value) {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
-  int offset = ik->offset_of_static_fields() + static_clock_offset;
-
-  SystemDictionary::soft_reference_klass()->long_field_put(offset, value);
-}
-
-
-// Support for java_security_AccessControlContext
-
-int java_security_AccessControlContext::_context_offset = 0;
-int java_security_AccessControlContext::_privilegedContext_offset = 0;
-int java_security_AccessControlContext::_isPrivileged_offset = 0;
-
-
-void java_security_AccessControlContext::compute_offsets() {
-  assert(_isPrivileged_offset == 0, "offsets should be initialized only once");
-  fieldDescriptor fd;
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::AccessControlContext_klass());
-
-  if (!ik->find_local_field(vmSymbols::context_name(), vmSymbols::protectiondomain_signature(), &fd)) {
-    fatal("Invalid layout of java.security.AccessControlContext");
-  }
-  _context_offset = fd.offset();
-
-  if (!ik->find_local_field(vmSymbols::privilegedContext_name(), vmSymbols::accesscontrolcontext_signature(), &fd)) {
-    fatal("Invalid layout of java.security.AccessControlContext");
-  }
-  _privilegedContext_offset = fd.offset();
-
-  if (!ik->find_local_field(vmSymbols::isPrivileged_name(), vmSymbols::bool_signature(), &fd)) {
-    fatal("Invalid layout of java.security.AccessControlContext");
-  }
-  _isPrivileged_offset = fd.offset();
-}
-
-
-oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {  
-  assert(_isPrivileged_offset != 0, "offsets should have been initialized");
-  // Ensure klass is initialized
-  instanceKlass::cast(SystemDictionary::AccessControlContext_klass())->initialize(CHECK_0);
-  // Allocate result
-  oop result = instanceKlass::cast(SystemDictionary::AccessControlContext_klass())->allocate_instance(CHECK_0);
-  // Fill in values
-  result->obj_field_put(_context_offset, context());
-  result->obj_field_put(_privilegedContext_offset, privileged_context());
-  result->bool_field_put(_isPrivileged_offset, isPrivileged);
-  return result;
-}
-
-
-// Support for java_lang_ClassLoader
-
-oop java_lang_ClassLoader::parent(oop loader) {
-  assert(loader->is_oop(), "loader must be oop");
-  return loader->obj_field(parent_offset);
-}
-
-
-bool java_lang_ClassLoader::is_trusted_loader(oop loader) {
-  // Fix for 4474172; see evaluation for more details
-  loader = non_reflection_class_loader(loader);
-
-  oop cl = SystemDictionary::java_system_loader();
-  while(cl != NULL) {
-    if (cl == loader) return true;
-    cl = parent(cl);
-  }
-  return false;
-}
-
-oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
-  if (loader != NULL) {
-    // See whether this is one of the class loaders associated with
-    // the generated bytecodes for reflection, and if so, "magically"
-    // delegate to its parent to prevent class loading from occurring
-    // in places where applications using reflection didn't expect it.
-    klassOop delegating_cl_class = SystemDictionary::reflect_delegating_classloader_klass();
-    // This might be null in non-1.4 JDKs
-    if (delegating_cl_class != NULL && loader->is_a(delegating_cl_class)) {
-      return parent(loader);
-    }
-  }
-  return loader;
-}
-
-
-// Support for java_lang_System
-
-void java_lang_System::compute_offsets() {
-  assert(offset_of_static_fields == 0, "offsets should be initialized only once");
-
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::system_klass());
-  offset_of_static_fields = ik->offset_of_static_fields();
-}
-
-int java_lang_System::in_offset_in_bytes() {
-  return (offset_of_static_fields + static_in_offset);
-}
-
-
-int java_lang_System::out_offset_in_bytes() {
-  return (offset_of_static_fields + static_out_offset);
-}
-
-
-int java_lang_System::err_offset_in_bytes() {
-  return (offset_of_static_fields + static_err_offset);
-}
-
-
-
-int java_lang_String::value_offset;
-int java_lang_String::offset_offset;
-int java_lang_String::count_offset;
-int java_lang_String::hash_offset;
-int java_lang_Class::klass_offset;
-int java_lang_Class::array_klass_offset;
-int java_lang_Class::resolved_constructor_offset;
-int java_lang_Class::number_of_fake_oop_fields;
-int java_lang_Throwable::backtrace_offset;
-int java_lang_Throwable::detailMessage_offset;
-int java_lang_Throwable::cause_offset;
-int java_lang_Throwable::stackTrace_offset;
-int java_lang_reflect_AccessibleObject::override_offset;
-int java_lang_reflect_Method::clazz_offset;
-int java_lang_reflect_Method::name_offset;
-int java_lang_reflect_Method::returnType_offset;
-int java_lang_reflect_Method::parameterTypes_offset;
-int java_lang_reflect_Method::exceptionTypes_offset;
-int java_lang_reflect_Method::slot_offset;
-int java_lang_reflect_Method::modifiers_offset;
-int java_lang_reflect_Method::signature_offset;
-int java_lang_reflect_Method::annotations_offset;
-int java_lang_reflect_Method::parameter_annotations_offset;
-int java_lang_reflect_Method::annotation_default_offset;
-int java_lang_reflect_Constructor::clazz_offset;
-int java_lang_reflect_Constructor::parameterTypes_offset;
-int java_lang_reflect_Constructor::exceptionTypes_offset;
-int java_lang_reflect_Constructor::slot_offset;
-int java_lang_reflect_Constructor::modifiers_offset;
-int java_lang_reflect_Constructor::signature_offset;
-int java_lang_reflect_Constructor::annotations_offset;
-int java_lang_reflect_Constructor::parameter_annotations_offset;
-int java_lang_reflect_Field::clazz_offset;
-int java_lang_reflect_Field::name_offset;
-int java_lang_reflect_Field::type_offset;
-int java_lang_reflect_Field::slot_offset;
-int java_lang_reflect_Field::modifiers_offset;
-int java_lang_reflect_Field::signature_offset;
-int java_lang_reflect_Field::annotations_offset;
-int java_lang_boxing_object::value_offset;
-int java_lang_ref_Reference::referent_offset;
-int java_lang_ref_Reference::queue_offset;
-int java_lang_ref_Reference::next_offset;
-int java_lang_ref_Reference::discovered_offset;
-int java_lang_ref_Reference::static_lock_offset;
-int java_lang_ref_Reference::static_pending_offset;
-int java_lang_ref_Reference::number_of_fake_oop_fields;
-int java_lang_ref_SoftReference::timestamp_offset;
-int java_lang_ref_SoftReference::static_clock_offset;
-int java_lang_ClassLoader::parent_offset;
-int java_lang_System::offset_of_static_fields;
-int java_lang_System::static_in_offset;
-int java_lang_System::static_out_offset;
-int java_lang_System::static_err_offset;
-int java_lang_StackTraceElement::declaringClass_offset;
-int java_lang_StackTraceElement::methodName_offset;
-int java_lang_StackTraceElement::fileName_offset;
-int java_lang_StackTraceElement::lineNumber_offset;
-int java_lang_AssertionStatusDirectives::classes_offset;
-int java_lang_AssertionStatusDirectives::classEnabled_offset;
-int java_lang_AssertionStatusDirectives::packages_offset;
-int java_lang_AssertionStatusDirectives::packageEnabled_offset;
-int java_lang_AssertionStatusDirectives::deflt_offset;
-int java_nio_Buffer::_limit_offset;
-int sun_misc_AtomicLongCSImpl::_value_offset;
-int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
-int sun_reflect_ConstantPool::_cp_oop_offset;
-int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
-
-
-// Support for java_lang_StackTraceElement
-
-void java_lang_StackTraceElement::set_fileName(oop element, oop value) {
-  element->obj_field_put(fileName_offset, value);
-}
-
-void java_lang_StackTraceElement::set_declaringClass(oop element, oop value) {
-  element->obj_field_put(declaringClass_offset, value);
-}
-
-void java_lang_StackTraceElement::set_methodName(oop element, oop value) {
-  element->obj_field_put(methodName_offset, value);
-}
-
-void java_lang_StackTraceElement::set_lineNumber(oop element, int value) {
-  element->int_field_put(lineNumber_offset, value);
-}
-  
-  
-// Support for java Assertions - java_lang_AssertionStatusDirectives.
-
-void java_lang_AssertionStatusDirectives::set_classes(oop o, oop val) {
-  o->obj_field_put(classes_offset, val);
-}
-
-void java_lang_AssertionStatusDirectives::set_classEnabled(oop o, oop val) {
-  o->obj_field_put(classEnabled_offset, val);
-}
-
-void java_lang_AssertionStatusDirectives::set_packages(oop o, oop val) {
-  o->obj_field_put(packages_offset, val);
-}
-
-void java_lang_AssertionStatusDirectives::set_packageEnabled(oop o, oop val) {
-  o->obj_field_put(packageEnabled_offset, val);
-}
-
-void java_lang_AssertionStatusDirectives::set_deflt(oop o, bool val) {
-  o->bool_field_put(deflt_offset, val);
-}
-
-
-// Support for intrinsification of java.nio.Buffer.checkIndex
-int java_nio_Buffer::limit_offset() {
-  return _limit_offset;
-}
-
-
-void java_nio_Buffer::compute_offsets() {
-  klassOop k = SystemDictionary::java_nio_Buffer_klass();
-  COMPUTE_OFFSET("java.nio.Buffer", _limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
-}
-
-// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
-int sun_misc_AtomicLongCSImpl::value_offset() {
-  assert(SystemDictionary::sun_misc_AtomicLongCSImpl_klass() != NULL, "can't call this");
-  return _value_offset;
-}
-
-
-void sun_misc_AtomicLongCSImpl::compute_offsets() {
-  klassOop k = SystemDictionary::sun_misc_AtomicLongCSImpl_klass();
-  // If this class is not present, its value field offset won't be referenced.
-  if (k != NULL) {
-    COMPUTE_OFFSET("sun.misc.AtomicLongCSImpl", _value_offset, k, vmSymbols::sun_misc_AtomicLongCSImpl_value_name(), vmSymbols::long_signature());
-  }
-}
-
-void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
-  if (_owner_offset != 0) return;
-
-  assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
-  SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK);
-  klassOop k = SystemDictionary::abstract_ownable_synchronizer_klass();
-  COMPUTE_OFFSET("java.util.concurrent.locks.AbstractOwnableSynchronizer", _owner_offset, k, 
-                 vmSymbols::exclusive_owner_thread_name(), vmSymbols::thread_signature());
-}
-
-oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(oop obj) {
-  assert(_owner_offset != 0, "Must be initialized");
-  return obj->obj_field(_owner_offset);
-}
-
-// Compute hard-coded offsets
-// Invoked before SystemDictionary::initialize, so pre-loaded classes
-// are not available to determine the offset_of_static_fields.
-void JavaClasses::compute_hard_coded_offsets() {
-  const int x = wordSize;  			
-  const int header = instanceOopDesc::header_size_in_bytes();
-
-  // Do the String Class
-  java_lang_String::value_offset  = java_lang_String::hc_value_offset  * x + header;
-  java_lang_String::offset_offset = java_lang_String::hc_offset_offset * x + header;
-  java_lang_String::count_offset  = java_lang_String::offset_offset + sizeof (jint);
-  java_lang_String::hash_offset   = java_lang_String::count_offset + sizeof (jint);
-
-  // Do the Class Class
-  java_lang_Class::klass_offset = java_lang_Class::hc_klass_offset * x + header;
-  java_lang_Class::array_klass_offset = java_lang_Class::hc_array_klass_offset * x + header;
-  java_lang_Class::resolved_constructor_offset = java_lang_Class::hc_resolved_constructor_offset * x + header;
-
-  // This is NOT an offset
-  java_lang_Class::number_of_fake_oop_fields = java_lang_Class::hc_number_of_fake_oop_fields;
-
-  // Throwable Class
-  java_lang_Throwable::backtrace_offset  = java_lang_Throwable::hc_backtrace_offset  * x + header;
-  java_lang_Throwable::detailMessage_offset = java_lang_Throwable::hc_detailMessage_offset * x + header;
-  java_lang_Throwable::cause_offset      = java_lang_Throwable::hc_cause_offset      * x + header;
-  java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header;
-
-  // java_lang_boxing_object
-  java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset * x + header;
-
-  // java_lang_ref_Reference:
-  java_lang_ref_Reference::referent_offset = java_lang_ref_Reference::hc_referent_offset * x + header;
-  java_lang_ref_Reference::queue_offset = java_lang_ref_Reference::hc_queue_offset * x + header;
-  java_lang_ref_Reference::next_offset  = java_lang_ref_Reference::hc_next_offset * x + header;
-  java_lang_ref_Reference::discovered_offset  = java_lang_ref_Reference::hc_discovered_offset * x + header;
-  java_lang_ref_Reference::static_lock_offset = java_lang_ref_Reference::hc_static_lock_offset *  x;
-  java_lang_ref_Reference::static_pending_offset = java_lang_ref_Reference::hc_static_pending_offset * x;
-  // Artificial fields for java_lang_ref_Reference
-  // The first field is for the discovered field added in 1.4
-  java_lang_ref_Reference::number_of_fake_oop_fields = 1;
-
-  // java_lang_ref_SoftReference Class
-  java_lang_ref_SoftReference::timestamp_offset = java_lang_ref_SoftReference::hc_timestamp_offset * x + header;
-  // Don't multiply static fields because they are always in wordSize units
-  java_lang_ref_SoftReference::static_clock_offset = java_lang_ref_SoftReference::hc_static_clock_offset * x;
-
-  // java_lang_ClassLoader
-  java_lang_ClassLoader::parent_offset = java_lang_ClassLoader::hc_parent_offset * x + header;
-
-  // java_lang_System
-  java_lang_System::static_in_offset  = java_lang_System::hc_static_in_offset  * x;
-  java_lang_System::static_out_offset = java_lang_System::hc_static_out_offset * x;
-  java_lang_System::static_err_offset = java_lang_System::hc_static_err_offset * x;
-
-  // java_lang_StackTraceElement
-  java_lang_StackTraceElement::declaringClass_offset = java_lang_StackTraceElement::hc_declaringClass_offset  * x + header;
-  java_lang_StackTraceElement::methodName_offset = java_lang_StackTraceElement::hc_methodName_offset * x + header;
-  java_lang_StackTraceElement::fileName_offset   = java_lang_StackTraceElement::hc_fileName_offset   * x + header;
-  java_lang_StackTraceElement::lineNumber_offset = java_lang_StackTraceElement::hc_lineNumber_offset * x + header;
-  java_lang_AssertionStatusDirectives::classes_offset = java_lang_AssertionStatusDirectives::hc_classes_offset * x + header;
-  java_lang_AssertionStatusDirectives::classEnabled_offset = java_lang_AssertionStatusDirectives::hc_classEnabled_offset * x + header;
-  java_lang_AssertionStatusDirectives::packages_offset = java_lang_AssertionStatusDirectives::hc_packages_offset * x + header;
-  java_lang_AssertionStatusDirectives::packageEnabled_offset = java_lang_AssertionStatusDirectives::hc_packageEnabled_offset * x + header;
-  java_lang_AssertionStatusDirectives::deflt_offset = java_lang_AssertionStatusDirectives::hc_deflt_offset * x + header;
-
-}
-  
-
-// Compute non-hard-coded field offsets of all the classes in this file
-void JavaClasses::compute_offsets() {
-
-  java_lang_Class::compute_offsets();
-  java_lang_System::compute_offsets();
-  java_lang_Thread::compute_offsets();
-  java_lang_ThreadGroup::compute_offsets();
-  java_security_AccessControlContext::compute_offsets();
-  // Initialize reflection classes. The layouts of these classes
-  // changed with the new reflection implementation in JDK 1.4, and
-  // since the Universe doesn't know what JDK version it is until this
-  // point we defer computation of these offsets until now.
-  java_lang_reflect_AccessibleObject::compute_offsets();
-  java_lang_reflect_Method::compute_offsets();
-  java_lang_reflect_Constructor::compute_offsets();
-  java_lang_reflect_Field::compute_offsets();
-  if (JDK_Version::is_gte_jdk14x_version()) {
-    java_nio_Buffer::compute_offsets();
-  }
-  if (JDK_Version::is_gte_jdk15x_version()) {
-    sun_reflect_ConstantPool::compute_offsets();
-    sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
-  }
-  sun_misc_AtomicLongCSImpl::compute_offsets();
-}
-
-#ifndef PRODUCT
-
-// These functions exist to assert the validity of hard-coded field offsets to guard 
-// against changes in the class files
-
-bool JavaClasses::check_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) {
-  EXCEPTION_MARK;
-  fieldDescriptor fd;
-  symbolHandle klass_sym = oopFactory::new_symbol_handle(klass_name, CATCH);
-  klassOop k = SystemDictionary::resolve_or_fail(klass_sym, true, CATCH);
-  instanceKlassHandle h_klass (THREAD, k);
-  //instanceKlassHandle h_klass(klass);
-  symbolHandle f_name = oopFactory::new_symbol_handle(field_name, CATCH);
-  symbolHandle f_sig  = oopFactory::new_symbol_handle(field_sig, CATCH);
-  if (!h_klass->find_local_field(f_name(), f_sig(), &fd)) {
-    tty->print_cr("Nonstatic field %s.%s not found", klass_name, field_name);
-    return false;
-  }
-  if (fd.is_static()) {
-    tty->print_cr("Nonstatic field %s.%s appears to be static", klass_name, field_name);
-    return false;
-  }
-  if (fd.offset() == hardcoded_offset ) {
-    return true;
-  } else {
-    tty->print_cr("Offset of nonstatic field %s.%s is hardcoded as %d but should really be %d.", 
-                  klass_name, field_name, hardcoded_offset, fd.offset());
-    return false;
-  }
-}
-
-
-bool JavaClasses::check_static_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) {
-  EXCEPTION_MARK;
-  fieldDescriptor fd;
-  symbolHandle klass_sym = oopFactory::new_symbol_handle(klass_name, CATCH);
-  klassOop k = SystemDictionary::resolve_or_fail(klass_sym, true, CATCH);
-  instanceKlassHandle h_klass (THREAD, k);
-  symbolHandle f_name = oopFactory::new_symbol_handle(field_name, CATCH);
-  symbolHandle f_sig  = oopFactory::new_symbol_handle(field_sig, CATCH);
-  if (!h_klass->find_local_field(f_name(), f_sig(), &fd)) {
-    tty->print_cr("Static field %s.%s not found", klass_name, field_name);
-    return false;
-  }
-  if (!fd.is_static()) {
-    tty->print_cr("Static field %s.%s appears to be nonstatic", klass_name, field_name);
-    return false;
-  }
-  if (fd.offset() == hardcoded_offset + h_klass->offset_of_static_fields()) {
-    return true;
-  } else {
-    tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - h_klass->offset_of_static_fields());
-    return false;
-  }
-}
-
-
-// Check the hard-coded field offsets of all the classes in this file
-
-void JavaClasses::check_offsets() {
-  bool valid = true;
-
-#define CHECK_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \
-  valid &= check_offset(klass_name, cpp_klass_name :: field_name ## _offset, #field_name, field_sig)
-
-#define CHECK_STATIC_OFFSET(klass_name, cpp_klass_name, field_name, field_sig) \
-  valid &= check_static_offset(klass_name, cpp_klass_name :: static_ ## field_name ## _offset, #field_name, field_sig)
-
-  // java.lang.String
-
-  CHECK_OFFSET("java/lang/String", java_lang_String, value, "[C");
-  CHECK_OFFSET("java/lang/String", java_lang_String, offset, "I");
-  CHECK_OFFSET("java/lang/String", java_lang_String, count, "I");
-  CHECK_OFFSET("java/lang/String", java_lang_String, hash, "I");
-  
-  // java.lang.Class
-
-  // Fake fields
-  // CHECK_OFFSET("java/lang/Class", java_lang_Class, klass); // %%% this needs to be checked
-  // CHECK_OFFSET("java/lang/Class", java_lang_Class, array_klass); // %%% this needs to be checked
-  // CHECK_OFFSET("java/lang/Class", java_lang_Class, resolved_constructor); // %%% this needs to be checked
-
-  // java.lang.Throwable
-
-  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, backtrace, "Ljava/lang/Object;");
-  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, detailMessage, "Ljava/lang/String;");
-  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, cause, "Ljava/lang/Throwable;");
-  CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, stackTrace, "[Ljava/lang/StackTraceElement;");
-  
-  // Boxed primitive objects (java_lang_boxing_object)
-
-  CHECK_OFFSET("java/lang/Boolean",   java_lang_boxing_object, value, "Z");
-  CHECK_OFFSET("java/lang/Character", java_lang_boxing_object, value, "C");
-  CHECK_OFFSET("java/lang/Float",     java_lang_boxing_object, value, "F");
-  CHECK_OFFSET("java/lang/Double",    java_lang_boxing_object, value, "D");
-  CHECK_OFFSET("java/lang/Byte",      java_lang_boxing_object, value, "B");
-  CHECK_OFFSET("java/lang/Short",     java_lang_boxing_object, value, "S");
-  CHECK_OFFSET("java/lang/Integer",   java_lang_boxing_object, value, "I");
-  CHECK_OFFSET("java/lang/Long",      java_lang_boxing_object, value, "J");
-
-  // java.lang.ClassLoader
-
-  CHECK_OFFSET("java/lang/ClassLoader", java_lang_ClassLoader, parent,      "Ljava/lang/ClassLoader;");
-
-  // java.lang.System
-
-  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System,  in, "Ljava/io/InputStream;");
-  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, out, "Ljava/io/PrintStream;");
-  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, err, "Ljava/io/PrintStream;");
-
-  // java.lang.StackTraceElement
-
-  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement, declaringClass, "Ljava/lang/String;");
-  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement, methodName, "Ljava/lang/String;");
-  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement,   fileName, "Ljava/lang/String;");
-  CHECK_OFFSET("java/lang/StackTraceElement", java_lang_StackTraceElement, lineNumber, "I");
-
-  // java.lang.ref.Reference
-
-  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, referent, "Ljava/lang/Object;");
-  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, queue, "Ljava/lang/ref/ReferenceQueue;");
-  CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, next, "Ljava/lang/ref/Reference;");
-  // Fake field
-  //CHECK_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, discovered, "Ljava/lang/ref/Reference;");
-  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, lock, "Ljava/lang/ref/Reference$Lock;");
-  CHECK_STATIC_OFFSET("java/lang/ref/Reference", java_lang_ref_Reference, pending, "Ljava/lang/ref/Reference;");
-
-  // java.lang.ref.SoftReference
-
-  CHECK_OFFSET("java/lang/ref/SoftReference", java_lang_ref_SoftReference, timestamp, "J");
-  CHECK_STATIC_OFFSET("java/lang/ref/SoftReference", java_lang_ref_SoftReference, clock, "J");
-
-  // java.lang.AssertionStatusDirectives
-  // 
-  // The CheckAssertionStatusDirectives boolean can be removed from here and
-  // globals.hpp after the AssertionStatusDirectives class has been integrated
-  // into merlin "for some time."  Without it, the vm will fail with early
-  // merlin builds.
-
-  if (CheckAssertionStatusDirectives && JDK_Version::is_gte_jdk14x_version()) {
-    const char* nm = "java/lang/AssertionStatusDirectives";
-    const char* sig = "[Ljava/lang/String;";
-    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, classes, sig);
-    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, classEnabled, "[Z");
-    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, packages, sig);
-    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, packageEnabled, "[Z");
-    CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, deflt, "Z");
-  }
-
-  if (!valid) vm_exit_during_initialization("Hard-coded field offset verification failed");
-}
-
-#endif // PRODUCT
-
-void javaClasses_init() {
-  JavaClasses::compute_offsets();
-  JavaClasses::check_offsets();
-  FilteredFieldsMap::initialize();  // must be done after computing offsets.
-}
--- a/hotspot/src/share/vm/memory/javaClasses.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,907 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)javaClasses.hpp	1.157 07/05/05 17:05:52 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// Interface for manipulating the basic Java classes.
-//
-// All dependencies on layout of actual Java classes should be kept here.
-// If the layout of any of the classes above changes the offsets must be adjusted.
-//
-// For most classes we hardwire the offsets for performance reasons. In certain
-// cases (e.g. java.security.AccessControlContext) we compute the offsets at
-// startup since the layout here differs between JDK1.2 and JDK1.3.
-// 
-// Note that fields (static and non-static) are arranged with oops before non-oops
-// on a per class basis. The offsets below have to reflect this ordering.
-//
-// When editing the layouts please update the check_offset verification code 
-// correspondingly. The names in the enums must be identical to the actual field 
-// names in order for the verification code to work.
-
-
-// Interface to java.lang.String objects
-
-class java_lang_String : AllStatic {
- private:
-  enum {
-    hc_value_offset  = 0,
-    hc_offset_offset = 1,
-    hc_count_offset  = 2,
-    hc_hash_offset   = 3
-  };
-
-  static int value_offset;
-  static int offset_offset;
-  static int count_offset;
-  static int hash_offset;
-
-  static Handle basic_create(int length, bool tenured, TRAPS);
-  static Handle basic_create_from_unicode(jchar* unicode, int length, bool tenured, TRAPS);
-
-  static void set_value( oop string, typeArrayOop buffer) { string->obj_field_put(value_offset,  (oop)buffer); }
-  static void set_offset(oop string, int offset)          { string->int_field_put(offset_offset, offset); }
-  static void set_count( oop string, int count)           { string->int_field_put(count_offset,  count);  }
-
- public:
-  // Instance creation
-  static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
-  static Handle create_tenured_from_unicode(jchar* unicode, int len, TRAPS);
-  static oop    create_oop_from_unicode(jchar* unicode, int len, TRAPS);
-  static Handle create_from_str(const char* utf8_str, TRAPS);
-  static oop    create_oop_from_str(const char* utf8_str, TRAPS);
-  static Handle create_from_symbol(symbolHandle symbol, TRAPS);  
-  static Handle create_from_platform_dependent_str(const char* str, TRAPS);
-  static Handle char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS);
- 
-  static int value_offset_in_bytes()  { return value_offset;  }
-  static int count_offset_in_bytes()  { return count_offset;  }
-  static int offset_offset_in_bytes() { return offset_offset; }
-  static int hash_offset_in_bytes()   { return hash_offset;   }
-
-  // Accessors
-  static typeArrayOop value(oop java_string) {
-    assert(is_instance(java_string), "must be java_string");
-    return (typeArrayOop) java_string->obj_field(value_offset);
-  }
-  static int offset(oop java_string) {
-    assert(is_instance(java_string), "must be java_string");
-    return java_string->int_field(offset_offset);
-  }
-  static int length(oop java_string) {
-    assert(is_instance(java_string), "must be java_string");
-    return java_string->int_field(count_offset);
-  }
-  static int utf8_length(oop java_string);
-
-  // String converters
-  static char*  as_utf8_string(oop java_string);
-  static char*  as_utf8_string(oop java_string, int start, int len);
-  static jchar* as_unicode_string(oop java_string, int& length);
-
-  static bool equals(oop java_string, jchar* chars, int len);
-
-  // Conversion between '.' and '/' formats
-  static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
-  static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }    
-
-  // Conversion
-  static symbolHandle as_symbol(Handle java_string, TRAPS);
-
-  // Testers
-  static bool is_instance(oop obj) {
-    return obj != NULL && obj->klass() == SystemDictionary::string_klass();
-  }
-
-  // Debugging
-  static void print(Handle java_string, outputStream* st);
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.Class objects
-
-class java_lang_Class : AllStatic {
-   friend class VMStructs;
- private:
-  // The fake offsets are added by the class loader when java.lang.Class is loaded 
-
-  enum {
-    hc_klass_offset                = 0,
-    hc_array_klass_offset          = 1,
-    hc_resolved_constructor_offset = 2,
-    hc_number_of_fake_oop_fields   = 3
-  };
-
-  static int klass_offset;
-  static int resolved_constructor_offset;
-  static int array_klass_offset;
-  static int number_of_fake_oop_fields;
-
-  static void compute_offsets();
-  static bool offsets_computed;
-  static int classRedefinedCount_offset;
-
- public:
-  // Instance creation
-  static oop  create_mirror(KlassHandle k, TRAPS);
-  static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
-  // Conversion
-  static klassOop as_klassOop(oop java_class);
-  // Testing
-  static bool is_primitive(oop java_class);  
-  static BasicType primitive_type(oop java_class);  
-  static oop primitive_mirror(BasicType t);  
-  // JVM_NewInstance support
-  static methodOop resolved_constructor(oop java_class);
-  static void set_resolved_constructor(oop java_class, methodOop constructor);
-  // JVM_NewArray support
-  static klassOop array_klass(oop java_class);
-  static void set_array_klass(oop java_class, klassOop klass);
-  // compiler support for class operations
-  static int klass_offset_in_bytes() { return klass_offset; }
-  static int resolved_constructor_offset_in_bytes() { return resolved_constructor_offset; }
-  static int array_klass_offset_in_bytes() { return array_klass_offset; }
-  // Support for classRedefinedCount field
-  static int classRedefinedCount(oop the_class_mirror);
-  static void set_classRedefinedCount(oop the_class_mirror, int value);
-  // Debugging
-  friend class JavaClasses;
-  friend class instanceKlass;   // verification code accesses offsets
-  friend class ClassFileParser; // access to number_of_fake_fields
-};
-
-// Interface to java.lang.Thread objects
-
-class java_lang_Thread : AllStatic {
- private:
-  // Note that for this class the layout changed between JDK1.2 and JDK1.3,
-  // so we compute the offsets at startup rather than hard-wiring them.
-  static int _name_offset;
-  static int _group_offset;
-  static int _contextClassLoader_offset;
-  static int _inheritedAccessControlContext_offset;
-  static int _priority_offset;
-  static int _eetop_offset;
-  static int _daemon_offset;
-  static int _stillborn_offset;
-  static int _stackSize_offset;
-  static int _tid_offset;
-  static int _thread_status_offset; 
-  static int _park_blocker_offset; 
-  static int _park_event_offset ; 
-
-  static void compute_offsets();
-
- public:
-  // Instance creation
-  static oop create();
-  // Returns the JavaThread associated with the thread obj
-  static JavaThread* thread(oop java_thread);
-  // Set JavaThread for instance
-  static void set_thread(oop java_thread, JavaThread* thread);
-  // Name
-  static typeArrayOop name(oop java_thread);
-  static void set_name(oop java_thread, typeArrayOop name);
-  // Priority
-  static ThreadPriority priority(oop java_thread);
-  static void set_priority(oop java_thread, ThreadPriority priority);
-  // Thread group
-  static oop  threadGroup(oop java_thread);
-  // Stillborn
-  static bool is_stillborn(oop java_thread);
-  static void set_stillborn(oop java_thread);
-  // Alive (NOTE: this is not really a field, but provides the correct
-  // definition without doing a Java call)
-  static bool is_alive(oop java_thread);
-  // Daemon
-  static bool is_daemon(oop java_thread);
-  static void set_daemon(oop java_thread);
-  // Context ClassLoader
-  static oop context_class_loader(oop java_thread);
-  // Control context
-  static oop inherited_access_control_context(oop java_thread);
-  // Stack size hint
-  static jlong stackSize(oop java_thread);
-  // Thread ID
-  static jlong thread_id(oop java_thread);
-    
-  // Blocker object responsible for thread parking
-  static oop park_blocker(oop java_thread);
-
-  // Pointer to type-stable park handler, encoded as jlong. 
-  // Should be set when apparently null
-  // For details, see unsafe.cpp Unsafe_Unpark
-  static jlong park_event(oop java_thread);
-  static bool set_park_event(oop java_thread, jlong ptr);
-
-  // Java Thread Status for JVMTI and M&M use.
-  // This thread status info is saved in threadStatus field of
-  // java.lang.Thread java class.
-  enum ThreadStatus {
-    NEW                      = 0,
-    RUNNABLE                 = JVMTI_THREAD_STATE_ALIVE +          // runnable / running
-                               JVMTI_THREAD_STATE_RUNNABLE,
-    SLEEPING                 = JVMTI_THREAD_STATE_ALIVE +          // Thread.sleep()
-                               JVMTI_THREAD_STATE_WAITING +
-                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT + 
-                               JVMTI_THREAD_STATE_SLEEPING,
-    IN_OBJECT_WAIT           = JVMTI_THREAD_STATE_ALIVE +          // Object.wait()
-                               JVMTI_THREAD_STATE_WAITING +
-                               JVMTI_THREAD_STATE_WAITING_INDEFINITELY +
-                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT, 
-    IN_OBJECT_WAIT_TIMED     = JVMTI_THREAD_STATE_ALIVE +          // Object.wait(long)
-                               JVMTI_THREAD_STATE_WAITING +
-                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
-                               JVMTI_THREAD_STATE_IN_OBJECT_WAIT, 
-    PARKED                   = JVMTI_THREAD_STATE_ALIVE +          // LockSupport.park()
-                               JVMTI_THREAD_STATE_WAITING +
-                               JVMTI_THREAD_STATE_WAITING_INDEFINITELY +
-                               JVMTI_THREAD_STATE_PARKED,
-    PARKED_TIMED             = JVMTI_THREAD_STATE_ALIVE +          // LockSupport.park(long)
-                               JVMTI_THREAD_STATE_WAITING +
-                               JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +
-                               JVMTI_THREAD_STATE_PARKED,  
-    BLOCKED_ON_MONITOR_ENTER = JVMTI_THREAD_STATE_ALIVE +          // (re-)entering a synchronization block 
-                               JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER,   
-    TERMINATED               = JVMTI_THREAD_STATE_TERMINATED
-  };
-  // Write thread status info to threadStatus field of java.lang.Thread.
-  static void set_thread_status(oop java_thread_oop, ThreadStatus status);
-  // Read thread status info from threadStatus field of java.lang.Thread. 
-  static ThreadStatus get_thread_status(oop java_thread_oop);
-
-  static const char*  thread_status_name(oop java_thread_oop);
-    
-  // Debugging
-  friend class JavaClasses;
-};
-
-// Interface to java.lang.ThreadGroup objects
-
-class java_lang_ThreadGroup : AllStatic {
- private:
-  static int _parent_offset;        
-  static int _name_offset;
-  static int _threads_offset;
-  static int _groups_offset;
-  static int _maxPriority_offset;
-  static int _destroyed_offset;
-  static int _daemon_offset;
-  static int _vmAllowSuspension_offset; 
-  static int _nthreads_offset;  
-  static int _ngroups_offset; 
-
-  static void compute_offsets();
-
- public:  
-  // parent ThreadGroup
-  static oop  parent(oop java_thread_group);
-  // name
-  static typeArrayOop name(oop java_thread_group);
-  // ("name as oop" accessor is not necessary)
-  // Number of threads in group
-  static int nthreads(oop java_thread_group);
-  // threads
-  static objArrayOop threads(oop java_thread_group);
-  // Number of threads in group
-  static int ngroups(oop java_thread_group);
-  // groups
-  static objArrayOop groups(oop java_thread_group);
-  // maxPriority in group
-  static ThreadPriority maxPriority(oop java_thread_group);
-  // Destroyed
-  static bool is_destroyed(oop java_thread_group);
-  // Daemon
-  static bool is_daemon(oop java_thread_group);
-  // vmAllowSuspension
-  static bool is_vmAllowSuspension(oop java_thread_group);
-  // Debugging
-  friend class JavaClasses;
-};
-  
-
-
-// Interface to java.lang.Throwable objects
-
-class java_lang_Throwable: AllStatic {
-  friend class BacktraceBuilder;
-
- private:
-  // Offsets
-  enum {
-    hc_backtrace_offset     =  0,
-    hc_detailMessage_offset =  1,
-    hc_cause_offset         =  2,  // New since 1.4
-    hc_stackTrace_offset    =  3   // New since 1.4
-  };
-  // Trace constants
-  enum {
-    trace_methods_offset = 0,
-    trace_bcis_offset    = 1,
-    trace_next_offset    = 2,
-    trace_size           = 3,
-    trace_chunk_size     = 32
-  };
-
-  static int backtrace_offset;
-  static int detailMessage_offset;
-  static int cause_offset;
-  static int stackTrace_offset;
-
-  // Printing
-  static char* print_stack_element_to_buffer(methodOop method, int bci);
-  static void print_to_stream(Handle stream, const char* str);
-  // StackTrace (programmatic access, new since 1.4)
-  static void clear_stacktrace(oop throwable);
-  // No stack trace available
-  static const char* no_stack_trace_message();
-
- public:
-  // Backtrace
-  static oop backtrace(oop throwable);
-  static void set_backtrace(oop throwable, oop value);
-  // Needed by JVMTI to filter out this internal field. 
-  static int get_backtrace_offset() { return backtrace_offset;}
-  static int get_detailMessage_offset() { return detailMessage_offset;}
-  // Message
-  static oop message(oop throwable);
-  static oop message(Handle throwable);
-  static void set_message(oop throwable, oop value);
-  // Print stack trace stored in exception by call-back to Java
-  // Note: this is no longer used in Merlin, but we still suppport
-  // it for compatibility.
-  static void print_stack_trace(oop throwable, oop print_stream);
-  static void print_stack_element(Handle stream, methodOop method, int bci);
-  static void print_stack_element(outputStream *st, methodOop method, int bci);
-  static void print_stack_usage(Handle stream);
-
-  // Allocate space for backtrace (created but stack trace not filled in)
-  static void allocate_backtrace(Handle throwable, TRAPS);
-  // Fill in current stack trace for throwable with preallocated backtrace (no GC)
-  static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable);
-
-  // Fill in current stack trace, can cause GC
-  static void fill_in_stack_trace(Handle throwable, TRAPS);
-  static void fill_in_stack_trace(Handle throwable);
-  // Programmatic access to stack trace
-  static oop  get_stack_trace_element(oop throwable, int index, TRAPS);
-  static int  get_stack_trace_depth(oop throwable, TRAPS);
-  // Printing
-  static void print(oop throwable, outputStream* st);
-  static void print(Handle throwable, outputStream* st);
-  static void print_stack_trace(oop throwable, outputStream* st);
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.reflect.AccessibleObject objects
-
-class java_lang_reflect_AccessibleObject: AllStatic {
- private:
-  // Note that to reduce dependencies on the JDK we compute these
-  // offsets at run-time.
-  static int override_offset; 
-
-  static void compute_offsets();
-
- public:
-  // Accessors
-  static jboolean override(oop reflect);
-  static void set_override(oop reflect, jboolean value);
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.reflect.Method objects
-
-class java_lang_reflect_Method : public java_lang_reflect_AccessibleObject {
- private:
-  // Note that to reduce dependencies on the JDK we compute these
-  // offsets at run-time.
-  static int clazz_offset;
-  static int name_offset;
-  static int returnType_offset;
-  static int parameterTypes_offset;
-  static int exceptionTypes_offset;
-  static int slot_offset; 
-  static int modifiers_offset; 
-  static int signature_offset;
-  static int annotations_offset;
-  static int parameter_annotations_offset;
-  static int annotation_default_offset;
-
-  static void compute_offsets();
-
- public:
-  // Allocation
-  static Handle create(TRAPS);
-
-  // Accessors
-  static oop clazz(oop reflect);
-  static void set_clazz(oop reflect, oop value);
-
-  static oop name(oop method);
-  static void set_name(oop method, oop value);
-
-  static oop return_type(oop method);
-  static void set_return_type(oop method, oop value);
-
-  static oop parameter_types(oop method);
-  static void set_parameter_types(oop method, oop value);
-
-  static oop exception_types(oop method);
-  static void set_exception_types(oop method, oop value);
-
-  static int slot(oop reflect);
-  static void set_slot(oop reflect, int value);
-
-  static int modifiers(oop method);
-  static void set_modifiers(oop method, int value);
-
-  static bool has_signature_field();
-  static oop signature(oop method);
-  static void set_signature(oop method, oop value);
-
-  static bool has_annotations_field();
-  static oop annotations(oop method);
-  static void set_annotations(oop method, oop value);
-
-  static bool has_parameter_annotations_field();
-  static oop parameter_annotations(oop method);
-  static void set_parameter_annotations(oop method, oop value);
-
-  static bool has_annotation_default_field();
-  static oop annotation_default(oop method);
-  static void set_annotation_default(oop method, oop value);
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.reflect.Constructor objects
-
-class java_lang_reflect_Constructor : public java_lang_reflect_AccessibleObject {
- private:
-  // Note that to reduce dependencies on the JDK we compute these
-  // offsets at run-time.
-  static int clazz_offset;
-  static int parameterTypes_offset;
-  static int exceptionTypes_offset;
-  static int slot_offset;
-  static int modifiers_offset;
-  static int signature_offset;
-  static int annotations_offset;
-  static int parameter_annotations_offset;
-
-  static void compute_offsets();
-
- public:
-  // Allocation
-  static Handle create(TRAPS);
-
-  // Accessors
-  static oop clazz(oop reflect);
-  static void set_clazz(oop reflect, oop value);
-
-  static oop parameter_types(oop constructor);
-  static void set_parameter_types(oop constructor, oop value);
-
-  static oop exception_types(oop constructor);
-  static void set_exception_types(oop constructor, oop value);
-
-  static int slot(oop reflect);
-  static void set_slot(oop reflect, int value);
-
-  static int modifiers(oop constructor);
-  static void set_modifiers(oop constructor, int value);
-
-  static bool has_signature_field();
-  static oop signature(oop constructor);
-  static void set_signature(oop constructor, oop value);
-
-  static bool has_annotations_field();
-  static oop annotations(oop constructor);
-  static void set_annotations(oop constructor, oop value);
-
-  static bool has_parameter_annotations_field();
-  static oop parameter_annotations(oop method);
-  static void set_parameter_annotations(oop method, oop value);
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.reflect.Field objects
-
-class java_lang_reflect_Field : public java_lang_reflect_AccessibleObject {
- private:
-  // Note that to reduce dependencies on the JDK we compute these
-  // offsets at run-time.
-  static int clazz_offset; 
-  static int name_offset;
-  static int type_offset;
-  static int slot_offset;
-  static int modifiers_offset;
-  static int signature_offset;
-  static int annotations_offset;
-
-  static void compute_offsets();
-
- public:
-  // Allocation
-  static Handle create(TRAPS);
-
-  // Accessors
-  static oop clazz(oop reflect);
-  static void set_clazz(oop reflect, oop value);
-
-  static oop name(oop field);
-  static void set_name(oop field, oop value);
-
-  static oop type(oop field);
-  static void set_type(oop field, oop value);
-
-  static int slot(oop reflect);
-  static void set_slot(oop reflect, int value);
-
-  static int modifiers(oop field);
-  static void set_modifiers(oop field, int value);
-
-  static bool has_signature_field();
-  static oop signature(oop constructor);
-  static void set_signature(oop constructor, oop value);
-
-  static bool has_annotations_field();
-  static oop annotations(oop constructor);
-  static void set_annotations(oop constructor, oop value);
-
-  static bool has_parameter_annotations_field();
-  static oop parameter_annotations(oop method);
-  static void set_parameter_annotations(oop method, oop value);
-
-  static bool has_annotation_default_field();
-  static oop annotation_default(oop method);
-  static void set_annotation_default(oop method, oop value);
-
-  // Debugging
-  friend class JavaClasses;
-}; 
-
-// Interface to sun.reflect.ConstantPool objects
-class sun_reflect_ConstantPool {
- private:
-  // Note that to reduce dependencies on the JDK we compute these
-  // offsets at run-time.
-  static int _cp_oop_offset; 
-
-  static void compute_offsets();
-
- public:
-  // Allocation
-  static Handle create(TRAPS);
-
-  // Accessors
-  static oop cp_oop(oop reflect);
-  static void set_cp_oop(oop reflect, oop value);
-  static int cp_oop_offset() {
-    return _cp_oop_offset;
-  }
-
-  // Debugging
-  friend class JavaClasses;
-}; 
-
-// Interface to sun.reflect.UnsafeStaticFieldAccessorImpl objects
-class sun_reflect_UnsafeStaticFieldAccessorImpl {
- private:
-  static int _base_offset; 
-  static void compute_offsets();
-
- public:
-  static int base_offset() {
-    return _base_offset;
-  }
-
-  // Debugging
-  friend class JavaClasses;
-}; 
-
-// Interface to java.lang primitive type boxing objects:
-//  - java.lang.Boolean
-//  - java.lang.Character
-//  - java.lang.Float
-//  - java.lang.Double
-//  - java.lang.Byte
-//  - java.lang.Short
-//  - java.lang.Integer
-//  - java.lang.Long
-
-// This could be separated out into 8 individual classes.
-
-class java_lang_boxing_object: AllStatic {
- private:
-  enum {
-   hc_value_offset = 0
-  };
-  static int value_offset; 
-
-  static oop initialize_and_allocate(klassOop klass, TRAPS);
- public:
-  // Allocation. Returns a boxed value, or NULL for invalid type.
-  static oop create(BasicType type, jvalue* value, TRAPS);
-  // Accessors. Returns the basic type being boxed, or T_ILLEGAL for invalid oop.
-  static BasicType get_value(oop box, jvalue* value);
-  static BasicType set_value(oop box, jvalue* value);
-
-  static int value_offset_in_bytes() { return value_offset; }
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-
-// Interface to java.lang.ref.Reference objects
-
-class java_lang_ref_Reference: AllStatic {
- public:
-  enum {
-   hc_referent_offset   = 0,
-   hc_queue_offset      = 1,
-   hc_next_offset       = 2,
-   hc_discovered_offset	= 3  // Is not last, see SoftRefs.
-  };
-  enum {
-   hc_static_lock_offset    = 0,
-   hc_static_pending_offset = 1
-  };
-
-  static int referent_offset;
-  static int queue_offset;
-  static int next_offset;
-  static int discovered_offset;
-  static int static_lock_offset;
-  static int static_pending_offset;
-  static int number_of_fake_oop_fields;
- 
-  // Accessors
-  static oop referent(oop ref)        { return *referent_addr(ref); }
-  static void set_referent(oop ref, oop value);
-  static oop* referent_addr(oop ref);
-
-  static oop next(oop ref)            { return *next_addr(ref); }
-  static void set_next(oop ref, oop value);
-  static oop* next_addr(oop ref);
-
-  static oop discovered(oop ref)      { return *discovered_addr(ref); }
-  static void set_discovered(oop ref, oop value);
-  static oop* discovered_addr(oop ref);
-
-  // Accessors for statics
-  static oop  pending_list_lock()     { return *pending_list_lock_addr(); }
-  static oop  pending_list()          { return *pending_list_addr(); }
-
-  static oop* pending_list_lock_addr();
-  static oop* pending_list_addr();
-};
-
-
-// Interface to java.lang.ref.SoftReference objects
-
-class java_lang_ref_SoftReference: public java_lang_ref_Reference {
- public:
-  enum {
-   // The timestamp is a long field and may need to be adjusted for alignment.
-   hc_timestamp_offset    = align_object_offset_(hc_discovered_offset + 1)
-  };
-  enum {
-   hc_static_clock_offset = 0
-  };
-
-  static int timestamp_offset;
-  static int static_clock_offset;
-
-  // Accessors
-  static jlong timestamp(oop ref);
-
-  // Accessors for statics
-  static jlong clock();
-  static void set_clock(jlong value);
-};
-
-
-// Interface to java.security.AccessControlContext objects
-
-class java_security_AccessControlContext: AllStatic {
- private:
-  // Note that for this class the layout changed between JDK1.2 and JDK1.3,
-  // so we compute the offsets at startup rather than hard-wiring them.
-  static int _context_offset;
-  static int _privilegedContext_offset;
-  static int _isPrivileged_offset;
-
-  static void compute_offsets();
- public:
-  static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);  
-
-  // Debugging/initialization
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.ClassLoader objects
-
-class java_lang_ClassLoader : AllStatic {
- private:
-  enum {
-   hc_parent_offset = 0
-  };
-
-  static int parent_offset;
-
- public:
-  static oop parent(oop loader);
-
-  static bool is_trusted_loader(oop loader);
-
-  // Fix for 4474172
-  static oop  non_reflection_class_loader(oop loader);
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.System objects
-
-class java_lang_System : AllStatic {
- private:
-  enum {
-   hc_static_in_offset  = 0,
-   hc_static_out_offset = 1,
-   hc_static_err_offset = 2
-  };
-
-  static int offset_of_static_fields;
-  static int  static_in_offset;
-  static int static_out_offset;
-  static int static_err_offset;
-
-  static void compute_offsets();
-
- public:
-  static int  in_offset_in_bytes();
-  static int out_offset_in_bytes();
-  static int err_offset_in_bytes();
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.StackTraceElement objects
-
-class java_lang_StackTraceElement: AllStatic {
- private:
-  enum {
-    hc_declaringClass_offset  = 0,
-    hc_methodName_offset = 1,
-    hc_fileName_offset   = 2,
-    hc_lineNumber_offset = 3
-  };
-
-  static int declaringClass_offset;
-  static int methodName_offset;
-  static int fileName_offset;
-  static int lineNumber_offset;
-
- public:
-  // Setters
-  static void set_declaringClass(oop element, oop value);
-  static void set_methodName(oop element, oop value);
-  static void set_fileName(oop element, oop value);
-  static void set_lineNumber(oop element, int value);
-
-  // Create an instance of StackTraceElement
-  static oop create(methodHandle m, int bci, TRAPS);
-
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-// Interface to java.lang.AssertionStatusDirectives objects
-
-class java_lang_AssertionStatusDirectives: AllStatic {
- private:
-  enum {
-    hc_classes_offset,
-    hc_classEnabled_offset,
-    hc_packages_offset,
-    hc_packageEnabled_offset,
-    hc_deflt_offset
-  };
-
-  static int classes_offset;
-  static int classEnabled_offset;
-  static int packages_offset;
-  static int packageEnabled_offset;
-  static int deflt_offset;
-
- public:
-  // Setters
-  static void set_classes(oop obj, oop val);
-  static void set_classEnabled(oop obj, oop val);
-  static void set_packages(oop obj, oop val);
-  static void set_packageEnabled(oop obj, oop val);
-  static void set_deflt(oop obj, bool val);
-  // Debugging
-  friend class JavaClasses;
-};
-
-
-class java_nio_Buffer: AllStatic {
- private:
-  static int _limit_offset;
-
- public:
-  static int  limit_offset();
-  static void compute_offsets();
-};
-
-class sun_misc_AtomicLongCSImpl: AllStatic {
- private:
-  static int _value_offset;
-
- public:
-  static int  value_offset();
-  static void compute_offsets();
-};
-
-class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
- private:
-  static int  _owner_offset;
- public:
-  static void initialize(TRAPS);
-  static oop  get_owner_threadObj(oop obj);
-};
-
-// Interface to hard-coded offset checking
-
-class JavaClasses : AllStatic {
- private:
-  static bool check_offset(const char *klass_name, int offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
-  static bool check_static_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) PRODUCT_RETURN0;
- public:
-  static void compute_hard_coded_offsets();
-  static void compute_offsets();
-  static void check_offsets() PRODUCT_RETURN;
-};
--- a/hotspot/src/share/vm/memory/loaderConstraints.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,515 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)loaderConstraints.cpp	1.18 07/05/05 17:05:52 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_loaderConstraints.cpp.incl"
-
-LoaderConstraintTable::LoaderConstraintTable(int nof_buckets)
-  : Hashtable(nof_buckets, sizeof(LoaderConstraintEntry)) {};
-
-
-LoaderConstraintEntry* LoaderConstraintTable::new_entry(
-                                 unsigned int hash, symbolOop name,
-                                 klassOop klass, int num_loaders,
-                                 int max_loaders) {
-  LoaderConstraintEntry* entry;
-  entry = (LoaderConstraintEntry*)Hashtable::new_entry(hash, klass);
-  entry->set_name(name);
-  entry->set_num_loaders(num_loaders);
-  entry->set_max_loaders(max_loaders);
-  return entry;
-}
-
-
-void LoaderConstraintTable::oops_do(OopClosure* f) {
-  for (int index = 0; index < table_size(); index++) {
-    for (LoaderConstraintEntry* probe = bucket(index);
-                                probe != NULL;
-                                probe = probe->next()) {
-      f->do_oop((oop*)(probe->name_addr()));
-      if (probe->klass() != NULL) {
-        f->do_oop((oop*)probe->klass_addr());
-      }
-      for (int n = 0; n < probe->num_loaders(); n++) {
-        if (probe->loader(n) != NULL) {
-          f->do_oop(probe->loader_addr(n));
-        }
-      }
-    }
-  }
-}
-
-// We must keep the symbolOop used in the name alive.  We'll use the
-// loaders to decide if a particular entry can be purged. 
-void LoaderConstraintTable::always_strong_classes_do(OopClosure* blk) {
-  // We must keep the symbolOop used in the name alive.
-  for (int cindex = 0; cindex < table_size(); cindex++) {
-    for (LoaderConstraintEntry* lc_probe = bucket(cindex);
-                                lc_probe != NULL;
-                                lc_probe = lc_probe->next()) {
-      assert (lc_probe->name() != NULL,  "corrupted loader constraint table");
-      blk->do_oop((oop*)lc_probe->name_addr());
-    }
-  }
-}
-
-
-// The only unlocked/non-safepoint reader of the loader constraints
-// appears to be find_defining_loader, below, which guards against
-// unordered writes. Therefore, no membar/volatile is required when
-// we're updating the loader constraints.
-// 
-// This is pretty tricky. It'd feel a little more comfortable if we
-// took the system dictionary lock in find_defining_loader. Should we
-// create a separate loader constraint lock?
-
-LoaderConstraintEntry** LoaderConstraintTable::find_loader_constraint(
-                                    symbolHandle name, Handle loader) {
-
-  unsigned int hash = compute_hash(name);
-  int index = hash_to_index(hash);
-  LoaderConstraintEntry** pp = bucket_addr(index);
-  while (*pp) {
-    LoaderConstraintEntry* p = *pp;
-    if (p->hash() == hash) {
-      if (p->name() == name()) {
-        for (int i = p->num_loaders() - 1; i >= 0; i--) {
-          if (p->loader(i) == loader()) {
-            return pp;
-          }
-        }
-      }
-    }
-    pp = p->next_addr();
-  }
-  return pp;
-}
-
-
-void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
-  // Remove unloaded entries from constraint table
-  for (int index = 0; index < table_size(); index++) {
-    LoaderConstraintEntry** p = bucket_addr(index);
-    while(*p) {
-      LoaderConstraintEntry* probe = *p;
-      klassOop klass = probe->klass();
-      // Remove klass that is no longer alive
-      if (klass != NULL && !is_alive->do_object_b(klass)) {
-        probe->set_klass(NULL);
-	if (TraceLoaderConstraints) {
-	  ResourceMark rm;
-	  tty->print_cr("[Purging class object from constraint for name %s,"
-		     " loader list:", 
-		     probe->name()->as_C_string());
-  	  for (int i = 0; i < probe->num_loaders(); i++) {
-	    tty->print_cr("[   [%d]: %s", i, 
-			  SystemDictionary::loader_name(probe->loader(i)));
-	  }
-	}
-      }
-      // Remove entries no longer alive from loader array
-      int n = 0; 
-      while (n < probe->num_loaders()) {
-        if (probe->loader(n) != NULL) {
-          if (!is_alive->do_object_b(probe->loader(n))) {
-	    if (TraceLoaderConstraints) {
-	      ResourceMark rm;
-              tty->print_cr("[Purging loader %s from constraint for name %s",
-			    SystemDictionary::loader_name(probe->loader(n)),
-			    probe->name()->as_C_string()
-			    );
-	    }
-
-            // Compact array
-            int num = probe->num_loaders() - 1;
-            probe->set_num_loaders(num);
-            probe->set_loader(n, probe->loader(num));
-            probe->set_loader(num, NULL);
-
-	    if (TraceLoaderConstraints) {
-	      ResourceMark rm;
-              tty->print_cr("[New loader list:");
-	      for (int i = 0; i < probe->num_loaders(); i++) {
-                tty->print_cr("[   [%d]: %s", i, 
-			      SystemDictionary::loader_name(probe->loader(i)));
-	      }
-	    }
-
-            continue;  // current element replaced, so restart without
-                       // incrementing n
-          }
-        }
-        n++;
-      }
-      // Check whether entry should be purged
-      if (probe->num_loaders() < 2) {
-	    if (TraceLoaderConstraints) {
-	      ResourceMark rm;
-	      tty->print("[Purging complete constraint for name %s\n", 
-			 probe->name()->as_C_string());
-	    }
-
-        // Purge entry
-        *p = probe->next();
-        FREE_C_HEAP_ARRAY(oop, probe->loaders());
-        free_entry(probe);
-      } else {
-#ifdef ASSERT
-        assert(is_alive->do_object_b(probe->name()), "name should be live");
-        if (probe->klass() != NULL) {
-          assert(is_alive->do_object_b(probe->klass()), "klass should be live");
-        }
-        for (n = 0; n < probe->num_loaders(); n++) {
-          if (probe->loader(n) != NULL) {
-            assert(is_alive->do_object_b(probe->loader(n)), "loader should be live");
-          }
-        }
-#endif
-        // Go to next entry
-        p = probe->next_addr();
-      }
-    }
-  }
-}
-
-bool LoaderConstraintTable::add_entry(symbolHandle class_name,
-                                      klassOop klass1, Handle class_loader1,
-                                      klassOop klass2, Handle class_loader2) {
-  int failure_code = 0; // encode different reasons for failing
-
-  if (klass1 != NULL && klass2 != NULL && klass1 != klass2) {
-    failure_code = 1;
-  } else {
-    klassOop klass = klass1 != NULL ? klass1 : klass2;
-      
-    LoaderConstraintEntry** pp1 = find_loader_constraint(class_name,
-							 class_loader1);
-    if (*pp1 != NULL && (*pp1)->klass() != NULL) {
-      if (klass != NULL) {
-	if (klass != (*pp1)->klass()) {
-	  failure_code = 2;
-	}
-      } else {
-	klass = (*pp1)->klass();
-      }
-    }
-    
-    LoaderConstraintEntry** pp2 = find_loader_constraint(class_name,
-							 class_loader2);
-    if (*pp2 != NULL && (*pp2)->klass() != NULL) {
-      if (klass != NULL) {
-	if (klass != (*pp2)->klass()) {
-	  failure_code = 3;
-	}
-      } else {
-	klass = (*pp2)->klass();
-      }
-    }
-
-    if (failure_code == 0) {
-      if (*pp1 == NULL && *pp2 == NULL) {
-	unsigned int hash = compute_hash(class_name);
-	int index = hash_to_index(hash);
-	LoaderConstraintEntry* p;
-	p = new_entry(hash, class_name(), klass, 2, 2);
-	p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2));
-	p->set_loader(0, class_loader1());
-	p->set_loader(1, class_loader2());
-	p->set_klass(klass);
-	p->set_next(bucket(index));
-	set_entry(index, p);
-	if (TraceLoaderConstraints) {
-	  ResourceMark rm;
-	  tty->print("[Adding new constraint for name: %s, loader[0]: %s,"
-		     " loader[1]: %s ]\n",
-		     class_name()->as_C_string(), 
-		     SystemDictionary::loader_name(class_loader1()),
-		     SystemDictionary::loader_name(class_loader2())
-		     );
-	}
-      } else if (*pp1 == *pp2) {
-	/* constraint already imposed */
-	if ((*pp1)->klass() == NULL) {
-	  (*pp1)->set_klass(klass);
-	  if (TraceLoaderConstraints) {
-	    ResourceMark rm;
-	    tty->print("[Setting class object in existing constraint for"
-		       " name: %s and loader %s ]\n",
-		       class_name()->as_C_string(),
-		       SystemDictionary::loader_name(class_loader1())
-		       );
-	  }
-	} else {
-	  assert((*pp1)->klass() == klass, "loader constraints corrupted");
-	}
-      } else if (*pp1 == NULL) {
-	extend_loader_constraint(*pp2, class_loader1, klass);
-      } else if (*pp2 == NULL) {
-	extend_loader_constraint(*pp1, class_loader2, klass);
-      } else {
-	merge_loader_constraints(pp1, pp2, klass);
-      }
-    }
-  }
-  
-  if (failure_code != 0 && TraceLoaderConstraints) {
-    ResourceMark rm;
-    const char* reason = "";
-    switch(failure_code) {
-    case 1: reason = "the class objects presented by loader[0] and loader[1]"
-	      " are different"; break;
-    case 2: reason = "the class object presented by loader[0] does not match"
-	      " the stored class object in the constraint"; break;
-    case 3: reason = "the class object presented by loader[1] does not match"
-	      " the stored class object in the constraint"; break;
-    default: reason = "unknown reason code";
-    }
-    tty->print("[Failed to add constraint for name: %s, loader[0]: %s,"
-	       " loader[1]: %s, Reason: %s ]\n",
-	       class_name()->as_C_string(),
-	       SystemDictionary::loader_name(class_loader1()),
-	       SystemDictionary::loader_name(class_loader2()),
-	       reason
-	       );
-  }
-  
-  return failure_code == 0;
-}
-
-
-// return true if the constraint was updated, false if the constraint is
-// violated
-bool LoaderConstraintTable::check_or_update(instanceKlassHandle k,
-                                                   Handle loader,
-                                                   symbolHandle name) {
-  LoaderConstraintEntry* p = *(find_loader_constraint(name, loader));
-  if (p && p->klass() != NULL && p->klass() != k()) {
-    if (TraceLoaderConstraints) {
-      ResourceMark rm;
-      tty->print("[Constraint check failed for name %s, loader %s: "
-		 "the presented class object differs from that stored ]\n",
-		 name()->as_C_string(), 
-		 SystemDictionary::loader_name(loader()));
-    }
-    return false;
-  } else {
-    if (p && p->klass() == NULL) {
-      p->set_klass(k());
-      if (TraceLoaderConstraints) {
-	ResourceMark rm;
-	tty->print("[Updating constraint for name %s, loader %s, "
-		   "by setting class object ]\n",
-		   name()->as_C_string(), 
-		   SystemDictionary::loader_name(loader()));
-      }
-    }
-    return true;
-  }
-}
-
-klassOop LoaderConstraintTable::find_constrained_klass(symbolHandle name,
-                                                       Handle loader) {
-  LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
-  if (p != NULL && p->klass() != NULL)
-    return p->klass();
-
-  // No constraints, or else no klass loaded yet.
-  return NULL;
-}
-
-
-klassOop LoaderConstraintTable::find_constrained_elem_klass(symbolHandle name,
-                                                            symbolHandle elem_name,
-                                                            Handle loader,
-                                                            TRAPS) {
-  LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
-  if (p != NULL) {
-    assert(p->klass() == NULL, "Expecting null array klass");
-
-    // The array name has a constraint, but it will not have a class. Check
-    // each loader for an associated elem
-    for (int i = 0; i < p->num_loaders(); i++) {
-      Handle no_protection_domain;
-
-      klassOop k = SystemDictionary::find(elem_name, p->loader(i), no_protection_domain, THREAD);
-      if (k != NULL) {
-        // Return the first elem klass found.
-        return k;
-      }
-    }
-  }
-
-  // No constraints, or else no klass loaded yet.
-  return NULL;
-}
-
-
-void LoaderConstraintTable::ensure_loader_constraint_capacity(
-                                                     LoaderConstraintEntry *p,
-                                                    int nfree) {
-    if (p->max_loaders() - p->num_loaders() < nfree) {
-        int n = nfree + p->num_loaders();
-        oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n);
-        memcpy(new_loaders, p->loaders(), sizeof(oop) * p->num_loaders());
-        p->set_max_loaders(n);
-        FREE_C_HEAP_ARRAY(oop, p->loaders());
-        p->set_loaders(new_loaders);
-    }
-}
- 
-
-void LoaderConstraintTable::extend_loader_constraint(LoaderConstraintEntry* p,
-                                                     Handle loader,
-                                                     klassOop klass) {
-  ensure_loader_constraint_capacity(p, 1);
-  int num = p->num_loaders();
-  p->set_loader(num, loader());
-  p->set_num_loaders(num + 1);
-  if (TraceLoaderConstraints) {
-    ResourceMark rm;
-    tty->print("[Extending constraint for name %s by adding loader[%d]: %s %s",
-	       p->name()->as_C_string(),
-	       num,
-               SystemDictionary::loader_name(loader()),
-	       (p->klass() == NULL ? " and setting class object ]\n" : " ]\n")
-	       );
-  }
-  if (p->klass() == NULL) {
-    p->set_klass(klass);
-  } else {
-    assert(klass == NULL || p->klass() == klass, "constraints corrupted");
-  }
-}
-
-
-void LoaderConstraintTable::merge_loader_constraints(
-                                                   LoaderConstraintEntry** pp1,
-                                                   LoaderConstraintEntry** pp2,
-                                                   klassOop klass) {
-  // make sure *pp1 has higher capacity 
-  if ((*pp1)->max_loaders() < (*pp2)->max_loaders()) {
-    LoaderConstraintEntry** tmp = pp2;
-    pp2 = pp1;
-    pp1 = tmp;
-  }
-  
-  LoaderConstraintEntry* p1 = *pp1;
-  LoaderConstraintEntry* p2 = *pp2;
-  
-  ensure_loader_constraint_capacity(p1, p2->num_loaders());
-
-  for (int i = 0; i < p2->num_loaders(); i++) {
-    int num = p1->num_loaders();
-    p1->set_loader(num, p2->loader(i));
-    p1->set_num_loaders(num + 1);
-  }
-
-  if (TraceLoaderConstraints) {
-    ResourceMark rm;
-    tty->print_cr("[Merged constraints for name %s, new loader list:", 
-		  p1->name()->as_C_string()
-		  );
-  
-    for (int i = 0; i < p1->num_loaders(); i++) {
-      tty->print_cr("[   [%d]: %s", i, 
-		    SystemDictionary::loader_name(p1->loader(i)));
-    }
-    if (p1->klass() == NULL) {
-      tty->print_cr("[... and setting class object]");
-    }
-  }
-  
-  // p1->klass() will hold NULL if klass, p2->klass(), and old
-  // p1->klass() are all NULL.  In addition, all three must have
-  // matching non-NULL values, otherwise either the constraints would
-  // have been violated, or the constraints had been corrupted (and an
-  // assertion would fail).
-  if (p2->klass() != NULL) {
-    assert(p2->klass() == klass, "constraints corrupted");
-  }
-  if (p1->klass() == NULL) {
-    p1->set_klass(klass);
-  } else {
-    assert(p1->klass() == klass, "constraints corrupted");
-  }
-
-  *pp2 = p2->next();
-  FREE_C_HEAP_ARRAY(oop, p2->loaders());
-  free_entry(p2);
-  return;
-}
-
-
-void LoaderConstraintTable::verify(Dictionary* dictionary) {
-  Thread *thread = Thread::current();
-  for (int cindex = 0; cindex < _loader_constraint_size; cindex++) {
-    for (LoaderConstraintEntry* probe = bucket(cindex);
-                                probe != NULL;
-                                probe = probe->next()) {
-      guarantee(probe->name()->is_symbol(), "should be symbol");
-      if (probe->klass() != NULL) {
-        instanceKlass* ik = instanceKlass::cast(probe->klass()); 
-        guarantee(ik->name() == probe->name(), "name should match");
-        symbolHandle name (thread, ik->name());
-        Handle loader(thread, ik->class_loader());
-        unsigned int d_hash = dictionary->compute_hash(name, loader);
-        int d_index = dictionary->hash_to_index(d_hash);
-        klassOop k = dictionary->find_class(d_index, d_hash, name, loader);
-        guarantee(k == probe->klass(), "klass should be in dictionary");
-      }
-      for (int n = 0; n< probe->num_loaders(); n++) {
-        guarantee(probe->loader(n)->is_oop_or_null(), "should be oop");
-      }
-    }
-  }
-}
-
-#ifndef PRODUCT
-
-// Called with the system dictionary lock held
-void LoaderConstraintTable::print() {
-  ResourceMark rm;
-
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  tty->print_cr("Java loader constraints (entries=%d)", _loader_constraint_size);
-  for (int cindex = 0; cindex < _loader_constraint_size; cindex++) {
-    for (LoaderConstraintEntry* probe = bucket(cindex);
-                                probe != NULL;
-                                probe = probe->next()) {
-      tty->print("%4d: ", cindex);
-      probe->name()->print();
-      tty->print(" , loaders:");
-      for (int n = 0; n < probe->num_loaders(); n++) {
-        probe->loader(n)->print_value();
-        tty->print(", ");
-      }
-      tty->cr();
-    }
-  }
-}
-#endif
--- a/hotspot/src/share/vm/memory/loaderConstraints.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,136 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)loaderConstraints.hpp	1.14 07/05/05 17:05:52 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class LoaderConstraintEntry;
-
-class LoaderConstraintTable : public Hashtable {
-  friend class VMStructs;
-private:
-
-  enum Constants {
-    _loader_constraint_size = 107,                     // number of entries in constraint table
-    _nof_buckets            = 1009                     // number of buckets in hash table
-  };
-
-  LoaderConstraintEntry** find_loader_constraint(symbolHandle name,
-                                                 Handle loader);
-
-public:
-
-  LoaderConstraintTable(int nof_buckets);
-
-  LoaderConstraintEntry* new_entry(unsigned int hash, symbolOop name,
-                                   klassOop klass, int num_loaders,
-                                   int max_loaders);
-
-  LoaderConstraintEntry* bucket(int i) {
-    return (LoaderConstraintEntry*)Hashtable::bucket(i);
-  }
-
-  LoaderConstraintEntry** bucket_addr(int i) {
-    return (LoaderConstraintEntry**)Hashtable::bucket_addr(i);
-  }
-
-  // GC support
-  void oops_do(OopClosure* f);
-  void always_strong_classes_do(OopClosure* blk);
-
-  // Check class loader constraints
-  bool add_entry(symbolHandle name, klassOop klass1, Handle loader1,
-                                    klassOop klass2, Handle loader2);
-
-  void check_signature_loaders(symbolHandle signature, Handle loader1,
-                               Handle loader2, bool is_method, TRAPS);
-
-  klassOop find_constrained_klass(symbolHandle name, Handle loader);
-  klassOop find_constrained_elem_klass(symbolHandle name, symbolHandle elem_name,
-                                       Handle loader, TRAPS);
-
-
-  // Class loader constraints
-
-  void ensure_loader_constraint_capacity(LoaderConstraintEntry *p, int nfree);
-  void extend_loader_constraint(LoaderConstraintEntry* p, Handle loader,
-                                klassOop klass);
-  void merge_loader_constraints(LoaderConstraintEntry** pp1,
-                                LoaderConstraintEntry** pp2, klassOop klass);
-
-  bool check_or_update(instanceKlassHandle k, Handle loader,
-                              symbolHandle name);
-
-  
-  void purge_loader_constraints(BoolObjectClosure* is_alive);
-
-  void verify(Dictionary* dictionary);
-#ifndef PRODUCT
-  void print();
-#endif
-};
-
-class LoaderConstraintEntry : public HashtableEntry {
-  friend class VMStructs;
-private:
-  symbolOop              _name;                   // class name
-  int                    _num_loaders;
-  int                    _max_loaders;
-  oop*                   _loaders;                // initiating loaders
-
-public:
-
-  klassOop klass() { return (klassOop)literal(); }
-  klassOop* klass_addr() { return (klassOop*)literal_addr(); }
-  void set_klass(klassOop k) { set_literal(k); }
-
-  LoaderConstraintEntry* next() {
-    return (LoaderConstraintEntry*)HashtableEntry::next();
-  }
-
-  LoaderConstraintEntry** next_addr() {
-    return (LoaderConstraintEntry**)HashtableEntry::next_addr();
-  }
-  void set_next(LoaderConstraintEntry* next) {
-    HashtableEntry::set_next(next);
-  }
-
-  symbolOop name() { return _name; }
-  symbolOop* name_addr() { return &_name; }
-  void set_name(symbolOop name) { _name = name; }
-
-  int num_loaders() { return _num_loaders; }
-  void set_num_loaders(int i) { _num_loaders = i; }
-
-  int max_loaders() { return _max_loaders; }
-  void set_max_loaders(int i) { _max_loaders = i; }
-
-  oop* loaders() { return _loaders; }
-  void set_loaders(oop* loaders) { _loaders = loaders; }
-
-  oop loader(int i) { return _loaders[i]; }
-  oop* loader_addr(int i) { return &_loaders[i]; }
-  void set_loader(int i, oop p) { _loaders[i] = p; }
-
-};
--- a/hotspot/src/share/vm/memory/parGCAllocBuffer.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)parGCAllocBuffer.cpp	1.27 07/05/05 17:05:53 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_parGCAllocBuffer.cpp.incl"
-
-ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
-  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
-  _end(NULL), _hard_end(NULL),
-  _retained(false), _retained_filler(),
-  _allocated(0), _wasted(0)
-{
-  assert (min_size() > AlignmentReserve, "Inconsistency!");
-}
-
-const size_t ParGCAllocBuffer::FillerHeaderSize =
-             align_object_size(arrayOopDesc::header_size(T_INT));
-
-// If the minimum object size is greater than MinObjAlignment, we can
-// end up with a shard at the end of the buffer that's smaller than
-// the smallest object.  We can't allow that because the buffer must
-// look like it's full of objects when we retire it, so we make
-// sure we have enough space for a filler int array object.
-const size_t ParGCAllocBuffer::AlignmentReserve =
-             oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
-
-void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
-  assert(!retain || end_of_gc, "Can only retain at GC end.");
-  if (_retained) {
-    // If the buffer had been retained shorten the previous filler object.
-    assert(_retained_filler.end() <= _top, "INVARIANT");
-    SharedHeap::fill_region_with_object(_retained_filler);
-    // Wasted space book-keeping, otherwise (normally) done in invalidate()
-    _wasted += _retained_filler.word_size();
-    _retained = false;
-  }
-  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
-  if (_top < _hard_end) {
-    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
-    if (!retain) {
-      invalidate();
-    } else {
-      // Is there wasted space we'd like to retain for the next GC?
-      if (pointer_delta(_end, _top) > FillerHeaderSize) {
-	_retained = true;
-	_retained_filler = MemRegion(_top, FillerHeaderSize);
-	_top = _top + FillerHeaderSize;
-      } else {
-        invalidate();
-      }
-    }
-  }
-}
-
-void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
-  assert(ResizePLAB, "Wasted work");
-  stats->add_allocated(_allocated);
-  stats->add_wasted(_wasted);
-  stats->add_unused(pointer_delta(_end, _top));
-}
-
-// Compute desired plab size and latch result for later
-// use. This should be called once at the end of parallel
-// scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz() {
-  assert(ResizePLAB, "Not set");
-  if (_allocated == 0) {
-    assert(_unused == 0, "Inconsistency in PLAB stats");
-    _allocated = 1;
-  }
-  double wasted_frac    = (double)_unused/(double)_allocated;
-  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
-                                   TargetPLABWastePct);
-  if (target_refills == 0) {
-    target_refills = 1;
-  }
-  _used = _allocated - _wasted - _unused;
-  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
-  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
-  // Take historical weighted average
-  _filter.sample(plab_sz);
-  // Clip from above and below, and align to object boundary
-  plab_sz = MAX2(min_size(), (size_t)_filter.average());
-  plab_sz = MIN2(max_size(), plab_sz);
-  plab_sz = align_object_size(plab_sz);
-  // Latch the result
-  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
-  if (ResizePLAB) {
-    _desired_plab_sz = plab_sz;
-  }
-  // Now clear the accumulators for next round:
-  // note this needs to be fixed in the case where we
-  // are retaining across scavenges. FIX ME !!! XXX
-  _allocated = 0;
-  _wasted    = 0;
-  _unused    = 0;
-}
-
-#ifndef PRODUCT
-void ParGCAllocBuffer::print() {
-  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p"
-             "_retained: %c _retained_filler: [%p,%p)\n",
-             _bottom, _top, _end, _hard_end,
-             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
-}
-#endif // !PRODUCT
--- a/hotspot/src/share/vm/memory/parGCAllocBuffer.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,205 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)parGCAllocBuffer.hpp	1.29 07/05/05 17:05:53 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// Forward decl.
-
-class PLABStats;
-
-// A per-thread allocation buffer used during GC.
-class ParGCAllocBuffer: public CHeapObj {
-protected:
-  char head[32];
-  size_t _word_sz;          // in HeapWord units
-  HeapWord* _bottom;
-  HeapWord* _top;
-  HeapWord* _end;       // last allocatable address + 1
-  HeapWord* _hard_end;  // _end + AlignmentReserve
-  bool      _retained;  // whether we hold a _retained_filler
-  MemRegion _retained_filler;
-  // In support of ergonomic sizing of PLAB's
-  size_t    _allocated;     // in HeapWord units
-  size_t    _wasted;        // in HeapWord units
-  char tail[32];
-  static const size_t FillerHeaderSize;
-  static const size_t AlignmentReserve;
-
-public:
-  // Initializes the buffer to be empty, but with the given "word_sz".
-  // Must get initialized with "set_buf" for an allocation to succeed.
-  ParGCAllocBuffer(size_t word_sz);
-
-  static const size_t min_size() {
-    return ThreadLocalAllocBuffer::min_size();
-  }
-
-  static const size_t max_size() {
-    return ThreadLocalAllocBuffer::max_size();
-  }
-
-  // If an allocation of the given "word_sz" can be satisfied within the
-  // buffer, do the allocation, returning a pointer to the start of the
-  // allocated block.  If the allocation request cannot be satisfied,
-  // return NULL.
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* res = _top;
-    HeapWord* new_top = _top + word_sz;
-    if (new_top <= _end) {
-      _top = new_top;
-      return res;
-    } else {
-      return NULL;
-    }
-  }
-
-  // Undo the last allocation in the buffer, which is required to be of the 
-  // "obj" of the given "word_sz".
-  void undo_allocation(HeapWord* obj, size_t word_sz) {
-    assert(_top - word_sz >= _bottom
-	   && _top - word_sz == obj,
-	   "Bad undo_allocation");
-    _top = _top - word_sz;
-  }
-
-  // The total (word) size of the buffer, including both allocated and
-  // unallocted space.
-  size_t word_sz() { return _word_sz; }
-
-  // Should only be done if we are about to reset with a new buffer of the
-  // given size.
-  void set_word_size(size_t new_word_sz) {
-    assert(new_word_sz > AlignmentReserve, "Too small");
-    _word_sz = new_word_sz;
-  }
-
-  // The number of words of unallocated space remaining in the buffer.
-  size_t words_remaining() {
-    assert(_end >= _top, "Negative buffer");
-    return pointer_delta(_end, _top, HeapWordSize);
-  }
-
-  bool contains(void* addr) {
-    return (void*)_bottom <= addr && addr < (void*)_hard_end;
-  }
-
-  // Sets the space of the buffer to be [buf, space+word_sz()).
-  void set_buf(HeapWord* buf) {
-    _bottom   = buf;
-    _top      = _bottom;
-    _hard_end = _bottom + word_sz();
-    _end      = _hard_end - AlignmentReserve;
-    assert(_end >= _top, "Negative buffer");
-    // In support of ergonomic sizing
-    _allocated += word_sz();
-  }
-
-  // Flush the stats supporting ergonomic sizing of PLAB's
-  void flush_stats(PLABStats* stats);
-  void flush_stats_and_retire(PLABStats* stats, bool retain) {
-    // We flush the stats first in order to get a reading of
-    // unused space in the last buffer.
-    if (ResizePLAB) {
-      flush_stats(stats);
-    }
-    // Retire the last allocation buffer.
-    retire(true, retain);
-  }
-
-  // Force future allocations to fail and queries for contains()
-  // to return false
-  void invalidate() {
-    assert(!_retained, "Shouldn't retain an invalidated buffer.");
-    _end    = _hard_end;
-    _wasted += pointer_delta(_end, _top);  // unused  space
-    _top    = _end;      // force future allocations to fail
-    _bottom = _end;      // force future contains() queries to return false
-  }
-
-  // Fills in the unallocated portion of the buffer with a garbage object.
-  // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
-  // is true, attempt to re-use the unused portion in the next GC.
-  void retire(bool end_of_gc, bool retain);
-
-  void print() PRODUCT_RETURN;
-};
-
-// PLAB stats book-keeping
-class PLABStats VALUE_OBJ_CLASS_SPEC {
-  size_t _allocated;      // total allocated
-  size_t _wasted;         // of which wasted (internal fragmentation)
-  size_t _unused;         // Unused in last buffer
-  size_t _used;           // derived = allocated - wasted - unused
-  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
-  AdaptiveWeightedAverage
-         _filter;         // integrator with decay
-  
- public:
-  PLABStats(size_t desired_plab_sz_, unsigned wt) :
-    _allocated(0),
-    _wasted(0),
-    _unused(0),
-    _used(0),
-    _desired_plab_sz(desired_plab_sz_),
-    _filter(wt)
-  {
-    size_t min_sz = min_size();
-    size_t max_sz = max_size();
-    size_t aligned_min_sz = align_object_size(min_sz);
-    size_t aligned_max_sz = align_object_size(max_sz);
-    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
-           min_sz <= max_sz,
-           "PLAB clipping computation in adjust_desired_plab_sz()"
-           " may be incorrect");
-  }
-
-  static const size_t min_size() {
-    return ParGCAllocBuffer::min_size();
-  }
-
-  static const size_t max_size() {
-    return ParGCAllocBuffer::max_size();
-  }
-
-  size_t desired_plab_sz() {
-    return _desired_plab_sz;
-  }
-
-  void adjust_desired_plab_sz(); // filter computation, latches output to
-                                 // _desired_plab_sz, clears sensor accumulators
-
-  void add_allocated(size_t v) {
-    Atomic::add_ptr(v, &_allocated);
-  }
-
-  void add_unused(size_t v) {
-    Atomic::add_ptr(v, &_unused);
-  }
-
-  void add_wasted(size_t v) {
-    Atomic::add_ptr(v, &_wasted);
-  }
-};
-
--- a/hotspot/src/share/vm/memory/parNewGeneration.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1244 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)parNewGeneration.cpp	1.98 07/05/05 17:05:53 JVM"
-#endif
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_parNewGeneration.cpp.incl"
-
-#ifdef _MSC_VER
-#pragma warning( push )
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif
-ParScanThreadState::ParScanThreadState(Space* to_space_,
-                                       ParNewGeneration* gen_,
-				       Generation* old_gen_,
-				       int thread_num_,
-				       ObjToScanQueueSet* work_queue_set_,
-                                       size_t desired_plab_sz_,
-                                       ParallelTaskTerminator& term_) :
-  _to_space(to_space_), _old_gen(old_gen_), _thread_num(thread_num_),
-  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
-  _ageTable(false), // false ==> not the global age table, no perf data.
-  _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
-  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
-  _older_gen_closure(gen_, this),
-  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
-                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
-                      work_queue_set_, &term_),
-  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
-  _keep_alive_closure(&_scan_weak_ref_closure),
-  _pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
-  _strong_roots_time(0.0), _term_time(0.0)
-{
-  _survivor_chunk_array =
-    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
-  _hash_seed = 17;  // Might want to take time-based random value.
-  _start = os::elapsedTime();
-  _old_gen_closure.set_generation(old_gen_);
-  _old_gen_root_closure.set_generation(old_gen_);
-}
-#ifdef _MSC_VER
-#pragma warning( pop )
-#endif
-
-void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
-                                              size_t plab_word_size) {
-  ChunkArray* sca = survivor_chunk_array();
-  if (sca != NULL) {
-    // A non-null SCA implies that we want the PLAB data recorded.
-    sca->record_sample(plab_start, plab_word_size);
-  }
-}
-
-bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
-  return new_obj->is_objArray() &&
-         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
-         new_obj != old_obj;
-}
-
-void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
-  assert(old->is_objArray(), "must be obj array");
-  assert(old->is_forwarded(), "must be forwarded");
-  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
-  assert(!_old_gen->is_in(old), "must be in young generation.");
-
-  objArrayOop obj = objArrayOop(old->forwardee());
-  // Process ParGCArrayScanChunk elements now
-  // and push the remainder back onto queue
-  int start     = arrayOop(old)->length();
-  int end       = obj->length();
-  int remainder = end - start;
-  assert(start <= end, "just checking");
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    // Test above combines last partial chunk with a full chunk
-    end = start + ParGCArrayScanChunk;
-    arrayOop(old)->set_length(end);
-    // Push remainder.
-    bool ok = work_queue()->push(old);
-    assert(ok, "just popped, push must be okay");
-    note_push();
-  } else {
-    // Restore length so that it can be used if there
-    // is a promotion failure and forwarding pointers
-    // must be removed.
-    arrayOop(old)->set_length(end);
-  }
-  // process our set of indices (include header in first chunk)
-  oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start);
-  oop* end_addr   = obj->base() + end; // obj_at_addr(end) asserts end < length
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  if ((HeapWord *)obj < young_old_boundary()) {
-    // object is in to_space
-    obj->oop_iterate(&_to_space_closure, mr);
-  } else {
-    // object is in old generation
-    obj->oop_iterate(&_old_gen_closure, mr);
-  }
-}
-
-
-void ParScanThreadState::trim_queues(int max_size) {
-  ObjToScanQueue* queue = work_queue();
-  while (queue->size() > (juint)max_size) { 
-    oop obj_to_scan;
-    if (queue->pop_local(obj_to_scan)) {
-      note_pop();
-
-      if ((HeapWord *)obj_to_scan < young_old_boundary()) {
-        if (obj_to_scan->is_objArray() &&
-            obj_to_scan->is_forwarded() &&
-            obj_to_scan->forwardee() != obj_to_scan) {
-          scan_partial_array_and_push_remainder(obj_to_scan);
-        } else {
-          // object is in to_space
-          obj_to_scan->oop_iterate(&_to_space_closure);
-        }
-      } else {
-        // object is in old generation
-        obj_to_scan->oop_iterate(&_old_gen_closure);
-      }
-    }
-  }
-}
-
-HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
-
-  // Otherwise, if the object is small enough, try to reallocate the
-  // buffer.
-  HeapWord* obj = NULL;
-  if (!_to_space_full) {
-    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
-    Space*            const sp   = to_space();
-    if (word_sz * 100 <
-	ParallelGCBufferWastePct * plab->word_sz()) {
-      // Is small enough; abandon this buffer and start a new one.
-      plab->retire(false, false);
-      size_t buf_size = plab->word_sz();
-      HeapWord* buf_space = sp->par_allocate(buf_size);
-      if (buf_space == NULL) {
-        const size_t min_bytes =
-          ParGCAllocBuffer::min_size() << LogHeapWordSize;
-        size_t free_bytes = sp->free();
-        while(buf_space == NULL && free_bytes >= min_bytes) {
-          buf_size = free_bytes >> LogHeapWordSize;
-          assert(buf_size == (size_t)align_object_size(buf_size),
-                 "Invariant");
-	  buf_space  = sp->par_allocate(buf_size);
-          free_bytes = sp->free();
-        }
-      }
-      if (buf_space != NULL) {
-	plab->set_word_size(buf_size);
-	plab->set_buf(buf_space);
-        record_survivor_plab(buf_space, buf_size);
-	obj = plab->allocate(word_sz);
-        // Note that we cannot compare buf_size < word_sz below
-        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
-	assert(obj != NULL || plab->words_remaining() < word_sz,
-               "Else should have been able to allocate");
-        // It's conceivable that we may be able to use the
-        // buffer we just grabbed for subsequent small requests
-        // even if not for this one.
-      } else {
-	// We're used up.
-	_to_space_full = true;
-      }
-
-    } else {
-      // Too large; allocate the object individually.
-      obj = sp->par_allocate(word_sz);
-    }
-  }
-  return obj;
-}
-
-
-void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
-						size_t word_sz) {
-  // Is the alloc in the current alloc buffer?
-  if (to_space_alloc_buffer()->contains(obj)) {
-    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
-	   "Should contain whole object.");
-    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
-  } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
-  }
-}
-
-class ParScanThreadStateSet: private ResourceArray {
-public:
-  // Initializes states for the specified number of threads;
-  ParScanThreadStateSet(int                     num_threads, 
-                        Space&                  to_space, 
-                        ParNewGeneration&       gen,
-                        Generation&             old_gen, 
-                        ObjToScanQueueSet&      queue_set, 
-                        size_t                  desired_plab_sz,
-                        ParallelTaskTerminator& term);
-  inline ParScanThreadState& thread_sate(int i);
-  int pushes() { return _pushes; }
-  int pops()   { return _pops; }
-  int steals() { return _steals; }
-  void reset();
-  void flush();
-private:
-  ParallelTaskTerminator& _term;
-  ParNewGeneration&       _gen;
-  Generation&             _next_gen;
-  // staticstics
-  int _pushes;
-  int _pops;
-  int _steals;
-};
-
-
-ParScanThreadStateSet::ParScanThreadStateSet(
-  int num_threads, Space& to_space, ParNewGeneration& gen,
-  Generation& old_gen, ObjToScanQueueSet& queue_set, 
-  size_t desired_plab_sz, ParallelTaskTerminator& term)
-  : ResourceArray(sizeof(ParScanThreadState), num_threads),
-    _gen(gen), _next_gen(old_gen), _term(term),
-    _pushes(0), _pops(0), _steals(0)
-{
-  assert(num_threads > 0, "sanity check!");
-  // Initialize states.
-  for (int i = 0; i < num_threads; ++i) {
-    new ((ParScanThreadState*)_data + i) 
-        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
-                           desired_plab_sz, term);
-  }
-}
-
-inline ParScanThreadState& ParScanThreadStateSet::thread_sate(int i)
-{
-  assert(i >= 0 && i < length(), "sanity check!");
-  return ((ParScanThreadState*)_data)[i];
-}
-
-
-void ParScanThreadStateSet::reset()
-{
-  _term.reset_for_reuse();
-}
-
-void ParScanThreadStateSet::flush()
-{
-  for (int i = 0; i < length(); ++i) {
-    ParScanThreadState& par_scan_state = thread_sate(i);
-  
-    // Flush stats related to To-space PLAB activity and
-    // retire the last buffer.
-    par_scan_state.to_space_alloc_buffer()->
-      flush_stats_and_retire(_gen.plab_stats(),
-                             false /* !retain */);
-
-    // Every thread has its own age table.  We need to merge
-    // them all into one.
-    ageTable *local_table = par_scan_state.age_table();
-    _gen.age_table()->merge(local_table);
-
-    // Inform old gen that we're done.
-    _next_gen.par_promote_alloc_done(i);
-    _next_gen.par_oop_since_save_marks_iterate_done(i);
-
-    // Flush stats related to work queue activity (push/pop/steal)
-    // This could conceivably become a bottleneck; if so, we'll put the
-    // stat's gathering under the flag.
-    if (PAR_STATS_ENABLED) {
-      _pushes += par_scan_state.pushes();
-      _pops   += par_scan_state.pops();
-      _steals += par_scan_state.steals();
-      if (ParallelGCVerbose) {
-        gclog_or_tty->print("Thread %d complete:\n"
-                            "  Pushes: %7d    Pops: %7d    Steals %7d (in %d attempts)\n",
-                            i, par_scan_state.pushes(), par_scan_state.pops(),
-                            par_scan_state.steals(), par_scan_state.steal_attempts());
-        if (par_scan_state.overflow_pushes() > 0 ||
-            par_scan_state.overflow_refills() > 0) {
-          gclog_or_tty->print("  Overflow pushes: %7d    "
-                              "Overflow refills: %7d for %d objs.\n",
-                              par_scan_state.overflow_pushes(),
-                              par_scan_state.overflow_refills(),
-                              par_scan_state.overflow_refill_objs());
-        }
-
-        double elapsed = par_scan_state.elapsed();
-        double strong_roots = par_scan_state.strong_roots_time();
-        double term = par_scan_state.term_time();
-        gclog_or_tty->print(
-                            "  Elapsed: %7.2f ms.\n"
-                            "    Strong roots: %7.2f ms (%6.2f%%)\n"
-                            "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
-                           elapsed * 1000.0,
-                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
-                           term * 1000.0, (term*100.0/elapsed),
-                           par_scan_state.term_attempts());
-      }
-    }
-  }
-}
-
-
-ParScanClosure::ParScanClosure(ParNewGeneration* g,
-			       ParScanThreadState* par_scan_state) :
-  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
-{
-  assert(_g->level() == 0, "Optimized for youngest generation");
-  _boundary = _g->reserved().end();
-}
-
-ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
-                                             ParScanThreadState* par_scan_state)
-  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
-{
-}
-
-#ifdef WIN32
-#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
-#endif
-
-ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
-    ParScanThreadState* par_scan_state_,
-    ParScanWithoutBarrierClosure* to_space_closure_,
-    ParScanWithBarrierClosure* old_gen_closure_,
-    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
-    ParNewGeneration* par_gen_,
-    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
-    ObjToScanQueueSet* task_queues_,
-    ParallelTaskTerminator* terminator_) :
-
-    _par_scan_state(par_scan_state_),
-    _to_space_closure(to_space_closure_),
-    _old_gen_closure(old_gen_closure_),
-    _to_space_root_closure(to_space_root_closure_),
-    _old_gen_root_closure(old_gen_root_closure_),
-    _par_gen(par_gen_),
-    _task_queues(task_queues_),
-    _terminator(terminator_)
-{}
-
-void ParEvacuateFollowersClosure::do_void() {
-  ObjToScanQueue* work_q = par_scan_state()->work_queue();
-
-  while (true) {
-
-    // Scan to-space and old-gen objs until we run out of both.
-    oop obj_to_scan;
-    par_scan_state()->trim_queues(0);
-
-    // We have no local work, attempt to steal from other threads.
-
-    // attempt to steal work from promoted.
-    par_scan_state()->note_steal_attempt();
-    if (task_queues()->steal(par_scan_state()->thread_num(),
-                             par_scan_state()->hash_seed(),
-                             obj_to_scan)) {
-      par_scan_state()->note_steal();
-      bool res = work_q->push(obj_to_scan);
-      assert(res, "Empty queue should have room for a push.");
-
-      par_scan_state()->note_push();
-      //   if successful, goto Start.
-      continue;
-
-      // try global overflow list.
-    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
-      continue;
-    }
-
-    // Otherwise, offer termination.
-    par_scan_state()->start_term_time();
-    if (terminator()->offer_termination()) break;
-    par_scan_state()->end_term_time();
-  }
-  // Finish the last termination pause.
-  par_scan_state()->end_term_time();
-}
-
-ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
-		HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
-    AbstractGangTask("ParNewGeneration collection"),
-    _gen(gen), _next_gen(next_gen),
-    _young_old_boundary(young_old_boundary),
-    _state_set(state_set)
-  {}
-
-void ParNewGenTask::work(int i) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  // Since this is being done in a separate thread, need new resource
-  // and handle marks.
-  ResourceMark rm;
-  HandleMark hm;
-  // We would need multiple old-gen queues otherwise.
-  guarantee(gch->n_gens() == 2,
-     "Par young collection currently only works with one older gen.");
-#ifdef JVMPI_SUPPORT
-  guarantee(!Universe::jvmpi_slow_allocation(),
-     "To support jvmpi_slow_allocation, must add new "
-     "ParScanClosure types.");
-#endif // JVMPI_SUPPORT
-
-  Generation* old_gen = gch->next_gen(_gen);
-
-  ParScanThreadState& par_scan_state = _state_set->thread_sate(i);
-  par_scan_state.set_young_old_boundary(_young_old_boundary);
-  
-  par_scan_state.start_strong_roots();
-  gch->gen_process_strong_roots(_gen->level(),
-                                true, // Process younger gens, if any,
-                                      // as strong roots.
-                                false,// not collecting perm generation.
-                                SharedHeap::SO_AllClasses,
-                                &par_scan_state.older_gen_closure(),
-                                &par_scan_state.to_space_root_closure());
-  par_scan_state.end_strong_roots();
-
-  // "evacuate followers".
-  par_scan_state.evacuate_followers_closure().do_void();
-}
-
-#ifdef _MSC_VER
-#pragma warning( push )
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif
-ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
-  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
-  _overflow_list(NULL),
-  _is_alive_closure(this),
-  _plab_stats(YoungPLABSize, PLABWeight)
-{
-  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
-  guarantee(_task_queues != NULL, "task_queues allocation failure.");
-
-  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
-    ObjToScanQueuePadded *q_padded = new ObjToScanQueuePadded();
-    guarantee(q_padded != NULL, "work_queue Allocation failure.");
-
-    _task_queues->register_queue(i1, &q_padded->work_queue);
-  }
-
-  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
-    _task_queues->queue(i2)->initialize();
-
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char* cname =
-         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
-                                     ParallelGCThreads, CHECK);
-  }
-}
-#ifdef _MSC_VER
-#pragma warning( pop )
-#endif
-
-// ParNewGeneration::
-ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
-  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
-
-void
-// ParNewGeneration::
-ParKeepAliveClosure::do_oop(oop* p) {
-  // We never expect to see a null reference being processed
-  // as a weak reference.
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
-  _par_cl->do_oop_nv(p);
-
-  if (Universe::heap()->is_in_reserved(p)) {
-    _rs->write_ref_field_gc_par(p, *p);
-  }
-}
-
-// ParNewGeneration::
-KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
-  DefNewGeneration::KeepAliveClosure(cl) {}
-
-void
-// ParNewGeneration::
-KeepAliveClosure::do_oop(oop* p) {
-  // We never expect to see a null reference being processed
-  // as a weak reference.
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
-  _cl->do_oop_nv(p);
-
-  if (Universe::heap()->is_in_reserved(p)) {
-    _rs->write_ref_field_gc_par(p, *p);
-  }
-}
-
-void ScanClosureWithParBarrier::do_oop(oop* p) {
-  oop obj = *p;
-  // Should we copy the obj?
-  if (obj != NULL) {
-    if ((HeapWord*)obj < _boundary) {
-      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {        
-        *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
-      }
-    }
-    if (_gc_barrier) {
-      // If p points to a younger generation, mark the card.
-      if ((HeapWord*)obj < _gen_boundary) {
-	_rs->write_ref_field_gc_par(p, obj);
-      }
-    }
-  }
-}
-
-class ParNewRefProcTaskProxy: public AbstractGangTask {
-  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-public:
-  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,		
-                         Generation& next_gen,
-                         HeapWord* young_old_boundary,
-                         ParScanThreadStateSet& state_set);
-
-private:
-  virtual void work(int i);
-  
-private:
-  ParNewGeneration&      _gen;
-  ProcessTask&           _task;
-  Generation&            _next_gen;
-  HeapWord*              _young_old_boundary;
-  ParScanThreadStateSet& _state_set;
-};
-
-ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
-    ProcessTask& task, ParNewGeneration& gen,		
-    Generation& next_gen, 
-    HeapWord* young_old_boundary,
-    ParScanThreadStateSet& state_set)
-  : AbstractGangTask("ParNewGeneration parallel reference processing"),
-    _gen(gen),
-    _task(task),
-    _next_gen(next_gen), 
-    _young_old_boundary(young_old_boundary),
-    _state_set(state_set)
-{
-}
-
-void ParNewRefProcTaskProxy::work(int i)
-{
-  ResourceMark rm;
-  HandleMark hm;
-  ParScanThreadState& par_scan_state = _state_set.thread_sate(i);
-  par_scan_state.set_young_old_boundary(_young_old_boundary);
-  _task.work(i, par_scan_state.is_alive_closure(), 
-             par_scan_state.keep_alive_closure(), 
-             par_scan_state.evacuate_followers_closure());
-}
-
-class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
-  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
-  EnqueueTask& _task;
-
-public:
-  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
-    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
-      _task(task)
-  { }
-
-  virtual void work(int i)
-  {
-    _task.work(i);
-  }
-};
-
-
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
-{
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-         "not a generational heap");
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
-                                 _generation.reserved().end(), _state_set);
-  workers->run_task(&rp_task);
-  _state_set.reset();
-}
-
-void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
-{
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  ParNewRefEnqueueTaskProxy enq_task(task);
-  workers->run_task(&enq_task);
-}
-
-void ParNewRefProcTaskExecutor::set_single_threaded_mode() 
-{ 
-  _state_set.flush(); 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->set_par_threads(0);  // 0 ==> non-parallel.
-  gch->save_marks();
-}
-
-ScanClosureWithParBarrier::
-ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
-  ScanClosure(g, gc_barrier) {}
-
-EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
-				OopsInGenClosure* cur,
-				OopsInGenClosure* older) :
-  _gch(gch), _level(level),
-  _scan_cur_or_nonheap(cur), _scan_older(older)
-{}
-
-void EvacuateFollowersClosureGeneral::do_void() {
-  do {
-    // Beware: this call will lead to closure applications via virtual
-    // calls.
-    _gch->oop_since_save_marks_iterate(_level,
-				       _scan_cur_or_nonheap,
-				       _scan_older);
-  } while (!_gch->no_allocs_since_save_marks(_level));
-}
-
-
-bool ParNewGeneration::_avoid_promotion_undo = false;
-
-void ParNewGeneration::adjust_desired_tenuring_threshold() {
-  // Set the desired survivor size to half the real survivor space
-  _tenuring_threshold =
-    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
-}
-
-// A Generation that does parallel young-gen collection.
-
-void ParNewGeneration::collect(bool   full,
-                               bool   clear_all_soft_refs,
-			       size_t size,
-                               bool   is_tlab) {
-  assert(full || size > 0, "otherwise we don't want to collect");
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "not a CMS generational heap");
-  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
-  WorkGang* workers = gch->workers();
-  _next_gen = gch->next_gen(this);
-  assert(_next_gen != NULL, 
-    "This must be the youngest gen, and not the only gen");
-  assert(gch->n_gens() == 2,
-	 "Par collection currently only works with single older gen.");
-  // Do we have to avoid promotion_undo?
-  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
-    set_avoid_promotion_undo(true);
-  }
-
-  // If the next generation is too full to accomodate worst-case promotion
-  // from this generation, pass on collection; let the next generation
-  // do it.
-  if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
-    return;
-  }
-  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
-
-  init_assuming_no_promotion_failure();
-
-  if (UseAdaptiveSizePolicy) {
-    set_survivor_overflow(false);
-    size_policy->minor_collection_begin();
-  }
-
-  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
-  // Capture heap used before collection (for printing).
-  size_t gch_prev_used = gch->used();
-
-  SpecializationStats::clear();
-
-  age_table()->clear();
-  to()->clear();
-
-  gch->save_marks();
-  assert(workers != NULL, "Need parallel worker threads.");
-  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
-  ParScanThreadStateSet thread_state_set(workers->total_workers(),
-                                         *to(), *this, *_next_gen, *task_queues(), 
-                                         desired_plab_sz(), _term);
-
-  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
-  int n_workers = workers->total_workers();
-  gch->set_par_threads(n_workers);
-  gch->change_strong_roots_parity();
-  gch->rem_set()->prepare_for_younger_refs_iterate(true);
-  // It turns out that even when we're using 1 thread, doing the work in a
-  // separate thread causes wide variance in run times.  We can't help this 
-  // in the multi-threaded case, but we special-case n=1 here to get
-  // repeatable measurements of the 1-thread overhead of the parallel code.
-  if (n_workers > 1) {
-    workers->run_task(&tsk);
-  } else {
-    tsk.work(0);
-  }
-  thread_state_set.reset();
-
-  if (PAR_STATS_ENABLED && ParallelGCVerbose) {
-    gclog_or_tty->print("Thread totals:\n"
-	       "  Pushes: %7d    Pops: %7d    Steals %7d (sum = %7d).\n",
-	       thread_state_set.pushes(), thread_state_set.pops(), 
-               thread_state_set.steals(),
-	       thread_state_set.pops()+thread_state_set.steals());
-  }
-  assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
-	 "Or else the queues are leaky.");
-
-  // For now, process discovered weak refs sequentially.
-#ifdef COMPILER2
-  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
- 
-  // Process (weak) reference objects found during scavenge.
-  IsAliveClosure is_alive(this);
-  ScanWeakRefClosure scan_weak_ref(this);
-  KeepAliveClosure keep_alive(&scan_weak_ref);
-  ScanClosure               scan_without_gc_barrier(this, false);
-  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
-  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
-  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, 
-    &scan_without_gc_barrier, &scan_with_gc_barrier);
-  if (ref_processor()->processing_is_mt()) {
-    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->process_discovered_references(
-        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, 
-        &task_executor);
-  } else {
-    thread_state_set.flush();
-    gch->set_par_threads(0);  // 0 ==> non-parallel.
-    gch->save_marks();
-    ref_processor()->process_discovered_references(
-      soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-      NULL);
-  }
-  if (!promotion_failed()) {
-    // Swap the survivor spaces.
-    eden()->clear();
-    from()->clear();
-    swap_spaces();
-  
-    assert(to()->is_empty(), "to space should be empty now");
-  } else {
-    assert(HandlePromotionFailure, 
-      "Should only be here if promotion failure handling is on");
-    if (_promo_failure_scan_stack != NULL) {
-      // Can be non-null because of reference processing.
-      // Free stack with its elements.
-      delete _promo_failure_scan_stack;
-      _promo_failure_scan_stack = NULL;
-    }
-    remove_forwarding_pointers();
-    if (PrintGCDetails) {
-      gclog_or_tty->print(" (promotion failed)");
-    }
-    // All the spaces are in play for mark-sweep.
-    from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
-  }
-  // set new iteration safe limit for the survivor spaces
-  from()->set_concurrent_iteration_safe_limit(from()->top());
-  to()->set_concurrent_iteration_safe_limit(to()->top());
-
-  adjust_desired_tenuring_threshold();
-  if (ResizePLAB) {
-    plab_stats()->adjust_desired_plab_sz();
-  }
-
-  if (PrintGC && !PrintGCDetails) {
-    gch->print_heap_change(gch_prev_used);
-  }
-
-  if (UseAdaptiveSizePolicy) {
-    size_policy->minor_collection_end(gch->gc_cause());
-    size_policy->avg_survived()->sample(from()->used());
-  }
-
-  update_time_of_last_gc(os::javaTimeMillis());
-
-  SpecializationStats::print();
-  
-  ref_processor()->set_enqueuing_is_done(true);
-  if (ref_processor()->processing_is_mt()) {
-    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->enqueue_discovered_references(&task_executor);
-  } else {
-    ref_processor()->enqueue_discovered_references(NULL);
-  }
-  ref_processor()->verify_no_references_recorded();
-}
-
-static int sum;
-void ParNewGeneration::waste_some_time() {
-  for (int i = 0; i < 100; i++) {
-    sum += i;
-  }
-}
-
-static const oop ClaimedForwardPtr = oop(0x4);
-
-// Because of concurrency, there are times where an object for which
-// "is_forwarded()" is true contains an "interim" forwarding pointer
-// value.  Such a value will soon be overwritten with a real value.
-// This method requires "obj" to have a forwarding pointer, and waits, if
-// necessary for a real one to be inserted, and returns it.
-
-oop ParNewGeneration::real_forwardee(oop obj) {
-  oop forward_ptr = obj->forwardee();
-  if (forward_ptr != ClaimedForwardPtr) {
-    return forward_ptr;
-  } else {
-    return real_forwardee_slow(obj);
-  }
-}
-
-oop ParNewGeneration::real_forwardee_slow(oop obj) {
-  // Spin-read if it is claimed but not yet written by another thread.
-  oop forward_ptr = obj->forwardee();
-  while (forward_ptr == ClaimedForwardPtr) {
-    waste_some_time();
-    assert(obj->is_forwarded(), "precondition");
-    forward_ptr = obj->forwardee();
-  }
-  return forward_ptr;
-}
-
-#ifdef ASSERT
-bool ParNewGeneration::is_legal_forward_ptr(oop p) {
-  return
-    (_avoid_promotion_undo && p == ClaimedForwardPtr)
-    || Universe::heap()->is_in_reserved(p);
-}
-#endif
-
-void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
-  if ((m != markOopDesc::prototype()) &&
-      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
-    MutexLocker ml(ParGCRareEvent_lock);
-    DefNewGeneration::preserve_mark_if_necessary(obj, m);
-  }
-}
-
-// Multiple GC threads may try to promote an object.  If the object
-// is successfully promoted, a forwarding pointer will be installed in
-// the object in the young generation.  This method claims the right
-// to install the forwarding pointer before it copies the object,
-// thus avoiding the need to undo the copy as in
-// copy_to_survivor_space_avoiding_with_undo.
- 
-oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
-#ifdef JVMPI_SUPPORT
-	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m,
-	bool jvmpi_slow_alloc) {
-#else // !JVMPI_SUPPORT
-	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
-#endif // JVMPI_SUPPORT
-  // In the sequential version, this assert also says that the object is
-  // not forwarded.  That might not be the case here.  It is the case that
-  // the caller observed it to be not forwarded at some time in the past.
-  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
-
-  // The sequential code read "old->age()" below.  That doesn't work here,
-  // since the age is in the mark word, and that might be overwritten with
-  // a forwarding pointer by a parallel thread.  So we must save the mark
-  // word in a local and then analyze it.
-  oopDesc dummyOld;
-  dummyOld.set_mark(m);
-  assert(!dummyOld.is_forwarded(),
-	 "should not be called with forwarding pointer mark word.");
-  
-  oop new_obj = NULL;
-  oop forward_ptr;
-
-#ifdef JVMPI_SUPPORT
-  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
-  // enabled)
-  if (dummyOld.age() < tenuring_threshold() &&
-      !jvmpi_slow_alloc) {
-      //!Universe::jvmpi_slow_allocation()
-#else // !JVMPI_SUPPORT
-  // Try allocating obj in to-space (unless too old)
-  if (dummyOld.age() < tenuring_threshold()) {
-#endif // JVMPI_SUPPORT
-    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
-    if (new_obj == NULL) {
-      set_survivor_overflow(true);
-    }
-  }
-
-  if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
-
-    // Attempt to install a null forwarding pointer (atomically),
-    // to claim the right to install the real forwarding pointer.
-    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
-    if (forward_ptr != NULL) {
-      // someone else beat us to it.
-	return real_forwardee(old);
-    }
-
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
-				       old, m, sz);
-
-    if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
-      // promotion failed, forward to self
-      _promotion_failed = true;
-      new_obj = old;
-
-      preserve_mark_if_necessary(old, m);
-    }
-
-    old->forward_to(new_obj);
-    forward_ptr = NULL;
-  } else {
-    // Is in to-space; do copying ourselves.
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    forward_ptr = old->forward_to_atomic(new_obj);
-    // Restore the mark word copied above.
-    new_obj->set_mark(m);
-    // Increment age if obj still in new generation
-    new_obj->incr_age();
-    par_scan_state->age_table()->add(new_obj, sz);
-  }
-  assert(new_obj != NULL, "just checking");
-
-  if (forward_ptr == NULL) {
-#ifdef JVMPI_SUPPORT
-    if (Universe::jvmpi_move_event_enabled() && (new_obj != old)) {
-      Universe::jvmpi_object_move(old, new_obj);
-    }
-#endif // JVMPI_SUPPORT
-    oop obj_to_push = new_obj;
-    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
-      // Length field used as index of next element to be scanned.
-      // Real length can be obtained from real_forwardee()
-      arrayOop(old)->set_length(0);
-      obj_to_push = old;
-      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
-             "push forwarded object");
-    }
-    // Push it on one of the queues of to-be-scanned objects.
-    if (!par_scan_state->work_queue()->push(obj_to_push)) {
-      // Add stats for overflow pushes.
-      if (Verbose && PrintGCDetails) {
-        gclog_or_tty->print("queue overflow!\n");
-      }
-      push_on_overflow_list(old);
-      par_scan_state->note_overflow_push();
-    }
-    par_scan_state->note_push();
-
-    return new_obj;
-  } 
-
-  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
-  // allocate it?
-  if (is_in_reserved(new_obj)) {
-    // Must be in to_space.
-    assert(to()->is_in_reserved(new_obj), "Checking");
-    if (forward_ptr == ClaimedForwardPtr) {
-      // Wait to get the real forwarding pointer value.
-      forward_ptr = real_forwardee(old);
-    }
-    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
-  }
-
-  return forward_ptr;
-}
-
-
-// Multiple GC threads may try to promote the same object.  If two
-// or more GC threads copy the object, only one wins the race to install
-// the forwarding pointer.  The other threads have to undo their copy.
-
-oop ParNewGeneration::copy_to_survivor_space_with_undo(
-#ifdef JVMPI_SUPPORT
-	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m,
-	bool jvmpi_slow_alloc) {
-#else // !JVMPI_SUPPORT
-	ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
-#endif // JVMPI_SUPPORT
-
-  // In the sequential version, this assert also says that the object is
-  // not forwarded.  That might not be the case here.  It is the case that
-  // the caller observed it to be not forwarded at some time in the past.
-  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
-
-  // The sequential code read "old->age()" below.  That doesn't work here,
-  // since the age is in the mark word, and that might be overwritten with
-  // a forwarding pointer by a parallel thread.  So we must save the mark
-  // word here, install it in a local oopDesc, and then analyze it.
-  oopDesc dummyOld;
-  dummyOld.set_mark(m);
-  assert(!dummyOld.is_forwarded(),
-	 "should not be called with forwarding pointer mark word.");
-  
-  bool failed_to_promote = false;
-  oop new_obj = NULL;
-  oop forward_ptr;
-
-#ifdef JVMPI_SUPPORT
-  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
-  // enabled)
-  if (dummyOld.age() < tenuring_threshold() &&
-      !jvmpi_slow_alloc) {
-      //!Universe::jvmpi_slow_allocation()
-#else // !JVMPI_SUPPORT
-  // Try allocating obj in to-space (unless too old)
-  if (dummyOld.age() < tenuring_threshold()) {
-#endif // JVMPI_SUPPORT
-    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
-    if (new_obj == NULL) {
-      set_survivor_overflow(true);
-    }
-  }
-
-  if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
-				       old, m, sz);
-
-    if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
-        // flag is incorrectly set. In any case, its seriously wrong to be
-        // here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
-      // promotion failed, forward to self
-      forward_ptr = old->forward_to_atomic(old);
-      new_obj = old;
-
-      if (forward_ptr != NULL) {
-        return forward_ptr;   // someone else succeeded
-      }
-
-      _promotion_failed = true;
-      failed_to_promote = true;
-
-      preserve_mark_if_necessary(old, m);
-    }
-  } else {
-    // Is in to-space; do copying ourselves.
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    // Restore the mark word copied above.
-    new_obj->set_mark(m);
-    // Increment age if new_obj still in new generation
-    new_obj->incr_age();
-    par_scan_state->age_table()->add(new_obj, sz);
-  }
-  assert(new_obj != NULL, "just checking");
-
-  // Now attempt to install the forwarding pointer (atomically).
-  // We have to copy the mark word before overwriting with forwarding
-  // ptr, so we can restore it below in the copy.
-  if (!failed_to_promote) {
-    forward_ptr = old->forward_to_atomic(new_obj);
-  }
-
-  if (forward_ptr == NULL) {
-#ifdef JVMPI_SUPPORT
-    if (Universe::jvmpi_move_event_enabled() && (new_obj != old)) {
-      Universe::jvmpi_object_move(old, new_obj);
-    }
-#endif // JVMPI_SUPPORT
-    oop obj_to_push = new_obj;
-    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
-      // Length field used as index of next element to be scanned.
-      // Real length can be obtained from real_forwardee()
-      arrayOop(old)->set_length(0);
-      obj_to_push = old;
-      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
-             "push forwarded object");
-    }
-    // Push it on one of the queues of to-be-scanned objects.
-    if (!par_scan_state->work_queue()->push(obj_to_push)) {
-      // Add stats for overflow pushes.
-      push_on_overflow_list(old);
-      par_scan_state->note_overflow_push();
-    }
-    par_scan_state->note_push();
-
-    return new_obj;
-  } 
-
-  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
-  // allocate it?
-  if (is_in_reserved(new_obj)) {
-    // Must be in to_space.
-    assert(to()->is_in_reserved(new_obj), "Checking");
-    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
-  } else {
-    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
-    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
-                                      (HeapWord*)new_obj, sz);
-  }
-
-  return forward_ptr;
-}
-
-void ParNewGeneration::push_on_overflow_list(oop from_space_obj) {
-  oop cur_overflow_list = _overflow_list;
-  // if the object has been forwarded to itself, then we cannot
-  // use the klass pointer for the linked list.  Instead we have
-  // to allocate an oopDesc in the C-Heap and use that for the linked list.
-  if (from_space_obj->forwardee() == from_space_obj) {
-    oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
-    listhead->forward_to(from_space_obj);
-    from_space_obj = listhead;
-  }
-  while (true) {
-    from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
-    oop observed_overflow_list =
-      (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
-    if (observed_overflow_list == cur_overflow_list) break;
-    // Otherwise...
-    cur_overflow_list = observed_overflow_list;
-  }
-}
-
-bool
-ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
-  ObjToScanQueue* work_q = par_scan_state->work_queue();
-  // How many to take?
-  int objsFromOverflow = MIN2(work_q->max_elems()/4,
-			      (juint)ParGCDesiredObjsFromOverflowList);
-
-  if (_overflow_list == NULL) return false;
-
-  // Otherwise, there was something there; try claiming the list.
-  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
-
-  if (prefix == NULL) {
-    return false;
-  }
-  // Trim off a prefix of at most objsFromOverflow items
-  int i = 1;
-  oop cur = prefix;
-  while (i < objsFromOverflow && cur->klass() != NULL) {
-    i++; cur = oop(cur->klass());
-  }
-
-  // Reattach remaining (suffix) to overflow list
-  if (cur->klass() != NULL) {
-    oop suffix = oop(cur->klass());
-    cur->set_klass_to_list_ptr(NULL);
-
-    // Find last item of suffix list
-    oop last = suffix;
-    while (last->klass() != NULL) {
-      last = oop(last->klass());
-    }
-    // Atomically prepend suffix to current overflow list
-    oop cur_overflow_list = _overflow_list;
-    while (true) {
-      last->set_klass_to_list_ptr(cur_overflow_list);
-      oop observed_overflow_list =
-        (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
-      if (observed_overflow_list == cur_overflow_list) break;
-      // Otherwise...
-      cur_overflow_list = observed_overflow_list;
-    }
-  }
-
-  // Push objects on prefix list onto this thread's work queue
-  assert(cur != NULL, "program logic");
-  cur = prefix;
-  int n = 0;
-  while (cur != NULL) {
-    oop obj_to_push = cur->forwardee();
-    oop next        = oop(cur->klass());
-    cur->set_klass(obj_to_push->klass());
-    if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
-      obj_to_push = cur;
-      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
-    }
-    work_q->push(obj_to_push);
-    cur = next;
-    n++;
-  }
-  par_scan_state->note_overflow_refill(n);
-  return true;
-}
-
-void ParNewGeneration::ref_processor_init()
-{
-  if (_ref_processor == NULL) {
-    // Allocate and initialize a reference processor
-    _ref_processor = ReferenceProcessor::create_ref_processor(
-        _reserved,                  // span
-        refs_discovery_is_atomic(), // atomic_discovery
-        refs_discovery_is_mt(),     // mt_discovery
-        &_is_alive_closure,
-        ParallelGCThreads,
-        ParallelRefProcEnabled);
-  }
-}
-
-const char* ParNewGeneration::name() const {
-  return "par new generation";
-}
--- a/hotspot/src/share/vm/memory/parNewGeneration.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,414 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)parNewGeneration.hpp	1.47 07/05/05 17:05:54 JVM"
-#endif
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class ChunkArray;
-class ParScanWithoutBarrierClosure;
-class ParScanWithBarrierClosure;
-class ParRootScanWithoutBarrierClosure;
-class ParRootScanWithBarrierTwoGensClosure;
-class ParEvacuateFollowersClosure;
-
-// It would be better if these types could be kept local to the .cpp file,
-// but they must be here to allow ParScanClosure::do_oop_work to be defined 
-// in genOopClosures.inline.hpp.
-
-
-typedef OopTaskQueue    ObjToScanQueue;
-typedef OopTaskQueueSet ObjToScanQueueSet;
-
-// Enable this to get push/pop/steal stats.
-const int PAR_STATS_ENABLED = 0;
-
-class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
-  ParScanWeakRefClosure* _par_cl;
- public:
-  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
-  void do_oop(oop* p);
-};
-
-// The state needed by thread performing parallel young-gen collection.
-class ParScanThreadState {
-  friend class ParScanThreadStateSet;
-  ObjToScanQueue *_work_queue;
-
-  ParGCAllocBuffer _to_space_alloc_buffer;
-
-  ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
-  ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
-  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // One of these two will be passed to process_strong_roots, which will
-  // set its generation.  The first is for two-gen configs where the
-  // old gen collects the perm gen; the second is for arbitrary configs.
-  // The second isn't used right now (it used to be used for the train, an
-  // incremental collector) but the declaration has been left as a reminder.
-  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
-  // This closure will always be bound to the old gen; it will be used
-  // in evacuate_followers.
-  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
-  ParEvacuateFollowersClosure          _evacuate_followers;
-  DefNewGeneration::IsAliveClosure     _is_alive_closure;
-  ParScanWeakRefClosure                _scan_weak_ref_closure;
-  ParKeepAliveClosure                  _keep_alive_closure;
-  
-
-  Space* _to_space;
-  Space* to_space() { return _to_space; }
-
-  Generation* _old_gen;
-  Generation* old_gen() { return _old_gen; }
-
-  HeapWord *_young_old_boundary;
-
-  int _hash_seed;
-  int _thread_num;
-  ageTable _ageTable;
-
-  bool _to_space_full;
-
-  int _pushes, _pops, _steals, _steal_attempts, _term_attempts;
-  int _overflow_pushes, _overflow_refills, _overflow_refill_objs;
-
-  // Timing numbers.
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Helper for trim_queues. Scans subset of an array and makes
-  // remainder available for work stealing.
-  void scan_partial_array_and_push_remainder(oop obj);
-
-  // In support of CMS' parallel rescan of survivor space.
-  ChunkArray* _survivor_chunk_array;
-  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
-
-  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
-
-  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, 
-                     Generation* old_gen_, int thread_num_,
-                     ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
-                     ParallelTaskTerminator& term_);
-
-public:
-  ageTable* age_table() {return &_ageTable;}
-  
-  ObjToScanQueue* work_queue() { return _work_queue; }
-
-  ParGCAllocBuffer* to_space_alloc_buffer() {
-    return &_to_space_alloc_buffer;
-  }
-  
-  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
-  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
-  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
-  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
-  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
-  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };
-
-  // Decrease queue size below "max_size".
-  void trim_queues(int max_size);
-
-  // Is new_obj a candidate for scan_partial_array_and_push_remainder method.
-  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
-
-  int* hash_seed()  { return &_hash_seed; }
-  int  thread_num() { return _thread_num; }
-
-  // Allocate a to-space block of size "sz", or else return NULL.
-  HeapWord* alloc_in_to_space_slow(size_t word_sz);
-
-  HeapWord* alloc_in_to_space(size_t word_sz) {
-    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
-    if (obj != NULL) return obj;
-    else return alloc_in_to_space_slow(word_sz);
-  }
-
-  HeapWord* young_old_boundary() { return _young_old_boundary; }
-
-  void set_young_old_boundary(HeapWord *boundary) {
-    _young_old_boundary = boundary;
-  }
-
-  // Undo the most recent allocation ("obj", of "word_sz").
-  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
-
-  int pushes() { return _pushes; }
-  int pops()   { return _pops; }
-  int steals() { return _steals; }
-  int steal_attempts() { return _steal_attempts; }
-  int term_attempts()  { return _term_attempts; }
-  int overflow_pushes() { return _overflow_pushes; }
-  int overflow_refills() { return _overflow_refills; }
-  int overflow_refill_objs() { return _overflow_refill_objs; }
-
-  void note_push()  { if (PAR_STATS_ENABLED) _pushes++; }
-  void note_pop()   { if (PAR_STATS_ENABLED) _pops++; }
-  void note_steal() { if (PAR_STATS_ENABLED) _steals++; }
-  void note_steal_attempt() { if (PAR_STATS_ENABLED) _steal_attempts++; }
-  void note_term_attempt()  { if (PAR_STATS_ENABLED) _term_attempts++; }
-  void note_overflow_push() { if (PAR_STATS_ENABLED) _overflow_pushes++; }
-  void note_overflow_refill(int objs) {
-    if (PAR_STATS_ENABLED) {
-      _overflow_refills++;
-      _overflow_refill_objs += objs;
-    }
-  }
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() { return _strong_roots_time; }
-  void start_term_time() {
-    note_term_attempt();
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() { return _term_time; }
-
-  double elapsed() {
-    return os::elapsedTime() - _start;
-  }
-
-};
-
-class ParNewGenTask: public AbstractGangTask {
-  ParNewGeneration* _gen;
-  Generation* _next_gen;
-  HeapWord* _young_old_boundary;
-  class ParScanThreadStateSet* _state_set;
-
-public:
-  ParNewGenTask(ParNewGeneration*      gen, 
-                Generation*            next_gen,
-		HeapWord*              young_old_boundary, 
-                ParScanThreadStateSet* state_set);
-
-  HeapWord* young_old_boundary() { return _young_old_boundary; }
-
-  void work(int i);
-};
-
-class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
- public:
-  KeepAliveClosure(ScanWeakRefClosure* cl);
-  void do_oop(oop* p);
-};
-
-class EvacuateFollowersClosureGeneral: public VoidClosure {
-    GenCollectedHeap* _gch;
-    int _level;
-    OopsInGenClosure* _scan_cur_or_nonheap;
-    OopsInGenClosure* _scan_older;
-  public:
-    EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
-                                    OopsInGenClosure* cur,
-                                    OopsInGenClosure* older);
-    void do_void();
-};
-
-// Closure for scanning ParNewGeneration.
-// Same as ScanClosure, except does parallel GC barrier.
-class ScanClosureWithParBarrier: public ScanClosure {
-public:
-  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
-  void do_oop(oop* p);
-};
-
-// Implements AbstractRefProcTaskExecutor for ParNew.
-class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
-  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
-                            ParScanThreadStateSet& state_set)
-    : _generation(generation), _state_set(state_set)
-  { }
-  
-  // Executes a task using worker threads.  
-  virtual void execute(ProcessTask& task);
-  virtual void execute(EnqueueTask& task);
-  // Switch to single threaded mode.
-  virtual void set_single_threaded_mode();
-private:
-  ParNewGeneration&      _generation;
-  ParScanThreadStateSet& _state_set;
-};
-
-
-// A Generation that does parallel young-gen collection.
-
-class ParNewGeneration: public DefNewGeneration {
-  friend class ParNewGenTask;
-  friend class ParNewRefProcTask;
-  friend class ParNewRefProcTaskExecutor;
-  friend class ParScanThreadStateSet;
-
-  // XXX use a global constant instead of 64!
-  struct ObjToScanQueuePadded {
-        ObjToScanQueue work_queue;
-        char pad[64 - sizeof(ObjToScanQueue)];  // prevent false sharing
-  };
-
-  // The per-thread work queues, available here for stealing.
-  ObjToScanQueueSet* _task_queues;
-
-  // Desired size of survivor space plab's
-  PLABStats _plab_stats;
-
-  // A list of from-space images of to-be-scanned objects, threaded through 
-  // klass-pointers (klass information already copied to the forwarded
-  // image.)  Manipulated with CAS.
-  oop _overflow_list;
-
-  // If true, older generation does not support promotion undo, so avoid.
-  static bool _avoid_promotion_undo;
-  
-  // This closure is used by the reference processor to filter out
-  // references to live referent.
-  DefNewGeneration::IsAliveClosure _is_alive_closure;
-
-  static oop real_forwardee_slow(oop obj);
-  static void waste_some_time();
-
-  // Preserve the mark of "obj", if necessary, in preparation for its mark 
-  // word being overwritten with a self-forwarding-pointer. 
-  void preserve_mark_if_necessary(oop obj, markOop m);
-
- protected:
-
-  bool _survivor_overflow;
-
-  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
-  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
-
-  bool survivor_overflow() { return _survivor_overflow; }
-  void set_survivor_overflow(bool v) { _survivor_overflow = v; }
-
-  // Adjust the tenuring threshold.  See the implementation for
-  // the details of the policy.
-  virtual void adjust_desired_tenuring_threshold();
-
-public:
-  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
-
-  ~ParNewGeneration() {
-    for (uint i = 0; i < ParallelGCThreads; i++)
-        delete _task_queues->queue(i);
-
-    delete _task_queues;
-  }
-
-  virtual void ref_processor_init();
-  virtual Generation::Name kind()        { return Generation::ParNew; }
-  virtual const char* name() const;
-  virtual const char* short_name() const { return "ParNew"; }
-
-  // override
-  virtual bool refs_discovery_is_mt()     const {
-    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
-    return ParallelGCThreads > 1;
-  }
-
-  // Make the collection virtual.
-  virtual void collect(bool   full,
-                       bool   clear_all_soft_refs,
-                       size_t size, 
-                       bool   is_tlab);
-
-  // This needs to be visible to the closure function.
-  // "obj" is the object to be copied, "m" is a recent value of its mark
-  // that must not contain a forwarding pointer (though one might be
-  // inserted in "obj"s mark word by a parallel thread).
-  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
-#ifdef JVMPI_SUPPORT
-			     oop obj, size_t obj_sz, markOop m,
-			     bool jvmpi_slow_alloc) {
-#else // !JVMPI_SUPPORT
-			     oop obj, size_t obj_sz, markOop m) {
-#endif // JVMPI_SUPPORT
-    if (_avoid_promotion_undo) {
-       return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
-#ifdef JVMPI_SUPPORT
-                                         		     obj, obj_sz, m, jvmpi_slow_alloc);
-#else // !JVMPI_SUPPORT
-                                         		     obj, obj_sz, m);
-#endif // JVMPI_SUPPORT
-    }
-
-#ifdef JVMPI_SUPPORT
-    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m, jvmpi_slow_alloc);
-#else // !JVMPI_SUPPORT
-    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
-#endif // JVMPI_SUPPORT
-  }
-
-  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
-#ifdef JVMPI_SUPPORT
-			     oop obj, size_t obj_sz, markOop m,
-			     bool jvmpi_slow_alloc);
-#else // !JVMPI_SUPPORT
-			     oop obj, size_t obj_sz, markOop m);
-#endif // JVMPI_SUPPORT
-
-  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
-#ifdef JVMPI_SUPPORT
-			     oop obj, size_t obj_sz, markOop m,
-			     bool jvmpi_slow_alloc);
-#else // !JVMPI_SUPPORT
-			     oop obj, size_t obj_sz, markOop m);
-#endif // JVMPI_SUPPORT
-
-  // Push the given (from-space) object on the global overflow list.
-  void push_on_overflow_list(oop from_space_obj);
-
-  // If the global overflow list is non-empty, move some tasks from it
-  // onto "work_q" (which must be empty).  No more than 1/4 of the
-  // max_elems of "work_q" are moved.
-  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
-
-  // The task queues to be used by parallel GC threads.
-  ObjToScanQueueSet* task_queues() {
-    return _task_queues;
-  }
-
-  PLABStats* plab_stats() {
-    return &_plab_stats;
-  }
-
-  size_t desired_plab_sz() {
-    return _plab_stats.desired_plab_sz();
-  }
-
-  static oop real_forwardee(oop obj);
-
-  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
-};
--- a/hotspot/src/share/vm/memory/permGen.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/permGen.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)permGen.cpp	1.52 07/05/05 17:05:49 JVM"
+#pragma ident "@(#)permGen.cpp	1.53 07/05/17 15:55:06 JVM"
 #endif
 /*
  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -51,15 +51,6 @@
   HeapWord* obj = _gen->allocate(size, false);
   bool tried_collection = false;
   bool tried_expansion = false;
-  // Set the value of prev_capacity to 0 (a value the capacity
-  // will never have) so that the value of prev_capactiy is
-  // captured just before  any attempt to expand.  Currently there
-  // is a single place in this loop where an expansion is attempted.
-  // If any other expansions are added, the value of prev_capacity
-  // should be captured before the expansion.  Also consider whether
-  // the check _gen->capacity() == prev_capacity is still appropriate
-  // for a bail out from the loop.
-  size_t prev_capacity = 0;
   while (obj == NULL) {
     if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) {
       // Expansion limit reached, try collection before expanding further
@@ -67,29 +58,26 @@
       SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
       obj = _gen->allocate(size, false);
       tried_collection = true;
-      tried_expansion =  false;    // since collection; allows eventual
-                                   // expansion to max_capacity
+      tried_expansion =  false;    // ... following the collection:
+                                   // the collection may have shrunk the space.
     }
     if (obj == NULL && !tried_expansion) {
-      prev_capacity = _gen->capacity();
       obj = _gen->expand_and_allocate(size, false);
       tried_expansion = true;
     }
     if (obj == NULL && tried_collection && tried_expansion) {
-      assert(_gen->capacity() <= _gen->max_capacity(), "Invariant");
-      if (_gen->capacity() == prev_capacity) {
-        // We have reached our maximum size but not been able to
-        // allocate or we have tried to expand and have not been
-	// successful; we now make a last-ditch collection attempt that
-        // will try to reclaim as much space as possible; if even
-        // that does not succeed in freeing space to accomodate
-        // the allocation then we are truly out of space. 
-        SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-        obj = _gen->allocate(size, false);
-        break;
+      // We have not been able to allocate despite a collection and
+      // an attempted space expansion. We now make a last-ditch collection
+      // attempt that will try to reclaim as much space as possible (for
+      // example by aggressively clearing all soft refs).
+      SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
+      obj = _gen->allocate(size, false);
+      if (obj == NULL) {
+        // An expansion attempt is necessary since the previous
+        // collection may have shrunk the space.
+        obj = _gen->expand_and_allocate(size, false);
       }
-      // Else we have not yet reached the max size, go around the
-      // loop once more.
+      break;
     }
   }
   return obj;
@@ -141,39 +129,32 @@
   // Since we want to minimize pause times, we will prefer
   // expanding the perm gen rather than doing a stop-world
   // collection to satisfy the allocation request.
-  NOT_PRODUCT(
-    char buf[64];
-    sprintf(buf, " trying to allocate %d bytes", size);
-    CMSLoopCountWarn loopX("CMSPermGen::mem_allocate", buf, 10);
-  )
-  for (; obj == NULL; NOT_PRODUCT(loopX.tick())) {
-    size_t prev_capacity = _gen->capacity();
+  if (obj == NULL) {
+    // Try to expand the perm gen and allocate space.
     obj = _gen->expand_and_allocate(size, false, false);
-    if ((obj == NULL) && (_gen->capacity() == prev_capacity)) {
-      // We have reached the max capacity limit or the expansion
-      // failed to increase the capacity, but have not been
-      // able to satisfy the allocation request. Let's see if a
-      // stop-world collection will free up enough space.
+    if (obj == NULL) {
+      // Let's see if a normal stop-world full collection will
+      // free up enough space.
       SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
       obj = _gen->allocate(size, false);
       if (obj == NULL) {
-        // We have reached our maximum size, and have tried a
-        // stop-world collection, but have not been able to allocate.
-        // We now make a last-ditch collection attempt that will try
-        // to reclaim as much space as possible. If even that does
-        // not succeed in freeing space to accomodate the allocation
-        // then we are truly out of space. 
+        // The collection above may have shrunk the space, so try
+        // to expand again and allocate space.
+        obj = _gen->expand_and_allocate(size, false, false);
+      }
+      if (obj == NULL) {
+        // We have not been able to allocate space despite a
+        // full stop-world collection. We now make a last-ditch collection
+        // attempt (in which soft refs are all aggressively freed)
+        // that will try to reclaim as much space as possible.
         SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
         obj = _gen->allocate(size, false);
-        if (_gen->capacity() == prev_capacity) {
-          break;
-        } 
-        // Else we can try expanding the heap again
+        if (obj == NULL) {
+          // Expand generation in case it was shrunk following the collection.
+          obj = _gen->expand_and_allocate(size, false, false);
+        }
       }
     }
-    // Else it's possible that another thread stole from the space
-    // we expanded; we have not yet reached the max size, go around
-    // the loop once more.
   }
   return obj;
 }
--- a/hotspot/src/share/vm/memory/placeholders.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,275 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)placeholders.cpp	1.19 07/05/05 17:05:53 JVM"
-#endif
-/*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_placeholders.cpp.incl"
-
-// Placeholder methods
-
-PlaceholderEntry* PlaceholderTable::new_entry(int hash, symbolOop name,
-                                              oop loader, bool havesupername, 
-                                              symbolOop supername) {
-  PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable::new_entry(hash, name);
-  entry->set_loader(loader);
-  entry->set_havesupername(havesupername);
-  entry->set_supername(supername);
-  entry->set_superThreadQ(NULL);
-  entry->set_loadInstanceThreadQ(NULL);
-  entry->set_defineThreadQ(NULL);
-  entry->set_definer(NULL);
-  entry->set_instanceKlass(NULL);
-  return entry;
-}
-
-
-// Placeholder objects represent classes currently being loaded.
-// All threads examining the placeholder table must hold the
-// SystemDictionary_lock, so we don't need special precautions
-// on store ordering here.
-void PlaceholderTable::add_entry(int index, unsigned int hash,
-                                 symbolHandle class_name, Handle class_loader,
-                                 bool havesupername, symbolHandle supername){
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  assert(!class_name.is_null(), "adding NULL obj");
-
-  // Both readers and writers are locked so it's safe to just
-  // create the placeholder and insert it in the list without a membar.
-  PlaceholderEntry* entry = new_entry(hash, class_name(), class_loader(), havesupername, supername());
-  add_entry(index, entry);
-}
-
-
-// Remove a placeholder object. 
-void PlaceholderTable::remove_entry(int index, unsigned int hash,
-                                    symbolHandle class_name, 
-                                    Handle class_loader) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  PlaceholderEntry** p = bucket_addr(index);
-  while (*p) {
-    PlaceholderEntry *probe = *p;
-    if (probe->hash() == hash && probe->equals(class_name(), class_loader())) {
-      // Delete entry
-      *p = probe->next();
-      free_entry(probe);
-      return;
-    }
-    p = probe->next_addr();
-  }
-}
-
-PlaceholderEntry* PlaceholderTable::get_entry(int index, unsigned int hash,
-                                       symbolHandle class_name,
-                                       Handle class_loader) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-
-  symbolOop class_name_ = class_name();
-  oop class_loader_ = class_loader();
-
-  for (PlaceholderEntry *place_probe = bucket(index);
-                         place_probe != NULL;
-                         place_probe = place_probe->next()) {
-    if (place_probe->hash() == hash &&
-        place_probe->equals(class_name_, class_loader_)) {
-      return place_probe;
-    }
-  }
-  return NULL;
-}
-
-symbolOop PlaceholderTable::find_entry(int index, unsigned int hash,
-                                       symbolHandle class_name,
-                                       Handle class_loader) {
-  PlaceholderEntry* probe = get_entry(index, hash, class_name, class_loader);
-  return (probe? probe->klass(): symbolOop(NULL));
-}
-
-  // find_and_add returns probe pointer - old or new
-  // If no entry exists, add a placeholder entry 
-  // If entry exists, reuse entry 
-  // For both, push SeenThread for classloadAction
-  // if havesupername: this is used for circularity for instanceklass loading
-PlaceholderEntry* PlaceholderTable::find_and_add(int index, unsigned int hash, symbolHandle name, Handle loader, classloadAction action, symbolHandle supername, Thread* thread) {
-  PlaceholderEntry* probe = get_entry(index, hash, name, loader);
-  if (probe == NULL) {
-    // Nothing found, add place holder
-    add_entry(index, hash, name, loader, (action == LOAD_SUPER), supername);
-    probe = get_entry(index, hash, name, loader);
-  } else {
-    if (action == LOAD_SUPER) {
-      probe->set_havesupername(true);
-      probe->set_supername(supername());
-    }
-  }
-  if (probe) probe->add_seen_thread(thread, action);
-  return probe;
-}
-
-
-// placeholder used to track class loading internal states
-// placeholder existence now for loading superclass/superinterface
-// superthreadQ tracks class circularity, while loading superclass/superinterface
-// loadInstanceThreadQ tracks load_instance_class calls
-// definer() tracks the single thread that owns define token
-// defineThreadQ tracks waiters on defining thread's results
-// 1st claimant creates placeholder
-// find_and_add adds SeenThread entry for appropriate queue
-// All claimants remove SeenThread after completing action
-// On removal: if definer and all queues empty, remove entry
-// Note: you can be in both placeholders and systemDictionary
-// see parse_stream for redefine classes
-// Therefore - must always check SD first
-// Ignores the case where entry is not found
-void PlaceholderTable::find_and_remove(int index, unsigned int hash,
-                       symbolHandle name, Handle loader, Thread* thread) {
-    assert_locked_or_safepoint(SystemDictionary_lock);
-    PlaceholderEntry *probe = get_entry(index, hash, name, loader);
-    if (probe != NULL) {
-       // No other threads using this entry
-       if ((probe->superThreadQ() == NULL) && (probe->loadInstanceThreadQ() == NULL)
-          && (probe->defineThreadQ() == NULL) && (probe->definer() == NULL)) {
-         remove_entry(index, hash, name, loader);
-       }
-    }
-  }
-
-PlaceholderTable::PlaceholderTable(int table_size)
-    : TwoOopHashtable(table_size, sizeof(PlaceholderEntry)) {
-}
-
-
-void PlaceholderTable::oops_do(OopClosure* f) {
-  for (int index = 0; index < table_size(); index++) {
-    for (PlaceholderEntry* probe = bucket(index); 
-                           probe != NULL; 
-                           probe = probe->next()) {
-      probe->oops_do(f);
-    }
-  }
-}
-
-
-void PlaceholderEntry::oops_do(OopClosure* blk) {
-  assert(klass() != NULL, "should have a non-null klass");
-  blk->do_oop((oop*)klass_addr());
-  if (_loader != NULL) {
-    blk->do_oop(loader_addr());
-  }
-  if (_supername != NULL) {
-    blk->do_oop((oop*)supername_addr());
-  }
-  if (_instanceKlass != NULL) {
-    blk->do_oop((oop*)instanceKlass_addr());
-  }
-}
-
-// do all entries in the placeholder table
-void PlaceholderTable::entries_do(void f(symbolOop, oop)) {
-  for (int index = 0; index < table_size(); index++) {
-    for (PlaceholderEntry* probe = bucket(index); 
-                           probe != NULL; 
-                           probe = probe->next()) {
-      f(probe->klass(), probe->loader());             
-    }
-  }
-}
-
-
-#ifndef PRODUCT
-// Note, doesn't append a cr
-void PlaceholderEntry::print() const {
-  klass()->print_value();
-  if (loader() != NULL) {
-    tty->print(", loader ");
-    loader()->print_value();
-  }
-  if (supername() != NULL) {
-    tty->print(", supername ");
-    supername()->print_value();
-  }
-  if (definer() != NULL) {
-    tty->print(", definer ");
-    definer()->print_value();
-  }
-  if (instanceKlass() != NULL) {
-    tty->print(", instanceKlass ");
-    instanceKlass()->print_value();
-  }
-  tty->print("\n");
-  tty->print("loadInstanceThreadQ threads:");
-  loadInstanceThreadQ()->printActionQ();
-  tty->print("\n");
-  tty->print("superThreadQ threads:");
-  superThreadQ()->printActionQ();
-  tty->print("\n");
-  tty->print("defineThreadQ threads:");
-  defineThreadQ()->printActionQ();
-  tty->print("\n");
-}
-#endif
-
-
-void PlaceholderEntry::verify() const {
-  guarantee(loader() == NULL || loader()->is_instance(), 
-            "checking type of _loader");
-  guarantee(instanceKlass() == NULL || instanceKlass()->is_instance(),
-            "checking type of instanceKlass result");
-  klass()->verify();
-}
-
-
-void PlaceholderTable::verify() {
-  int element_count = 0;
-  for (int pindex = 0; pindex < table_size(); pindex++) {
-    for (PlaceholderEntry* probe = bucket(pindex); 
-                           probe != NULL; 
-                           probe = probe->next()) {
-      probe->verify();
-      element_count++;  // both klasses and place holders count
-    }
-  }
-  guarantee(number_of_entries() == element_count,
-            "Verify of system dictionary failed");
-}
-
-
-#ifndef PRODUCT
-void PlaceholderTable::print() {
-  for (int pindex = 0; pindex < table_size(); pindex++) {    
-    for (PlaceholderEntry* probe = bucket(pindex);
-                           probe != NULL; 
-                           probe = probe->next()) {
-      if (Verbose) tty->print("%4d: ", pindex);
-      tty->print(" place holder ");
-
-      probe->print();
-      tty->cr();
-    }
-  }
-}
-#endif
-
-
--- a/hotspot/src/share/vm/memory/placeholders.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,336 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)placeholders.hpp	1.21 07/05/05 17:05:54 JVM"
-#endif
-/*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class PlaceholderEntry;
-
-// Placeholder objects. These represent classes currently
-// being loaded, as well as arrays of primitives.
-//
-
-class PlaceholderTable : public TwoOopHashtable {
-  friend class VMStructs;
-
-public:
-  PlaceholderTable(int table_size);
-
-  PlaceholderEntry* new_entry(int hash, symbolOop name, oop loader, bool havesupername, symbolOop supername);
-
-  PlaceholderEntry* bucket(int i) {
-    return (PlaceholderEntry*)Hashtable::bucket(i);
-  }
-
-  PlaceholderEntry** bucket_addr(int i) {
-    return (PlaceholderEntry**)Hashtable::bucket_addr(i);
-  }
-
-  void add_entry(int index, PlaceholderEntry* new_entry) {
-    Hashtable::add_entry(index, (HashtableEntry*)new_entry);
-  }
-
-  void add_entry(int index, unsigned int hash, symbolHandle name, 
-                Handle loader, bool havesupername, symbolHandle supername);
-
-// This returns a symbolOop to match type for SystemDictionary
-  symbolOop find_entry(int index, unsigned int hash,
-                       symbolHandle name, Handle loader);
-
-  PlaceholderEntry* get_entry(int index, unsigned int hash,
-                       symbolHandle name, Handle loader);
-
-// caller to create a placeholder entry must enumerate an action
-// caller claims ownership of that action
-// For parallel classloading:
-// multiple LOAD_INSTANCE threads can proceed in parallel
-// multiple LOAD_SUPER threads can proceed in parallel
-// LOAD_SUPER needed to check for class circularity
-// DEFINE_CLASS: ultimately define class must be single threaded
-// on a class/classloader basis
-// so the head of that queue owns the token  
-// and the rest of the threads return the result the first thread gets
- enum classloadAction {
-    LOAD_INSTANCE = 1,             // calling load_instance_class
-    LOAD_SUPER = 2,                // loading superclass for this class
-    DEFINE_CLASS = 3               // find_or_define class
- };
-
-  // find_and_add returns probe pointer - old or new
-  // If no entry exists, add a placeholder entry and push SeenThread
-  // If entry exists, reuse entry and push SeenThread for classloadAction
-  PlaceholderEntry* find_and_add(int index, unsigned int hash, 
-                                 symbolHandle name, Handle loader, 
-                                 classloadAction action, symbolHandle supername, 
-                                 Thread* thread);
-
-  void remove_entry(int index, unsigned int hash,
-                    symbolHandle name, Handle loader);
-
-// Remove placeholder information
-  void find_and_remove(int index, unsigned int hash, 
-                       symbolHandle name, Handle loader, Thread* thread); 
-
-  // GC support.
-  void oops_do(OopClosure* f);
-
-  // JVMTI support
-  void entries_do(void f(symbolOop, oop));
-
-#ifndef PRODUCT
-  void print();
-#endif
-  void verify();
-};
-
-// SeenThread objects represent list of threads that are
-// currently performing a load action on a class.
-// For class circularity, set before loading a superclass.
-// For bootclasssearchpath, set before calling load_instance_class.
-// Defining must be single threaded on a class/classloader basis
-// For DEFINE_CLASS, the head of the queue owns the
-// define token and the rest of the threads wait to return the
-// result the first thread gets.
-class SeenThread: public CHeapObj {
-private:
-   Thread *_thread;
-   SeenThread* _stnext;
-   SeenThread* _stprev;
-public:
-   SeenThread(Thread *thread) {
-       _thread = thread;
-       _stnext = NULL;
-       _stprev = NULL;
-   }
-   Thread* thread()                const { return _thread;}
-   void set_thread(Thread *thread) { _thread = thread; }
-
-   SeenThread* next()              const { return _stnext;}
-   void set_next(SeenThread *seen) { _stnext = seen; }
-   void set_prev(SeenThread *seen) { _stprev = seen; }
-   
-#ifndef PRODUCT
-  void printActionQ() {
-    SeenThread* seen = this;
-    while (seen != NULL) {
-      seen->thread()->print_value();
-      tty->print(", ");
-      seen = seen->next();
-    }
-  }
-#endif // PRODUCT
-};
-
-// Placeholder objects represent classes currently being loaded.
-// All threads examining the placeholder table must hold the
-// SystemDictionary_lock, so we don't need special precautions
-// on store ordering here.
-// The system dictionary is the only user of this class.
-
-class PlaceholderEntry : public HashtableEntry {
-  friend class VMStructs;
-
-
- private:
-  oop               _loader;        // initiating loader
-  bool              _havesupername; // distinguish between null supername, and unknown
-  symbolOop         _supername;
-  Thread*           _definer;       // owner of define token
-  klassOop          _instanceKlass; // instanceKlass from successful define
-  SeenThread*       _superThreadQ;  // doubly-linked queue of Threads loading a superclass for this class
-  SeenThread*       _loadInstanceThreadQ;  // loadInstance thread 
-                                    // can be multiple threads if classloader object lock broken by application
-                                    // or if classloader supports parallel classloading
-                   
-  SeenThread*       _defineThreadQ; // queue of Threads trying to define this class
-                                    // including _definer
-                                    // _definer owns token 
-                                    // queue waits for and returns results from _definer
-
- public:
-  // Simple accessors, used only by SystemDictionary
-  symbolOop          klass()               const { return (symbolOop)literal(); }
-  symbolOop*         klass_addr()          { return (symbolOop*)literal_addr(); }
-
-  oop                loader()              const { return _loader; }
-  void               set_loader(oop loader) { _loader = loader; }
-  oop*               loader_addr()         { return &_loader; }
-
-  bool               havesupername()       const { return _havesupername; }
-  void               set_havesupername(bool havesupername) { _havesupername = havesupername; }
-
-  symbolOop          supername()           const { return _supername; }
-  void               set_supername(symbolOop supername) { _supername = supername; }
-  symbolOop*         supername_addr()      { return &_supername; }
-
-  Thread*            definer()             const {return _definer; }
-  void               set_definer(Thread* definer) { _definer = definer; }
-
-  klassOop           instanceKlass()     const {return _instanceKlass; }
-  void               set_instanceKlass(klassOop instanceKlass) { _instanceKlass = instanceKlass; }
-  klassOop*          instanceKlass_addr()   { return &_instanceKlass; }
-
-  SeenThread*        superThreadQ()        const { return _superThreadQ; }
-  void               set_superThreadQ(SeenThread* SeenThread) { _superThreadQ = SeenThread; }
-
-  SeenThread*        loadInstanceThreadQ() const { return _loadInstanceThreadQ; }
-  void               set_loadInstanceThreadQ(SeenThread* SeenThread) { _loadInstanceThreadQ = SeenThread; }
-
-  SeenThread*        defineThreadQ()        const { return _defineThreadQ; }
-  void               set_defineThreadQ(SeenThread* SeenThread) { _defineThreadQ = SeenThread; }
-
-  PlaceholderEntry* next() const {
-    return (PlaceholderEntry*)HashtableEntry::next();
-  }
-
-  PlaceholderEntry** next_addr() {
-    return (PlaceholderEntry**)HashtableEntry::next_addr();
-  }
-
-  // Test for equality
-  // Entries are unique for class/classloader name pair
-  bool equals(symbolOop class_name, oop class_loader) const {
-    return (klass() == class_name && loader() == class_loader);
-  }
-
-  SeenThread* actionToQueue(PlaceholderTable::classloadAction action) {
-    SeenThread* queuehead;
-    switch (action) {
-      case PlaceholderTable::LOAD_INSTANCE:
-         queuehead = _loadInstanceThreadQ;
-         break;
-      case PlaceholderTable::LOAD_SUPER:
-         queuehead = _superThreadQ;
-         break;
-      case PlaceholderTable::DEFINE_CLASS:
-         queuehead = _defineThreadQ;
-         break;
-      default: Unimplemented();
-    }
-    return queuehead;
-  }
-
-  void set_threadQ(SeenThread* seenthread, PlaceholderTable::classloadAction action) {
-    switch (action) {
-      case PlaceholderTable::LOAD_INSTANCE:
-         _loadInstanceThreadQ = seenthread;
-         break;
-      case PlaceholderTable::LOAD_SUPER:
-         _superThreadQ = seenthread;
-         break;
-      case PlaceholderTable::DEFINE_CLASS:
-         _defineThreadQ = seenthread;
-         break;
-      default: Unimplemented();
-    }
-    return;
-  }
-
-  bool super_load_in_progress() {
-     return (_superThreadQ != NULL);
-  } 
-
-  bool instance_load_in_progress() {
-    return (_loadInstanceThreadQ != NULL);
-  }
-
-  bool define_class_in_progress() {
-    return (_defineThreadQ != NULL);
-  }
-
-// Doubly-linked list of Threads per action for class/classloader pair
-// Class circularity support: links in thread before loading superclass
-// bootstrapsearchpath support: links in a thread before load_instance_class
-// definers: use as queue of define requestors, including owner of
-// define token. Appends for debugging of requestor order
-  void add_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
-    assert_lock_strong(SystemDictionary_lock);
-    SeenThread* threadEntry = new SeenThread(thread);
-    SeenThread* seen = actionToQueue(action);
-    
-    if (seen == NULL) {
-      set_threadQ(threadEntry, action);
-      return;
-    }
-    SeenThread* next;
-    while ((next = seen->next()) != NULL) {
-      seen = next;
-    }
-    seen->set_next(threadEntry);
-    threadEntry->set_prev(seen);
-    return;
-  }
-
-  bool check_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
-    assert_lock_strong(SystemDictionary_lock);
-    SeenThread* threadQ = actionToQueue(action);
-    SeenThread* seen = threadQ;
-    while (seen) {
-      if (thread == seen->thread()) {
-        return true;
-      }
-      seen = seen->next();
-    }
-    return false;
-  }
-
-  // returns true if seenthreadQ is now empty
-  // Note, caller must ensure probe still exists while holding
-  // SystemDictionary_lock
-  // ignores if cleanup has already been done
-  // if found, deletes SeenThread
-  bool remove_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
-    assert_lock_strong(SystemDictionary_lock);
-    SeenThread* threadQ = actionToQueue(action);
-    SeenThread* seen = threadQ;
-    SeenThread* prev = NULL;
-    while (seen) {
-      if (thread == seen->thread()) {
-        if (prev) {
-          prev->set_next(seen->next());
-        } else {
-          set_threadQ(seen->next(), action);
-        }
-        if (seen->next()) {
-          seen->next()->set_prev(prev);
-        }
-        delete seen;
-        break;
-      }
-      prev = seen;
-      seen = seen->next();
-    }
-    return (actionToQueue(action) == NULL);
-  }
-
-  // GC support
-  // Applies "f->do_oop" to all root oops in the placeholder table.
-  void oops_do(OopClosure* blk);
-
-  // Print method doesn't append a cr
-  void print() const  PRODUCT_RETURN;
-  void verify() const;
-};
-
-
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)referenceProcessor.cpp	1.50 07/04/20 18:03:43 JVM"
+#pragma ident "@(#)referenceProcessor.cpp	1.55 07/05/17 15:55:08 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
--- a/hotspot/src/share/vm/memory/resolutionErrors.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,124 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)resolutionErrors.cpp	1.6 07/05/05 17:05:54 JVM"
-#endif
-/*
- * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_resolutionErrors.cpp.incl"
-
-// add new entry to the table
-void ResolutionErrorTable::add_entry(int index, unsigned int hash, 
-				     constantPoolHandle pool, int cp_index, symbolHandle error)
-{
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  assert(!pool.is_null() && !error.is_null(), "adding NULL obj");
-
-  ResolutionErrorEntry* entry = new_entry(hash, pool(), cp_index, error());
-  add_entry(index, entry);
-}
-
-// find entry in the table
-ResolutionErrorEntry* ResolutionErrorTable::find_entry(int index, unsigned int hash, 
-						       constantPoolHandle pool, int cp_index)
-{
-  assert_locked_or_safepoint(SystemDictionary_lock);
-
-  for (ResolutionErrorEntry *error_probe = bucket(index);
-                         error_probe != NULL;
-                         error_probe = error_probe->next()) {
-  if (error_probe->hash() == hash && error_probe->pool() == pool()) {
-      return error_probe;;
-    }
-  }
-  return NULL;
-}
-
-// create new error entry
-ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, constantPoolOop pool, 
-						      int cp_index, symbolOop error)
-{   
-  ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable::new_entry(hash, pool);
-  entry->set_cp_index(cp_index);
-  entry->set_error(error);
-  
-  return entry;
-}
-
-// create resolution error table
-ResolutionErrorTable::ResolutionErrorTable(int table_size)
-    : Hashtable(table_size, sizeof(ResolutionErrorEntry)) {
-}
-
-// GC support
-void ResolutionErrorTable::oops_do(OopClosure* f) {
-  for (int i = 0; i < table_size(); i++) {
-    for (ResolutionErrorEntry* probe = bucket(i); 
-                           probe != NULL; 
-                           probe = probe->next()) {
-      assert(probe->pool() != (constantPoolOop)NULL, "resolution error table is corrupt");
-      assert(probe->error() != (symbolOop)NULL, "resolution error table is corrupt");
-      probe->oops_do(f);
-    }
-  }
-}
-
-// GC support
-void ResolutionErrorEntry::oops_do(OopClosure* blk) {
-  blk->do_oop((oop*)pool_addr());
-  blk->do_oop((oop*)error_addr());
-}
-
-// We must keep the symbolOop used in the error alive. The constantPoolOop will
-// decide when the entry can be purged.
-void ResolutionErrorTable::always_strong_classes_do(OopClosure* blk) {
-  for (int i = 0; i < table_size(); i++) {
-    for (ResolutionErrorEntry* probe = bucket(i); 
-                           probe != NULL; 
-                           probe = probe->next()) {
-      assert(probe->error() != (symbolOop)NULL, "resolution error table is corrupt");
-      blk->do_oop((oop*)probe->error_addr());
-    }	
-  }
-}
-
-// Remove unloaded entries from the table
-void ResolutionErrorTable::purge_resolution_errors(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
-  for (int i = 0; i < table_size(); i++) {
-    for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
-      ResolutionErrorEntry* entry = *p;
-      assert(entry->pool() != (constantPoolOop)NULL, "resolution error table is corrupt");
-      constantPoolOop pool = entry->pool();
-      if (is_alive->do_object_b(pool)) {
-	p = entry->next_addr();
-      } else {
-	*p = entry->next();
-	free_entry(entry);
-      }
-    }
-  }
-}
-
-
--- a/hotspot/src/share/vm/memory/resolutionErrors.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)resolutionErrors.hpp	1.6 07/05/05 17:05:54 JVM"
-#endif
-/*
- * Copyright 2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class ResolutionErrorEntry;
-
-// ResolutionError objects are used to record errors encountered during
-// constant pool resolution (JVMS 5.4.3).
-
-class ResolutionErrorTable : public Hashtable {
-
-public:
-  ResolutionErrorTable(int table_size);
-
-  ResolutionErrorEntry* new_entry(int hash, constantPoolOop pool, int cp_index, symbolOop error);
-
-  ResolutionErrorEntry* bucket(int i) {
-    return (ResolutionErrorEntry*)Hashtable::bucket(i);
-  }
-
-  ResolutionErrorEntry** bucket_addr(int i) {
-    return (ResolutionErrorEntry**)Hashtable::bucket_addr(i);
-  }
-
-  void add_entry(int index, ResolutionErrorEntry* new_entry) {
-    Hashtable::add_entry(index, (HashtableEntry*)new_entry);
-  }
-  
-  void add_entry(int index, unsigned int hash,
-		 constantPoolHandle pool, int which, symbolHandle error);
-		 
-
-  // find error given the constant pool and constant pool index
-  ResolutionErrorEntry* find_entry(int index, unsigned int hash, 
-				   constantPoolHandle pool, int cp_index);
-
-
-  unsigned int compute_hash(constantPoolHandle pool, int cp_index) {
-    return (unsigned int) pool->identity_hash() + cp_index;
-  }
-
-  // purges unloaded entries from the table
-  void purge_resolution_errors(BoolObjectClosure* is_alive);	
- 
-  // this table keeps symbolOops alive 
-  void always_strong_classes_do(OopClosure* blk);
-
-  // GC support.
-  void oops_do(OopClosure* f);
-};
-
-
-class ResolutionErrorEntry : public HashtableEntry {
- private:
-  int		    _cp_index;
-  symbolOop	    _error;
-
- public:
-  constantPoolOop    pool() const 		{ return (constantPoolOop)literal(); }
-  constantPoolOop*   pool_addr()  		{ return (constantPoolOop*)literal_addr(); }
-
-  int		     cp_index() const		{ return _cp_index; }
-  void		     set_cp_index(int cp_index) { _cp_index = cp_index; }
-
-  symbolOop          error() const 		{ return _error; }
-  void		     set_error(symbolOop e)	{ _error = e; }
-  symbolOop*         error_addr()		{ return &_error; }
-
-  ResolutionErrorEntry* next() const {
-    return (ResolutionErrorEntry*)HashtableEntry::next();
-  }
-
-  ResolutionErrorEntry** next_addr() {
-    return (ResolutionErrorEntry**)HashtableEntry::next_addr();
-  }
-
-  // GC support
-  void oops_do(OopClosure* blk);
-};
-
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)sharedHeap.cpp	1.58 07/05/05 17:05:55 JVM"
+#pragma ident "@(#)sharedHeap.cpp	1.59 07/05/17 15:55:10 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -240,28 +240,7 @@
   perm_gen()->ref_processor_init();
 }
 
-#ifdef JVMPI_SUPPORT
-class JVMPIAllocEventDisabler: public StackObj {
-  bool _enabled;
-public:
-  JVMPIAllocEventDisabler() {
-    _enabled = Universe::jvmpi_alloc_event_enabled();
-    if (_enabled)
-      Universe::set_jvmpi_alloc_event_enabled(Universe::_jvmpi_disabled);
-  }
-  ~JVMPIAllocEventDisabler() {
-    if (_enabled)
-      Universe::set_jvmpi_alloc_event_enabled(Universe::_jvmpi_enabled);
-  }
-};
-#endif // JVMPI_SUPPORT
-
 void SharedHeap::fill_region_with_object(MemRegion mr) {
-#ifdef JVMPI_SUPPORT
-  // Disable allocation events, since this isn't a "real" allocation.
-  JVMPIAllocEventDisabler dis;  
-#endif // JVMPI_SUPPORT
-  
   // Disable the posting of JVMTI VMObjectAlloc events as we
   // don't want the filling of tlabs with filler arrays to be
   // reported to the profiler.
--- a/hotspot/src/share/vm/memory/space.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/space.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)space.hpp	1.147 07/05/05 17:05:42 JVM"
+#pragma ident "@(#)space.hpp	1.148 07/05/17 15:55:13 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -575,20 +575,6 @@
   cp->space->set_compaction_top(compact_top);                                \
 }
 
-#ifdef JVMPI_SUPPORT
-#define JVMPI_EVENT_MOVE1 \
-  /* JVMPI move notification must be done here since 'size' call goes		\
-   * through klass, which may have been moved. */				\
-  const bool jvmpi_move_event_enabled = Universe::jvmpi_move_event_enabled();
-
-#define JVMPI_EVENT_MOVE2 \
-      if (jvmpi_move_event_enabled) {						\
-        Universe::jvmpi_object_move((oop)q, (oop)compaction_top);		\
-      }
-#else // !JVMPI_SUPPORT
-#define JVMPI_EVENT_MOVE1
-#define JVMPI_EVENT_MOVE2
-#endif // JVMPI_SUPPORT
 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {				\
   /* adjust all the interior pointers to point at the new locations of objects	\
    * Used by MarkSweep::mark_sweep_phase3() */					\
@@ -697,10 +683,6 @@
     }										\
   }										\
 										\
-/* #ifdef JVMPI_SUPPORT */							\
-JVMPI_EVENT_MOVE1								\
-/* #endif // JVMPI_SUPPORT */							\
-										\
   const intx scan_interval = PrefetchScanIntervalInBytes;			\
   const intx copy_interval = PrefetchCopyIntervalInBytes;			\
   while (q < t) {								\
@@ -727,10 +709,6 @@
       oop(compaction_top)->init_mark();						\
       assert(oop(compaction_top)->klass() != NULL, "should have a class");	\
 										\
-/* #ifdef JVMPI_SUPPORT */							\
-JVMPI_EVENT_MOVE2								\
-/* #endif // JVMPI_SUPPORT */							\
-										\
       debug_only(prev_q = q);							\
       q += size;								\
     }										\
--- a/hotspot/src/share/vm/memory/symbolTable.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,487 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)symbolTable.cpp	1.69 07/05/05 17:05:55 JVM"
-#endif
-/*
- * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_symbolTable.cpp.incl"
-
-// --------------------------------------------------------------------------
-
-SymbolTable* SymbolTable::_the_table = NULL;
-
-// Lookup a symbol in a bucket.
-
-symbolOop SymbolTable::lookup(int index, const char* name,
-                              int len, unsigned int hash) {
-  for (HashtableEntry* e = bucket(index); e != NULL; e = e->next()) {
-    if (e->hash() == hash) {
-      symbolOop sym = symbolOop(e->literal());
-      if (sym->equals(name, len)) {
-        return sym;
-      }
-    }
-  }
-  return NULL;
-}
-
-
-// We take care not to be blocking while holding the
-// SymbolTable_lock. Otherwise, the system might deadlock, since the
-// symboltable is used during compilation (VM_thread) The lock free
-// synchronization is simplified by the fact that we do not delete
-// entries in the symbol table during normal execution (only during
-// safepoints).
-
-symbolOop SymbolTable::lookup(const char* name, int len, TRAPS) {  
-  unsigned int hashValue = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hashValue);
-
-  symbolOop s = the_table()->lookup(index, name, len, hashValue);
-
-  // Found
-  if (s != NULL) return s;
-  
-  // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, CHECK_NULL);
-}
-
-symbolOop SymbolTable::lookup(symbolHandle sym, int begin, int end, TRAPS) {
-  char* buffer;
-  int index, len;
-  unsigned int hashValue;
-  char* name;
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-
-    name = (char*)sym->base() + begin;
-    len = end - begin;
-    hashValue = hash_symbol(name, len);
-    index = the_table()->hash_to_index(hashValue);
-    symbolOop s = the_table()->lookup(index, name, len, hashValue);
-  
-    // Found
-    if (s != NULL) return s;
-  }
-   
-  // Otherwise, add to symbol to table. Copy to a C string first.
-  char stack_buf[128];
-  ResourceMark rm(THREAD);
-  if (len <= 128) {
-    buffer = stack_buf;
-  } else {
-    buffer = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, len);
-  }
-  for (int i=0; i<len; i++) {
-    buffer[i] = name[i];
-  }
-  // Make sure there is no safepoint in the code above since name can't move.
-  // We can't include the code in No_Safepoint_Verifier because of the
-  // ResourceMark.
-
-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, CHECK_NULL);
-}
-
-symbolOop SymbolTable::lookup_only(const char* name, int len,
-                                   unsigned int& hash) {  
-  hash = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hash);
-
-  return the_table()->lookup(index, name, len, hash);
-}
-
-void SymbolTable::add(constantPoolHandle cp, int names_count,
-                      const char** names, int* lengths, int* cp_indices,
-                      unsigned int* hashValues, TRAPS) {
-  SymbolTable* table = the_table();
-  bool added = table->basic_add(cp, names_count, names, lengths,
-                                cp_indices, hashValues, CHECK);
-  if (!added) {
-    // do it the hard way
-    for (int i=0; i<names_count; i++) {
-      int index = table->hash_to_index(hashValues[i]);
-      symbolOop sym = table->basic_add(index, (u1*)names[i], lengths[i],
-                                       hashValues[i], CHECK);
-      cp->symbol_at_put(cp_indices[i], sym);
-    }
-  }
-}
-
-// Needed for preloading classes in signatures when compiling.
-
-symbolOop SymbolTable::probe(const char* name, int len) {
-  unsigned int hashValue = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hashValue);
-  return the_table()->lookup(index, name, len, hashValue);
-}
-
-
-symbolOop SymbolTable::basic_add(int index, u1 *name, int len,
-                                 unsigned int hashValue, TRAPS) {  
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
-         "proposed name of symbol must be stable");
-
-  // We assume that lookup() has been called already, that it failed,
-  // and symbol was not found.  We create the symbol here.
-  symbolKlass* sk  = (symbolKlass*) Universe::symbolKlassObj()->klass_part();
-  symbolOop s_oop = sk->allocate_symbol(name, len, CHECK_NULL);
-  symbolHandle sym (THREAD, s_oop);
-
-  // Allocation must be done before grapping the SymbolTable_lock lock
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  assert(sym->equals((char*)name, len), "symbol must be properly initialized");
-
-  // Since look-up was done lock-free, we need to check if another
-  // thread beat us in the race to insert the symbol.
-
-  symbolOop test = lookup(index, (char*)name, len, hashValue);
-  if (test != NULL) {
-    // A race occured and another thread introduced the symbol, this one
-    // will be dropped and collected.
-    return test;
-  }  
-
-  HashtableEntry* entry = new_entry(hashValue, sym());
-  add_entry(index, entry);
-  return sym();
-}
-
-bool SymbolTable::basic_add(constantPoolHandle cp, int names_count,
-                            const char** names, int* lengths,
-                            int* cp_indices, unsigned int* hashValues,
-                            TRAPS) {
-  symbolKlass* sk  = (symbolKlass*) Universe::symbolKlassObj()->klass_part();
-  symbolOop sym_oops[symbol_alloc_batch_size];
-  bool allocated = sk->allocate_symbols(names_count, names, lengths,
-                                        sym_oops, CHECK_false);
-  if (!allocated) {
-    return false;
-  }
-  symbolHandle syms[symbol_alloc_batch_size];
-  int i;
-  for (i=0; i<names_count; i++) {
-    syms[i] = symbolHandle(THREAD, sym_oops[i]);
-  }
-
-  // Allocation must be done before grabbing the SymbolTable_lock lock
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  for (i=0; i<names_count; i++) {
-    assert(syms[i]->equals(names[i], lengths[i]), "symbol must be properly initialized");
-    // Since look-up was done lock-free, we need to check if another
-    // thread beat us in the race to insert the symbol.
-    int index = hash_to_index(hashValues[i]);
-    symbolOop test = lookup(index, names[i], lengths[i], hashValues[i]);
-    if (test != NULL) {
-      // A race occured and another thread introduced the symbol, this one
-      // will be dropped and collected. Use test instead.
-      cp->symbol_at_put(cp_indices[i], test);
-    } else {
-      symbolOop sym = syms[i]();
-      HashtableEntry* entry = new_entry(hashValues[i], sym);
-      add_entry(index, entry);
-      cp->symbol_at_put(cp_indices[i], sym);
-    }
-  }
-
-  return true;
-}
-
-
-void SymbolTable::verify() {
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      symbolOop s = symbolOop(p->literal());
-      guarantee(s != NULL, "symbol is NULL");
-      s->verify();
-      guarantee(s->is_perm(), "symbol not in permspace");
-      unsigned int h = hash_symbol((char*)s->bytes(), s->utf8_length());
-      guarantee(p->hash() == h, "broken hash in symbol table entry");
-      guarantee(the_table()->hash_to_index(h) == i,
-                "wrong index in symbol table");
-    }
-  }
-}
-
-
-//---------------------------------------------------------------------------
-// Non-product code
-
-#ifndef PRODUCT
-
-void SymbolTable::print_histogram() {
-  MutexLocker ml(SymbolTable_lock);
-  const int results_length = 100;
-  int results[results_length];
-  int i,j;
-  
-  // initialize results to zero
-  for (j = 0; j < results_length; j++) {
-    results[j] = 0;
-  }
-
-  int total = 0;
-  int max_symbols = 0;
-  int out_of_range = 0;
-  for (i = 0; i < the_table()->table_size(); i++) {
-    HashtableEntry* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      int counter = symbolOop(p->literal())->utf8_length();
-      total += counter;
-      if (counter < results_length) {
-        results[counter]++;
-      } else {
-        out_of_range++;
-      }
-      max_symbols = MAX2(max_symbols, counter);
-    }
-  }
-  tty->print_cr("Symbol Table:");
-  tty->print_cr("%8s %5d", "Total  ", total);
-  tty->print_cr("%8s %5d", "Maximum", max_symbols);
-  tty->print_cr("%8s %3.2f", "Average",
-	  ((float) total / (float) the_table()->table_size()));
-  tty->print_cr("%s", "Histogram:");
-  tty->print_cr(" %s %29s", "Length", "Number chains that length");
-  for (i = 0; i < results_length; i++) {
-    if (results[i] > 0) {
-      tty->print_cr("%6d %10d", i, results[i]);
-    }
-  }
-  int line_length = 70;    
-  tty->print_cr("%s %30s", " Length", "Number chains that length");
-  for (i = 0; i < results_length; i++) {
-    if (results[i] > 0) {
-      tty->print("%4d", i);
-      for (j = 0; (j < results[i]) && (j < line_length);  j++) {
-        tty->print("%1s", "*");
-      }
-      if (j == line_length) {
-        tty->print("%1s", "+");
-      }
-      tty->cr();
-    }
-  }  
-  tty->print_cr(" %s %d: %d\n", "Number chains longer than",
-	            results_length, out_of_range);
-}
-
-#endif // PRODUCT
-
-// --------------------------------------------------------------------------
-
-#ifdef ASSERT
-class StableMemoryChecker : public StackObj {
-  enum { _bufsize = wordSize*4 };
-
-  address _region;
-  jint    _size;
-  u1      _save_buf[_bufsize];
-
-  int sample(u1* save_buf) {
-    if (_size <= _bufsize) {
-      memcpy(save_buf, _region, _size);
-      return _size;
-    } else {
-      // copy head and tail
-      memcpy(&save_buf[0],          _region,                      _bufsize/2);
-      memcpy(&save_buf[_bufsize/2], _region + _size - _bufsize/2, _bufsize/2);
-      return (_bufsize/2)*2;
-    }
-  }
-
- public:
-  StableMemoryChecker(const void* region, jint size) {
-    _region = (address) region;
-    _size   = size;
-    sample(_save_buf);
-  }
-
-  bool verify() {
-    u1 check_buf[sizeof(_save_buf)];
-    int check_size = sample(check_buf);
-    return (0 == memcmp(_save_buf, check_buf, check_size));
-  }
-
-  void set_region(const void* region) { _region = (address) region; }
-};
-#endif
-
-
-// --------------------------------------------------------------------------
-
-
-// Compute the hash value for a java.lang.String object which would
-// contain the characters passed in. This hash value is used for at
-// least two purposes.
-//
-// (a) As the hash value used by the StringTable for bucket selection
-//     and comparison (stored in the HashtableEntry structures).  This
-//     is used in the String.intern() method.
-//
-// (b) As the hash value used by the String object itself, in
-//     String.hashCode().  This value is normally calculate in Java code
-//     in the String.hashCode method(), but is precomputed for String
-//     objects in the shared archive file.
-//
-//     For this reason, THIS ALGORITHM MUST MATCH String.hashCode().
-
-int StringTable::hash_string(jchar* s, int len) {
-  unsigned h = 0;
-  while (len-- > 0) {
-    h = 31*h + (unsigned) *s;
-    s++;
-  }
-  return h;
-}
-
-
-StringTable* StringTable::_the_table = NULL;
-
-oop StringTable::lookup(int index, jchar* name,
-                        int len, unsigned int hash) {
-  for (HashtableEntry* l = bucket(index); l != NULL; l = l->next()) {
-    if (l->hash() == hash) {
-      if (java_lang_String::equals(l->literal(), name, len)) {
-        return l->literal();
-      }
-    }
-  }
-  return NULL;
-}
-
-
-oop StringTable::basic_add(int index, Handle string_or_null, jchar* name,
-                           int len, unsigned int hashValue, TRAPS) {  
-  debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
-         "proposed name of symbol must be stable");
-
-  Handle string;
-  // try to reuse the string if possible
-  if (!string_or_null.is_null() && string_or_null()->is_perm()) {
-    string = string_or_null;
-  } else {
-    string = java_lang_String::create_tenured_from_unicode(name, len, CHECK_NULL);
-  }
-
-  // Allocation must be done before grapping the SymbolTable_lock lock
-  MutexLocker ml(StringTable_lock, THREAD);
-
-  assert(java_lang_String::equals(string(), name, len),
-         "string must be properly initialized");
-
-  // Since look-up was done lock-free, we need to check if another
-  // thread beat us in the race to insert the symbol.
-
-  oop test = lookup(index, name, len, hashValue); // calls lookup(u1*, int)
-  if (test != NULL) {
-    // Entry already added
-    return test;
-  }  
-
-  HashtableEntry* entry = new_entry(hashValue, string());
-  add_entry(index, entry);
-  return string();
-}
-
-
-oop StringTable::lookup(symbolOop symbol) {
-  ResourceMark rm;
-  int length;
-  jchar* chars = symbol->as_unicode(length);
-  unsigned int hashValue = hash_string(chars, length);
-  int index = the_table()->hash_to_index(hashValue);
-  return the_table()->lookup(index, chars, length, hashValue);
-}
-
-
-oop StringTable::intern(Handle string_or_null, jchar* name,
-                        int len, TRAPS) {
-  unsigned int hashValue = hash_string(name, len);
-  int index = the_table()->hash_to_index(hashValue);
-  oop string = the_table()->lookup(index, name, len, hashValue);
-
-  // Found
-  if (string != NULL) return string;
-  
-  // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, string_or_null, name, len,
-                                hashValue, CHECK_NULL);  
-}
-
-oop StringTable::intern(symbolOop symbol, TRAPS) {
-  if (symbol == NULL) return NULL;
-  ResourceMark rm(THREAD);
-  int length;
-  jchar* chars = symbol->as_unicode(length);
-  Handle string;
-  oop result = intern(string, chars, length, CHECK_NULL);
-  return result;
-}
-
-
-oop StringTable::intern(oop string, TRAPS)
-{
-  if (string == NULL) return NULL;
-  ResourceMark rm(THREAD);
-  int length;
-  Handle h_string (THREAD, string);
-  jchar* chars = java_lang_String::as_unicode_string(string, length);
-  oop result = intern(h_string, chars, length, CHECK_NULL);
-  return result;
-}
-
-
-oop StringTable::intern(const char* utf8_string, TRAPS) {
-  if (utf8_string == NULL) return NULL;
-  ResourceMark rm(THREAD);
-  int length = UTF8::unicode_length(utf8_string);
-  jchar* chars = NEW_RESOURCE_ARRAY(jchar, length);
-  UTF8::convert_to_unicode(utf8_string, chars, length);
-  Handle string;
-  oop result = intern(string, chars, length, CHECK_NULL);
-  return result;
-}
-
-void StringTable::verify() {
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry* p = the_table()->bucket(i);
-    for ( ; p != NULL; p = p->next()) {
-      oop s = p->literal();
-      guarantee(s != NULL, "interned string is NULL");
-      guarantee(s->is_perm(), "interned string not in permspace");
-
-      int length;
-      jchar* chars = java_lang_String::as_unicode_string(s, length);
-      unsigned int h = hash_string(chars, length);
-      guarantee(p->hash() == h, "broken hash in string table entry");
-      guarantee(the_table()->hash_to_index(h) == i,
-                "wrong index in string table");
-    }
-  }
-}
--- a/hotspot/src/share/vm/memory/symbolTable.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,216 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)symbolTable.hpp	1.48 07/05/05 17:05:56 JVM"
-#endif
-/*
- * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// The symbol table holds all symbolOops and corresponding interned strings.
-// symbolOops and literal strings should be canonicalized.
-//
-// The interned strings are created lazily.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-//  - symbolTableEntrys are allocated in blocks to reduce the space overhead.
-
-class BoolObjectClosure;
-
-
-class SymbolTable : public Hashtable {
-  friend class VMStructs;
-
-private:
-  // The symbol table
-  static SymbolTable* _the_table;
-
-  // Adding elements    
-  symbolOop basic_add(int index, u1* name, int len,
-                      unsigned int hashValue, TRAPS);
-  bool basic_add(constantPoolHandle cp, int names_count,
-                 const char** names, int* lengths, int* cp_indices,
-                 unsigned int* hashValues, TRAPS);
-
-  // Table size
-  enum {
-    symbol_table_size = 20011
-  };
-
-  symbolOop lookup(int index, const char* name, int len, unsigned int hash);
-
-  SymbolTable()
-    : Hashtable(symbol_table_size, sizeof (HashtableEntry)) {}
-
-  SymbolTable(HashtableBucket* t, int number_of_entries)
-    : Hashtable(symbol_table_size, sizeof (HashtableEntry), t,
-                number_of_entries) {}
-
-
-public:
-  enum {
-    symbol_alloc_batch_size = 8
-  };
-
-  // The symbol table
-  static SymbolTable* the_table() { return _the_table; }
-
-  static void create_table() {
-    assert(_the_table == NULL, "One symbol table allowed.");
-    _the_table = new SymbolTable();
-  }
-
-  static void create_table(HashtableBucket* t, int length,
-                           int number_of_entries) {
-    assert(_the_table == NULL, "One symbol table allowed.");
-    assert(length == symbol_table_size * sizeof(HashtableBucket),
-           "bad shared symbol size.");
-    _the_table = new SymbolTable(t, number_of_entries);
-  }
-
-  static symbolOop lookup(const char* name, int len, TRAPS);
-  // lookup only, won't add. Also calculate hash.
-  static symbolOop lookup_only(const char* name, int len, unsigned int& hash);
-  // Only copy to C string to be added if lookup failed.
-  static symbolOop lookup(symbolHandle sym, int begin, int end, TRAPS);
-
-  static void add(constantPoolHandle cp, int names_count,
-                  const char** names, int* lengths, int* cp_indices,
-                  unsigned int* hashValues, TRAPS);
-
-  // GC support
-  //   Delete pointers to otherwise-unreachable objects.
-  static void unlink(BoolObjectClosure* cl) {
-    the_table()->Hashtable::unlink(cl);
-  }
-
-  // Invoke "f->do_oop" on the locations of all oops in the table.
-  static void oops_do(OopClosure* f) {
-    the_table()->Hashtable::oops_do(f);
-  }
-
-  // Symbol lookup
-  static symbolOop lookup(int index, const char* name, int len, TRAPS);
-
-  // Needed for preloading classes in signatures when compiling.
-  // Returns the symbol is already present in symbol table, otherwise
-  // NULL.  NO ALLOCATION IS GUARANTEED!
-  static symbolOop probe(const char* name, int len);
-
-  // Histogram
-  static void print_histogram()     PRODUCT_RETURN;
-
-  // Debugging
-  static void verify();
-
-  // Sharing
-  static void copy_buckets(char** top, char*end) {
-    the_table()->Hashtable::copy_buckets(top, end);
-  }
-  static void copy_table(char** top, char*end) {
-    the_table()->Hashtable::copy_table(top, end);
-  }
-  static void reverse(void* boundary = NULL) {
-    ((Hashtable*)the_table())->reverse(boundary);
-  }
-};
-
-
-class StringTable : public Hashtable {
-  friend class VMStructs;
-
-private:
-  // The string table
-  static StringTable* _the_table;
-
-  static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
-  oop basic_add(int index, Handle string_or_null, jchar* name, int len,
-                unsigned int hashValue, TRAPS);
-
-  // Table size
-  enum {
-    string_table_size = 1009
-  };
-
-  oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
-
-  StringTable() : Hashtable(string_table_size, sizeof (HashtableEntry)) {}
-
-  StringTable(HashtableBucket* t, int number_of_entries)
-    : Hashtable(string_table_size, sizeof (HashtableEntry), t,
-                number_of_entries) {}
-
-public:
-  // The string table
-  static StringTable* the_table() { return _the_table; }
-
-  static void create_table() {
-    assert(_the_table == NULL, "One string table allowed.");
-    _the_table = new StringTable();
-  }
-
-  static void create_table(HashtableBucket* t, int length,
-                           int number_of_entries) {
-    assert(_the_table == NULL, "One string table allowed.");
-    assert(length == string_table_size * sizeof(HashtableBucket),
-           "bad shared string size.");
-    _the_table = new StringTable(t, number_of_entries);
-  }
-
-
-  static int hash_string(jchar* s, int len);
-
-
-  // GC support
-  //   Delete pointers to otherwise-unreachable objects.
-  static void unlink(BoolObjectClosure* cl) {
-    the_table()->Hashtable::unlink(cl);
-  }
-
-  // Invoke "f->do_oop" on the locations of all oops in the table.
-  static void oops_do(OopClosure* f) {
-    the_table()->Hashtable::oops_do(f);
-  }
-
-  // Probing
-  static oop lookup(symbolOop symbol);
-
-  // Interning
-  static oop intern(symbolOop symbol, TRAPS);
-  static oop intern(oop string, TRAPS);
-  static oop intern(const char *utf8_string, TRAPS);
-
-  // Debugging
-  static void verify();
-
-  // Sharing
-  static void copy_buckets(char** top, char*end) {
-    the_table()->Hashtable::copy_buckets(top, end);
-  }
-  static void copy_table(char** top, char*end) {
-    the_table()->Hashtable::copy_table(top, end);
-  }
-  static void reverse() {
-    ((BasicHashtable*)the_table())->reverse();
-  }
-};
--- a/hotspot/src/share/vm/memory/systemDictionary.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2453 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)systemDictionary.cpp	1.356 07/05/05 17:05:58 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_systemDictionary.cpp.incl"
-
-
-Dictionary*       SystemDictionary::_dictionary = NULL;
-PlaceholderTable* SystemDictionary::_placeholders = NULL;
-Dictionary*       SystemDictionary::_shared_dictionary = NULL;
-LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL;
-ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
-
-
-int         SystemDictionary::_number_of_modifications = 0;
-
-oop         SystemDictionary::_system_loader_lock_obj     =  NULL;
-
-klassOop    SystemDictionary::_object_klass               =  NULL;
-klassOop    SystemDictionary::_string_klass               =  NULL;
-klassOop    SystemDictionary::_class_klass                =  NULL;
-klassOop    SystemDictionary::_cloneable_klass            =  NULL;
-klassOop    SystemDictionary::_classloader_klass          =  NULL;
-klassOop    SystemDictionary::_serializable_klass         =  NULL;
-klassOop    SystemDictionary::_system_klass               =  NULL;
-
-klassOop    SystemDictionary::_throwable_klass            =  NULL;
-klassOop    SystemDictionary::_error_klass                =  NULL;
-klassOop    SystemDictionary::_threaddeath_klass          =  NULL;
-klassOop    SystemDictionary::_exception_klass            =  NULL;
-klassOop    SystemDictionary::_runtime_exception_klass    =  NULL;
-klassOop    SystemDictionary::_classNotFoundException_klass = NULL;
-klassOop    SystemDictionary::_noClassDefFoundError_klass = NULL;
-klassOop    SystemDictionary::_linkageError_klass         = NULL;
-klassOop    SystemDictionary::_classCastException_klass   =  NULL;
-klassOop    SystemDictionary::_arrayStoreException_klass  =  NULL;
-klassOop    SystemDictionary::_virtualMachineError_klass  =  NULL;
-klassOop    SystemDictionary::_outOfMemoryError_klass     =  NULL;
-klassOop    SystemDictionary::_StackOverflowError_klass   =  NULL;
-klassOop    SystemDictionary::_illegalMonitorStateException_klass   =  NULL;
-klassOop    SystemDictionary::_protectionDomain_klass     =  NULL;
-klassOop    SystemDictionary::_AccessControlContext_klass = NULL;
-
-klassOop    SystemDictionary::_reference_klass            =  NULL;
-klassOop    SystemDictionary::_soft_reference_klass       =  NULL;
-klassOop    SystemDictionary::_weak_reference_klass       =  NULL;
-klassOop    SystemDictionary::_final_reference_klass      =  NULL;
-klassOop    SystemDictionary::_phantom_reference_klass    =  NULL;
-klassOop    SystemDictionary::_finalizer_klass            =  NULL;
-
-klassOop    SystemDictionary::_thread_klass               =  NULL;
-klassOop    SystemDictionary::_threadGroup_klass          =  NULL;
-klassOop    SystemDictionary::_properties_klass           =  NULL;
-klassOop    SystemDictionary::_reflect_accessible_object_klass =  NULL;
-klassOop    SystemDictionary::_reflect_field_klass        =  NULL;
-klassOop    SystemDictionary::_reflect_method_klass       =  NULL;
-klassOop    SystemDictionary::_reflect_constructor_klass  =  NULL;
-klassOop    SystemDictionary::_reflect_magic_klass        =  NULL;
-klassOop    SystemDictionary::_reflect_method_accessor_klass = NULL;
-klassOop    SystemDictionary::_reflect_constructor_accessor_klass = NULL;
-klassOop    SystemDictionary::_reflect_delegating_classloader_klass = NULL;
-klassOop    SystemDictionary::_reflect_constant_pool_klass =  NULL;
-klassOop    SystemDictionary::_reflect_unsafe_static_field_accessor_impl_klass = NULL;
-
-klassOop    SystemDictionary::_vector_klass               =  NULL;
-klassOop    SystemDictionary::_hashtable_klass            =  NULL;
-klassOop    SystemDictionary::_stringBuffer_klass         =  NULL;
-
-klassOop    SystemDictionary::_stackTraceElement_klass    =  NULL;
-
-klassOop    SystemDictionary::_java_nio_Buffer_klass      =  NULL;
-
-klassOop    SystemDictionary::_sun_misc_AtomicLongCSImpl_klass = NULL;
-
-klassOop    SystemDictionary::_boolean_klass              =  NULL;
-klassOop    SystemDictionary::_char_klass                 =  NULL;
-klassOop    SystemDictionary::_float_klass                =  NULL;
-klassOop    SystemDictionary::_double_klass               =  NULL;
-klassOop    SystemDictionary::_byte_klass                 =  NULL;
-klassOop    SystemDictionary::_short_klass                =  NULL;
-klassOop    SystemDictionary::_int_klass                  =  NULL;
-klassOop    SystemDictionary::_long_klass                 =  NULL;
-klassOop    SystemDictionary::_box_klasses[T_VOID+1]      =  { NULL /*, NULL...*/ };
-
-oop         SystemDictionary::_int_mirror                 =  NULL;
-oop         SystemDictionary::_float_mirror               =  NULL;
-oop         SystemDictionary::_double_mirror              =  NULL;
-oop         SystemDictionary::_byte_mirror                =  NULL;
-oop         SystemDictionary::_bool_mirror                =  NULL;
-oop         SystemDictionary::_char_mirror                =  NULL;
-oop         SystemDictionary::_long_mirror                =  NULL;
-oop         SystemDictionary::_short_mirror               =  NULL;
-oop         SystemDictionary::_void_mirror                =  NULL;
-oop         SystemDictionary::_mirrors[T_VOID+1]          =  { NULL /*, NULL...*/ };
-
-oop         SystemDictionary::_java_system_loader         =  NULL;
-
-bool        SystemDictionary::_has_loadClassInternal      =  false;
-bool        SystemDictionary::_has_checkPackageAccess     =  false;
-
-// lazily initialized klass variables
-volatile klassOop    SystemDictionary::_abstract_ownable_synchronizer_klass = NULL;
-
-
-// ----------------------------------------------------------------------------
-// Java-level SystemLoader
-
-oop SystemDictionary::java_system_loader() {
-  return _java_system_loader;
-}
-
-void SystemDictionary::compute_java_system_loader(TRAPS) {
-  KlassHandle system_klass(THREAD, _classloader_klass);    
-  JavaValue result(T_OBJECT);
-  JavaCalls::call_static(&result, 
-                         KlassHandle(THREAD, _classloader_klass),
-                         vmSymbolHandles::getSystemClassLoader_name(),
-                         vmSymbolHandles::void_classloader_signature(),
-                         CHECK);
-    
-  _java_system_loader = (oop)result.get_jobject();    
-}
-
-
-// ----------------------------------------------------------------------------
-// debugging
-
-#ifdef ASSERT
-
-// return true if class_name contains no '.' (internal format is '/')
-bool SystemDictionary::is_internal_format(symbolHandle class_name) {
-  if (class_name.not_null()) {
-    ResourceMark rm;
-    char* name = class_name->as_C_string();
-    return strchr(name, '.') == NULL;
-  } else {
-    return true;
-  }
-}
-
-#endif
-
-// ----------------------------------------------------------------------------
-// Resolving of classes
-
-// Forwards to resolve_or_null
-
-klassOop SystemDictionary::resolve_or_fail(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS) {  
-  klassOop klass = resolve_or_null(class_name, class_loader, protection_domain, THREAD);
-  if (HAS_PENDING_EXCEPTION || klass == NULL) {
-    KlassHandle k_h(THREAD, klass);
-    // can return a null klass
-    klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD);
-  }
-  return klass;
-}
-
-klassOop SystemDictionary::handle_resolution_exception(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS) {
-  if (HAS_PENDING_EXCEPTION) {
-    // If we have a pending exception we forward it to the caller, unless throw_error is true,
-    // in which case we have to check whether the pending exception is a ClassNotFoundException,
-    // and if so convert it to a NoClassDefFoundError
-    // And chain the original ClassNotFoundException
-    if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) {
-      ResourceMark rm(THREAD);
-      assert(klass_h() == NULL, "Should not have result with exception pending");
-      Handle e(THREAD, PENDING_EXCEPTION);
-      CLEAR_PENDING_EXCEPTION;
-      THROW_MSG_CAUSE_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string(), e);
-    } else {
-      return NULL; 
-    }
-  }
-  // Class not found, throw appropriate error or exception depending on value of throw_error
-  if (klass_h() == NULL) {
-    ResourceMark rm(THREAD);
-    if (throw_error) {
-      THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string());
-    } else {      
-      THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());      
-    }
-  }
-  return (klassOop)klass_h(); 
-}
-
-
-klassOop SystemDictionary::resolve_or_fail(symbolHandle class_name,
-                                           bool throw_error, TRAPS)
-{
-  return resolve_or_fail(class_name, Handle(), Handle(), throw_error, THREAD);
-}
-
-
-// Forwards to resolve_instance_class_or_null
-
-klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {  
-  assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread");
-  if (FieldType::is_array(class_name())) {
-    return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
-  } else {
-    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
-  }
-}
-
-klassOop SystemDictionary::resolve_or_null(symbolHandle class_name, TRAPS) {  
-  return resolve_or_null(class_name, Handle(), Handle(), THREAD);
-}
-
-// Forwards to resolve_instance_class_or_null
-
-klassOop SystemDictionary::resolve_array_class_or_null(symbolHandle class_name,
-                                                       Handle class_loader, 
-                                                       Handle protection_domain,
-                                                       TRAPS) {  
-  assert(FieldType::is_array(class_name()), "must be array");
-  jint dimension;
-  symbolOop object_key;
-  klassOop k = NULL;  
-  // dimension and object_key are assigned as a side-effect of this call
-  BasicType t = FieldType::get_array_info(class_name(), 
-                                          &dimension, 
-                                          &object_key, 
-                                          CHECK_NULL);
-
-  if (t == T_OBJECT) {
-    symbolHandle h_key(THREAD, object_key);
-    // naked oop "k" is OK here -- we assign back into it
-    k = SystemDictionary::resolve_instance_class_or_null(h_key, 
-                                                         class_loader, 
-                                                         protection_domain, 
-                                                         CHECK_NULL);
-    if (k != NULL) {
-      k = Klass::cast(k)->array_klass(dimension, CHECK_NULL);
-    }
-  } else {
-    k = Universe::typeArrayKlassObj(t);
-    k = typeArrayKlass::cast(k)->array_klass(dimension, CHECK_NULL);
-  }
-  return k;
-}
-
-
-// Must be called for any super-class or super-interface resolution
-// during class definition to allow class circularity checking
-// super-interface callers: 
-//    parse_interfaces - for defineClass & jvmtiRedefineClasses
-// super-class callers:
-//   ClassFileParser - for defineClass & jvmtiRedefineClasses
-//   load_shared_class - while loading a class from shared archive
-//   resolve_instance_class_or_fail:
-//      when resolving a class that has an existing placeholder with
-//      a saved superclass [i.e. a defineClass is currently in progress]
-//      if another thread is trying to resolve the class, it must do
-//      super-class checks on its own thread to catch class circularity
-// This last call is critical in class circularity checking for cases
-// where classloading is delegated to different threads and the
-// classloader lock is released.
-// Take the case: Base->Super->Base
-//   1. If thread T1 tries to do a defineClass of class Base
-//    resolve_super_or_fail creates placeholder: T1, Base (super Super)
-//   2. resolve_instance_class_or_null does not find SD or placeholder for Super
-//    so it tries to load Super
-//   3. If we load the class internally, or user classloader uses same thread
-//      loadClassFromxxx or defineClass via parseClassFile Super ...
-//      3.1 resolve_super_or_fail creates placeholder: T1, Super (super Base) 
-//      3.3 resolve_instance_class_or_null Base, finds placeholder for Base
-//      3.4 calls resolve_super_or_fail Base
-//      3.5 finds T1,Base -> throws class circularity
-//OR 4. If T2 tries to resolve Super via defineClass Super ...
-//      4.1 resolve_super_or_fail creates placeholder: T2, Super (super Base) 
-//      4.2 resolve_instance_class_or_null Base, finds placeholder for Base (super Super)
-//      4.3 calls resolve_super_or_fail Super in parallel on own thread T2
-//      4.4 finds T2, Super -> throws class circularity
-// Must be called, even if superclass is null, since this is
-// where the placeholder entry is created which claims this
-// thread is loading this class/classloader.
-klassOop SystemDictionary::resolve_super_or_fail(symbolHandle child_name,
-                                                 symbolHandle class_name,
-                                                 Handle class_loader,
-                                                 Handle protection_domain,
-                                                 bool is_superclass,
-                                                 TRAPS) {
-
-  // Double-check, if child class is already loaded, just return super-class,interface
-  // Don't add a placedholder if already loaded, i.e. already in system dictionary
-  // Make sure there's a placeholder for the *child* before resolving.
-  // Used as a claim that this thread is currently loading superclass/classloader
-  // Used here for ClassCircularity checks and also for heap verification
-  // (every instanceKlass in the heap needs to be in the system dictionary
-  // or have a placeholder).
-  // Must check ClassCircularity before checking if super class is already loaded
-  //
-  // We might not already have a placeholder if this child_name was
-  // first seen via resolve_from_stream (jni_DefineClass or JVM_DefineClass);
-  // the name of the class might not be known until the stream is actually
-  // parsed.
-  // Bugs 4643874, 4715493
-  // compute_hash can have a safepoint
-
-  unsigned int d_hash = dictionary()->compute_hash(child_name, class_loader);
-  int d_index = dictionary()->hash_to_index(d_hash);
-  unsigned int p_hash = placeholders()->compute_hash(child_name, class_loader);
-  int p_index = placeholders()->hash_to_index(p_hash);
-  // can't throw error holding a lock
-  bool child_already_loaded = false;
-  bool throw_circularity_error = false;
-  {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    klassOop childk = find_class(d_index, d_hash, child_name, class_loader);
-    klassOop quicksuperk;
-    // to support // loading: if child done loading, just return superclass
-    // if class_name, & class_loader don't match:
-    // if initial define, SD update will give LinkageError
-    // if redefine: compare_class_versions will give HIERARCHY_CHANGED
-    // so we don't throw an exception here.
-    // see: nsk redefclass014 & java.lang.instrument Instrument032
-    if ((childk != NULL ) && (is_superclass) &&
-       ((quicksuperk = instanceKlass::cast(childk)->super()) != NULL) &&
-      
-         ((Klass::cast(quicksuperk)->name() == class_name()) && 
-            (Klass::cast(quicksuperk)->class_loader()  == class_loader()))) {
-           return quicksuperk;
-    } else {
-      PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, class_loader);
-      if (probe && probe->check_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER)) {
-          throw_circularity_error = true;
-      } 
-
-      // add placeholder entry even if error - callers will remove on error
-      PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, class_loader, PlaceholderTable::LOAD_SUPER, class_name, THREAD); 
-      if (throw_circularity_error) {
-         newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
-      }
-    }
-  }
-  if (throw_circularity_error) {
-      ResourceMark rm(THREAD);
-      THROW_MSG_0(vmSymbols::java_lang_ClassCircularityError(), child_name->as_C_string());
-  }
-
-// java.lang.Object should have been found above
-  assert(class_name() != NULL, "null super class for resolving");
-  // Resolve the super class or interface, check results on return
-  klassOop superk = NULL;
-  superk = SystemDictionary::resolve_or_null(class_name,
-                                                 class_loader,
-                                                 protection_domain,
-                                                 THREAD);
-  
-  KlassHandle superk_h(THREAD, superk);
-  
-  // Note: clean up of placeholders currently in callers of
-  // resolve_super_or_fail - either at update_dictionary time
-  // or on error 
-  {
-  MutexLocker mu(SystemDictionary_lock, THREAD);
-   PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, class_loader);
-   if (probe != NULL) {
-      probe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
-   }
-  }
-  if (HAS_PENDING_EXCEPTION || superk_h() == NULL) {
-    // can null superk
-    superk_h = KlassHandle(THREAD, handle_resolution_exception(class_name, class_loader, protection_domain, true, superk_h, THREAD));
-  }
-
-  return superk_h();
-}
-
-
-void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
-                                                  Handle class_loader,
-                                                  Handle protection_domain,
-                                                  TRAPS) {
-  if(!has_checkPackageAccess()) return;
-
-  // Now we have to call back to java to check if the initating class has access
-  JavaValue result(T_VOID);
-  if (TraceProtectionDomainVerification) {
-    // Print out trace information
-    tty->print_cr("Checking package access");
-    tty->print(" - class loader:      "); class_loader()->print_value_on(tty);      tty->cr();
-    tty->print(" - protection domain: "); protection_domain()->print_value_on(tty); tty->cr();
-    tty->print(" - loading:           "); klass()->print_value_on(tty);             tty->cr();
-  }
-  
-  assert(class_loader() != NULL, "should not have non-null protection domain for null classloader");
-
-  KlassHandle system_loader(THREAD, SystemDictionary::classloader_klass());
-  JavaCalls::call_special(&result,
-                         class_loader,
-                         system_loader,
-                         vmSymbolHandles::checkPackageAccess_name(),
-                         vmSymbolHandles::class_protectiondomain_signature(), 
-                         Handle(THREAD, klass->java_mirror()),
-                         protection_domain,
-                         THREAD);
-
-  if (TraceProtectionDomainVerification) {
-    if (HAS_PENDING_EXCEPTION) {
-      tty->print_cr(" -> DENIED !!!!!!!!!!!!!!!!!!!!!");
-    } else {
-     tty->print_cr(" -> granted");
-    }
-    tty->cr();
-  }
-
-  if (HAS_PENDING_EXCEPTION) return; 
-    
-  // If no exception has been thrown, we have validated the protection domain
-  // Insert the protection domain of the initiating class into the set.
-  {
-    // We recalculate the entry here -- we've called out to java since
-    // the last time it was calculated.
-    symbolHandle kn(THREAD, klass->name());
-    unsigned int d_hash = dictionary()->compute_hash(kn, class_loader);
-    int d_index = dictionary()->hash_to_index(d_hash);
-
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    { 
-      // Note that we have an entry, and entries can be deleted only during GC,
-      // so we cannot allow GC to occur while we're holding this entry.
-
-      // We're using a No_Safepoint_Verifier to catch any place where we
-      // might potentially do a GC at all.
-      // SystemDictionary::do_unloading() asserts that classes are only
-      // unloaded at a safepoint.
-      No_Safepoint_Verifier nosafepoint;
-      dictionary()->add_protection_domain(d_index, d_hash, klass, class_loader,
-                                          protection_domain, THREAD);
-    }
-  }
-}
-
-// We only get here if this thread finds that another thread
-// has already claimed the placeholder token for the current operation,
-// but that other thread either never owned or gave up the
-// object lock
-// Waits on SystemDictionary_lock to indicate placeholder table updated
-// On return, caller must recheck placeholder table state
-//
-// We only get here if 
-//  1) custom classLoader, i.e. not bootstrap classloader
-//  2) UnsyncloadClass not set
-//  3) custom classLoader has broken the class loader objectLock
-//     so another thread got here in parallel
-//
-// lockObject must be held. 
-// Complicated dance due to lock ordering:
-// Must first release the classloader object lock to
-// allow initial definer to complete the class definition
-// and to avoid deadlock
-// Reclaim classloader lock object with same original recursion count
-// Must release SystemDictionary_lock after notify, since
-// class loader lock must be claimed before SystemDictionary_lock
-// to prevent deadlocks
-//
-// The notify allows applications that did an untimed wait() on
-// the classloader object lock to not hang.
-void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
-  assert_lock_strong(SystemDictionary_lock);
-
-  bool calledholdinglock 
-      = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
-  assert(calledholdinglock,"must hold lock for notify");
-  assert(!UnsyncloadClass, "unexpected double_lock_wait");
-  ObjectSynchronizer::notifyall(lockObject, THREAD);
-  intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
-  SystemDictionary_lock->wait();
-  SystemDictionary_lock->unlock();
-  ObjectSynchronizer::reenter(lockObject, recursions, THREAD);
-  SystemDictionary_lock->lock();
-}
-
-// If the class in is in the placeholder table, class loading is in progress
-// For cases where the application changes threads to load classes, it
-// is critical to ClassCircularity detection that we try loading
-// the superclass on the same thread internally, so we do parallel
-// super class loading here.
-// This also is critical in cases where the original thread gets stalled
-// even in non-circularity situations.
-// Note: only one thread can define the class, but multiple can resolve
-// Note: must call resolve_super_or_fail even if null super -
-// to force placeholder entry creation for this class
-// Caller must check for pending exception
-// Returns non-null klassOop if other thread has completed load
-// and we are done, 
-// If return null klassOop and no pending exception, the caller must load the class
-// At this point, handle_parallel_super_load should never be called
-// with the bootstrapclass loader
-instanceKlassHandle SystemDictionary::handle_parallel_super_load(
-    symbolHandle name, symbolHandle superclassname, Handle class_loader, 
-    Handle protection_domain, Handle lockObject, TRAPS) {
-
-  instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-  unsigned int d_hash = dictionary()->compute_hash(name, class_loader);
-  int d_index = dictionary()->hash_to_index(d_hash);
-  unsigned int p_hash = placeholders()->compute_hash(name, class_loader);
-  int p_index = placeholders()->hash_to_index(p_hash);
-
-  // superk is not used, resolve_super called for circularity check only
-  // This code is reached in two situations. One if this thread
-  // is loading the same class twice (e.g. ClassCircularity, or 
-  // java.lang.instrument).
-  // The second is if another thread started the resolve_super first
-  // and has not yet finished. 
-  // In both cases the original caller will clean up the placeholder
-  // entry on error.
-  klassOop superk = SystemDictionary::resolve_super_or_fail(name,
-                                                          superclassname,
-                                                          class_loader,
-                                                          protection_domain,
-                                                          true,
-                                                          CHECK_(nh));
-  // We don't redefine the class, so we just need to clean up if there
-  // was not an error (don't want to modify any system dictionary
-  // data structures).
-  {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
-    SystemDictionary_lock->notify_all();
-  }
-
-  // UnsyncloadClass does NOT wait for parallel superclass loads to complete
-  // Bootstrap classloader does wait for parallel superclass loads
- if (UnsyncloadClass) {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    // Check if classloading completed while we were loading superclass or waiting
-    klassOop check = find_class(d_index, d_hash, name, class_loader);
-    if (check != NULL) {
-      // Klass is already loaded, so just return it
-      return(instanceKlassHandle(THREAD, check));
-    } else {
-      return nh;
-    }
-  } 
-
-  // must loop to both handle other placeholder updates
-  // and spurious notifications
-  bool super_load_in_progress = true;
-  PlaceholderEntry* placeholder;
-  while (super_load_in_progress) {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    // Check if classloading completed while we were loading superclass or waiting
-    klassOop check = find_class(d_index, d_hash, name, class_loader);
-    if (check != NULL) {
-      // Klass is already loaded, so just return it
-      return(instanceKlassHandle(THREAD, check));
-    } else {
-      placeholder = placeholders()->get_entry(p_index, p_hash, name, class_loader);
-      if (placeholder && placeholder->super_load_in_progress() ){
-        // Before UnsyncloadClass:
-        // We only get here if the application has released the
-        // classloader lock when another thread was in the middle of loading a
-        // superclass/superinterface for this class, and now
-        // this thread is also trying to load this class.
-        // To minimize surprises, the first thread that started to
-        // load a class should be the one to complete the loading
-        // with the classfile it initially expected.
-        // This logic has the current thread wait once it has done
-        // all the superclass/superinterface loading it can, until
-        // the original thread completes the class loading or fails
-        // If it completes we will use the resulting instanceKlass
-        // which we will find below in the systemDictionary.
-        // We also get here for parallel bootstrap classloader
-        if (class_loader.is_null()) {
-          SystemDictionary_lock->wait();
-        } else {
-          double_lock_wait(lockObject, THREAD);
-        }
-      } else {
-        // If not in SD and not in PH, other thread's load must have failed
-        super_load_in_progress = false;
-      }
-    }
-  }
-  return (nh);
-}
-
-
-klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS) {
-  assert(class_name.not_null() && !FieldType::is_array(class_name()), "invalid class name");
-  // First check to see if we should remove wrapping L and ;
-  symbolHandle name;    
-  if (FieldType::is_obj(class_name())) {
-    ResourceMark rm(THREAD);
-    // Ignore wrapping L and ;.
-    name = oopFactory::new_symbol_handle(class_name()->as_C_string() + 1, class_name()->utf8_length() - 2, CHECK_NULL);    
-  } else {
-    name = class_name;
-  }
-
-  // UseNewReflection
-  // Fix for 4474172; see evaluation for more details
-  class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
-
-  // Do lookup to see if class already exist and the protection domain
-  // has the right access
-  unsigned int d_hash = dictionary()->compute_hash(name, class_loader);
-  int d_index = dictionary()->hash_to_index(d_hash);
-  klassOop probe = dictionary()->find(d_index, d_hash, name, class_loader,
-                                      protection_domain, THREAD);
-  if (probe != NULL) return probe;
-
-
-  // Non-bootstrap class loaders will call out to class loader and
-  // define via jvm/jni_DefineClass which will acquire the
-  // class loader object lock to protect against multiple threads
-  // defining the class in parallel by accident.
-  // This lock must be acquired here so the waiter will find
-  // any successful result in the SystemDictionary and not attempt
-  // the define
-  // Classloaders that support parallelism, e.g. bootstrap classloader,
-  // or all classloaders with UnsyncloadClass do not acquire lock here
-  bool DoObjectLock = true;
-  if (UnsyncloadClass || (class_loader.is_null())) {
-    DoObjectLock = false;
-  }
-
-  unsigned int p_hash = placeholders()->compute_hash(name, class_loader);
-  int p_index = placeholders()->hash_to_index(p_hash);
-
-  // Class is not in SystemDictionary so we have to do loading.
-  // Make sure we are synchronized on the class loader before we proceed
-  Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
-  check_loader_lock_contention(lockObject, THREAD);
-  ObjectLocker ol(lockObject, THREAD, DoObjectLock);
-
-  // Check again (after locking) if class already exist in SystemDictionary
-  bool class_has_been_loaded   = false;
-  bool super_load_in_progress  = false;
-  bool havesupername = false;
-  instanceKlassHandle k;
-  PlaceholderEntry* placeholder;
-  symbolHandle superclassname;
-
-  {           
-    MutexLocker mu(SystemDictionary_lock, THREAD);  
-    klassOop check = find_class(d_index, d_hash, name, class_loader);
-    if (check != NULL) {
-      // Klass is already loaded, so just return it
-      class_has_been_loaded = true;
-      k = instanceKlassHandle(THREAD, check);
-    } else {
-      placeholder = placeholders()->get_entry(p_index, p_hash, name, class_loader);
-      if (placeholder && placeholder->super_load_in_progress()) {
-         super_load_in_progress = true;
-         if (placeholder->havesupername() == true) {
-           superclassname = symbolHandle(THREAD, placeholder->supername());
-           havesupername = true;
-         }
-      } 
-    }
-  }
-
-  // If the class in is in the placeholder table, class loading is in progress
-  if (super_load_in_progress && havesupername==true) {
-    k = SystemDictionary::handle_parallel_super_load(name, superclassname, 
-        class_loader, protection_domain, lockObject, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      return NULL;
-    }
-    if (!k.is_null()) {
-      class_has_been_loaded = true;
-    }
-  }
-
-  if (!class_has_been_loaded) {
-  
-    // add placeholder entry to record loading instance class
-    // to prevent parallel instance class loading if classloader object lock
-    // broken
-    // Also needed to prevent modifying bootclasssearchpath
-    // in parallel with a classload of same classname
-    // Classloaders that support parallelism, such as the bootstrap classloader
-    // or all classloaders with UnsyncloadClass flag
-    // allow parallel loading of same class/classloader pair
-    symbolHandle nullsymbolHandle;
-    bool throw_circularity_error = false;
-    // If not a classloader that supports parallelism and
-    // if NOT UnsyncloadClass, and we find an existing LOAD_INSTANCE for this
-    // class/classloader pair,  we know that the
-    // custom classloader explicitly did a wait to release the lock
-    // since we called out to loadClass with the objectlock already held
-    // In that case we should already own the ObjectLocker
-    // and want to send a notify on it
-    // For parallel bootstrap classloader we won't own the ObjectLocker
-    {
-      MutexLocker mu(SystemDictionary_lock, THREAD);
-      if (!UnsyncloadClass) {
-        PlaceholderEntry* oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
-        if (oldprobe) {
-          // only need check_seen_thread once, not on each loop
-          // 6341374 java/lang/Instrument with -Xcomp
-          if (oldprobe->check_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE)) {
-            throw_circularity_error = true;
-          } else {
-            while (!class_has_been_loaded && oldprobe && oldprobe->instance_load_in_progress()) {
-     
-              // For classloaders that allow parallelism, including bootstrap classloader
-              // we want to wait on the first requestor for a specific
-              // class/classloader pair
-              if (class_loader.is_null()) {
-                SystemDictionary_lock->wait();
-              } else {
-              // if another thread is already loading this instance, then we
-              // know the user has broken the classloader lock
-              // we need to ensure that the first requestor completes the request
-              // and other requestors wait for that completion
-              // The notify allows applications that did an untimed wait() on
-              // the classloader object lock to not hang.
-              // see test b4699981 
-                double_lock_wait(lockObject, THREAD);
-              }
-              // Check if classloading completed while we were waiting
-              klassOop check = find_class(d_index, d_hash, name, class_loader);
-              if (check != NULL) {
-                // Klass is already loaded, so just return it
-                k = instanceKlassHandle(THREAD, check);
-                class_has_been_loaded = true;
-              }
-              // check if other thread failed to load and cleaned up
-              oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
-            } 
-          } 
-        }
-      }
-      // add LOAD_INSTANCE regardless of flag
-      // classloaders that support parallelism, such as bootstrap classloader
-      // or all loaders with  UnsyncloadClass 
-      // allow competing threads to try LOAD_INSTANCE in parallel
-      // add placeholder entry even if error - callers will remove on error
-      if (!class_has_been_loaded) {
-        PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, class_loader, PlaceholderTable::LOAD_INSTANCE, nullsymbolHandle, THREAD); 
-        if (throw_circularity_error) {
-          newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
-        }
-      }
-    }
-    // must throw error outside of owning lock
-    if (throw_circularity_error) {
-      ResourceMark rm(THREAD);
-      THROW_MSG_0(vmSymbols::java_lang_ClassCircularityError(), name->as_C_string());
-    }
-
-    if (!class_has_been_loaded) {
-
-      // Do actual loading
-      k = load_instance_class(name, class_loader, THREAD);
-
-      // In custom class loaders, the usual findClass calls
-      // findLoadedClass, which directly searches  the SystemDictionary, then
-      // defineClass. If these are not atomic with respect to other threads,
-      // the findLoadedClass can fail, but the defineClass can get a 
-      // LinkageError:: duplicate class definition.
-      // If they got a linkageError, check if a parallel class load succeeded.
-      // If it did, then for bytecode resolution the specification requires
-      // that we return the same result we did for the other thread, i.e. the
-      // successfully loaded instanceKlass
-      // Note: Class can not be unloaded as long as any classloader refs exist
-      // Should not get here for classloaders that support parallelism
-      // with the new cleaner mechanism, e.g. bootstrap classloader
-      if (UnsyncloadClass || (class_loader.is_null())) {
-        if (k.is_null() && HAS_PENDING_EXCEPTION 
-          && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
-          MutexLocker mu(SystemDictionary_lock, THREAD);
-          klassOop check = find_class(d_index, d_hash, name, class_loader);
-          if (check != NULL) {
-            // Klass is already loaded, so just use it
-            k = instanceKlassHandle(THREAD, check);
-            CLEAR_PENDING_EXCEPTION;
-            guarantee((!class_loader.is_null()), "dup definition for bootstrap loader?");
-          }
-        }
-      }
-
-      // clean up placeholder entries for success or error
-      // This cleans up LOAD_INSTANCE entries
-      // It also cleans up LOAD_SUPER entries on errors from 
-      // calling load_instance_class
-      { 
-        MutexLocker mu(SystemDictionary_lock, THREAD);
-        PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
-        if (probe != NULL) {
-          probe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
-          placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
-          SystemDictionary_lock->notify_all();
-        }
-      }
-
-      // If everything was OK (no exceptions, no null return value), and
-      // class_loader is NOT the defining loader, do a little more bookkeeping.
-      if (!HAS_PENDING_EXCEPTION && !k.is_null() && 
-        k->class_loader() != class_loader()) {
-
-        check_constraints(d_index, d_hash, k, class_loader, false, THREAD);
-
-        // Need to check for a PENDING_EXCEPTION again; check_constraints
-        // can throw and doesn't use the CHECK macro.
-        if (!HAS_PENDING_EXCEPTION) {
-          { // Grabbing the Compile_lock prevents systemDictionary updates
-            // during compilations. 
-            MutexLocker mu(Compile_lock, THREAD);      
-            update_dictionary(d_index, d_hash, p_index, p_hash,
-                            k, class_loader, THREAD);
-          }
-          if (JvmtiExport::should_post_class_load()) {
-            Thread *thread = THREAD;
-            assert(thread->is_Java_thread(), "thread->is_Java_thread()");
-            JvmtiExport::post_class_load((JavaThread *) thread, k());
-          }
-        }
-      }
-      if (HAS_PENDING_EXCEPTION || k.is_null()) {
-        // On error, clean up placeholders
-        {
-          MutexLocker mu(SystemDictionary_lock, THREAD);
-          placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
-          SystemDictionary_lock->notify_all();
-        }
-        return NULL;
-      }
-    }
-  }
-
-#ifdef ASSERT
-  {
-    Handle loader (THREAD, k->class_loader());
-    MutexLocker mu(SystemDictionary_lock, THREAD);  
-    oop kk = find_class_or_placeholder(name, loader);
-    assert(kk == k(), "should be present in dictionary");
-  }
-#endif
-
-  // return if the protection domain in NULL
-  if (protection_domain() == NULL) return k();
-
-  // Check the protection domain has the right access 
-  {
-    MutexLocker mu(SystemDictionary_lock, THREAD);  
-    // Note that we have an entry, and entries can be deleted only during GC,
-    // so we cannot allow GC to occur while we're holding this entry.
-    // We're using a No_Safepoint_Verifier to catch any place where we
-    // might potentially do a GC at all.
-    // SystemDictionary::do_unloading() asserts that classes are only
-    // unloaded at a safepoint.
-    No_Safepoint_Verifier nosafepoint;
-    if (dictionary()->is_valid_protection_domain(d_index, d_hash, name,
-                                                 class_loader,
-                                                 protection_domain)) {
-      return k();
-    }
-  }
-
-  // Verify protection domain. If it fails an exception is thrown
-  validate_protection_domain(k, class_loader, protection_domain, CHECK_(klassOop(NULL)));
-
-  return k();
-}
-
-
-// This routine does not lock the system dictionary.
-//
-// Since readers don't hold a lock, we must make sure that system
-// dictionary entries are only removed at a safepoint (when only one
-// thread is running), and are added to in a safe way (all links must
-// be updated in an MT-safe manner).
-//
-// Callers should be aware that an entry could be added just after
-// _dictionary->bucket(index) is read here, so the caller will not see
-// the new entry.
-
-klassOop SystemDictionary::find(symbolHandle class_name,
-                                Handle class_loader, 
-                                Handle protection_domain,
-                                TRAPS) {
-
-  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
-  int d_index = dictionary()->hash_to_index(d_hash);
-
-  {
-    // Note that we have an entry, and entries can be deleted only during GC,
-    // so we cannot allow GC to occur while we're holding this entry.
-    // We're using a No_Safepoint_Verifier to catch any place where we
-    // might potentially do a GC at all.
-    // SystemDictionary::do_unloading() asserts that classes are only
-    // unloaded at a safepoint.
-    No_Safepoint_Verifier nosafepoint;
-    return dictionary()->find(d_index, d_hash, class_name, class_loader,
-                              protection_domain, THREAD);
-  }
-}
-
-
-// Look for a loaded instance or array klass by name.  Do not do any loading.
-// return NULL in case of error.
-klassOop SystemDictionary::find_instance_or_array_klass(symbolHandle class_name,
-                                                        Handle class_loader,
-							Handle protection_domain,
-                                                        TRAPS) {
-  klassOop k = NULL;
-  assert(class_name() != NULL, "class name must be non NULL");
-  if (FieldType::is_array(class_name())) {
-    // The name refers to an array.  Parse the name.
-    jint dimension;
-    symbolOop object_key;
-
-    // dimension and object_key are assigned as a side-effect of this call
-    BasicType t = FieldType::get_array_info(class_name(), &dimension,
-					    &object_key, CHECK_(NULL));
-    if (t != T_OBJECT) {
-      k = Universe::typeArrayKlassObj(t);
-    } else {
-      symbolHandle h_key(THREAD, object_key);
-      k = SystemDictionary::find(h_key, class_loader, protection_domain, THREAD);
-    }
-    if (k != NULL) {
-      k = Klass::cast(k)->array_klass_or_null(dimension);
-    }
-  } else {
-    k = find(class_name, class_loader, protection_domain, THREAD);
-  }
-  return k;
-}
-
-// Note: this method is much like resolve_from_stream, but
-// updates no supplemental data structures.
-// TODO consolidate the two methods with a helper routine?
-klassOop SystemDictionary::parse_stream(symbolHandle class_name,
-                                        Handle class_loader,
-                                        Handle protection_domain,
-                                        ClassFileStream* st,
-                                        TRAPS) {
-  symbolHandle parsed_name;
-
-  // Parse the stream. Note that we do this even though this klass might
-  // already be present in the SystemDictionary, otherwise we would not
-  // throw potential ClassFormatErrors.
-  //
-  // Note: "name" is updated.
-  // Further note:  a placeholder will be added for this class when
-  //   super classes are loaded (resolve_super_or_fail). We expect this
-  //   to be called for all classes but java.lang.Object; and we preload
-  //   java.lang.Object through resolve_or_fail, not this path.
-
-  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
-                                                             class_loader,
-                                                             protection_domain,
-                                                             parsed_name,
-                                                             THREAD);
-
-
-  // We don't redefine the class, so we just need to clean up whether there
-  // was an error or not (don't want to modify any system dictionary
-  // data structures).
-  // Parsed name could be null if we threw an error before we got far
-  // enough along to parse it -- in that case, there is nothing to clean up.
-  if (!parsed_name.is_null()) {
-    unsigned int p_hash = placeholders()->compute_hash(parsed_name, 
-                                                       class_loader);
-    int p_index = placeholders()->hash_to_index(p_hash);
-    {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    placeholders()->find_and_remove(p_index, p_hash, parsed_name, class_loader, THREAD);
-    SystemDictionary_lock->notify_all();
-    }
-  }
-
-  return k();
-}
-
-// Add a klass to the system from a stream (called by jni_DefineClass and
-// JVM_DefineClass).
-// Note: class_name can be NULL. In that case we do not know the name of 
-// the class until we have parsed the stream.
-
-klassOop SystemDictionary::resolve_from_stream(symbolHandle class_name, 
-                                               Handle class_loader, 
-                                               Handle protection_domain, 
-                                               ClassFileStream* st, 
-                                               TRAPS) {
-
-  // Make sure we are synchronized on the class loader before we initiate 
-  // loading.
-  Handle lockObject = compute_loader_lock_object(class_loader, THREAD); 
-  check_loader_lock_contention(lockObject, THREAD);
-  ObjectLocker ol(lockObject, THREAD);
-
-  symbolHandle parsed_name;
-
-  // Parse the stream. Note that we do this even though this klass might 
-  // already be present in the SystemDictionary, otherwise we would not 
-  // throw potential ClassFormatErrors.
-  //
-  // Note: "name" is updated.
-  // Further note:  a placeholder will be added for this class when
-  //   super classes are loaded (resolve_super_or_fail). We expect this
-  //   to be called for all classes but java.lang.Object; and we preload
-  //   java.lang.Object through resolve_or_fail, not this path.
-
-  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, 
-                                                             class_loader, 
-                                                             protection_domain,
-                                                             parsed_name,
-                                                             THREAD);
-
-  const char* pkg = "java/";
-  if (!HAS_PENDING_EXCEPTION && 
-      !class_loader.is_null() && 
-      !parsed_name.is_null() && 
-      !strncmp((const char*)parsed_name->bytes(), pkg, strlen(pkg))) {
-    // It is illegal to define classes in the "java." package from
-    // JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
-    ResourceMark rm(THREAD);
-    char* name = parsed_name->as_C_string();
-    char* index = strrchr(name, '/');
-    *index = '\0'; // chop to just the package name
-    while ((index = strchr(name, '/')) != NULL) {
-      *index = '.'; // replace '/' with '.' in package name
-    }
-    const char* fmt = "Prohibited package name: %s";
-    size_t len = strlen(fmt) + strlen(name);
-    char* message = NEW_RESOURCE_ARRAY(char, len);
-    jio_snprintf(message, len, fmt, name);
-    Exceptions::_throw_msg(THREAD_AND_LOCATION, 
-      vmSymbols::java_lang_SecurityException(), message);
-  }
-
-  if (!HAS_PENDING_EXCEPTION) {
-    assert(!parsed_name.is_null(), "Sanity");
-    assert(class_name.is_null() || class_name() == parsed_name(), 
-           "name mismatch");
-    // Verification prevents us from creating names with dots in them, this
-    // asserts that that's the case.
-    assert(is_internal_format(parsed_name),
-           "external class name format used internally");
-
-    // Add class just loaded
-    define_instance_class(k, THREAD);
-  }
-
-  // If parsing the class file or define_instance_class failed, we
-  // need to remove the placeholder added on our behalf. But we
-  // must make sure parsed_name is valid first (it won't be if we had
-  // a format error before the class was parsed far enough to
-  // find the name).
-  if (HAS_PENDING_EXCEPTION && !parsed_name.is_null()) {
-    unsigned int p_hash = placeholders()->compute_hash(parsed_name, 
-                                                       class_loader);
-    int p_index = placeholders()->hash_to_index(p_hash);
-    {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    placeholders()->find_and_remove(p_index, p_hash, parsed_name, class_loader, THREAD);
-    SystemDictionary_lock->notify_all();
-    }
-    return NULL;
-  }
-
-  // Make sure that we didn't leave a place holder in the
-  // SystemDictionary; this is only done on success
-  debug_only( {
-    if (!HAS_PENDING_EXCEPTION) {
-      assert(!parsed_name.is_null(), "parsed_name is still null?");
-      symbolHandle h_name   (THREAD, k->name());
-      Handle h_loader (THREAD, k->class_loader());
-
-      MutexLocker mu(SystemDictionary_lock, THREAD);
-
-      oop check = find_class_or_placeholder(parsed_name, class_loader);
-      assert(check == k(), "should be present in the dictionary");
-
-      oop check2 = find_class_or_placeholder(h_name, h_loader);
-      assert(check == check2, "name inconsistancy in SystemDictionary");
-    }
-  } );
-
-  return k();
-}
-
-
-void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length,
-                                             int number_of_entries) {
-  assert(length == _nof_buckets * sizeof(HashtableBucket),
-         "bad shared dictionary size.");
-  _shared_dictionary = new Dictionary(_nof_buckets, t, number_of_entries);
-}
-
-
-// If there is a shared dictionary, then find the entry for the
-// given shared system class, if any.
-
-klassOop SystemDictionary::find_shared_class(symbolHandle class_name) {
-  if (shared_dictionary() != NULL) {
-    unsigned int d_hash = dictionary()->compute_hash(class_name, Handle());
-    int d_index = dictionary()->hash_to_index(d_hash);
-    return shared_dictionary()->find_shared_class(d_index, d_hash, class_name);
-  } else {
-    return NULL;
-  }
-}
-
-
-// Load a class from the shared spaces (found through the shared system
-// dictionary).  Force the superclass and all interfaces to be loaded.
-// Update the class definition to include sibling classes and no
-// subclasses (yet).  [Classes in the shared space are not part of the
-// object hierarchy until loaded.]
-
-instanceKlassHandle SystemDictionary::load_shared_class(
-                 symbolHandle class_name, Handle class_loader, TRAPS) {
-  instanceKlassHandle ik (THREAD, find_shared_class(class_name));
-  return load_shared_class(ik, class_loader, THREAD);
-}
-
-// Note well!  Changes to this method may affect oop access order
-// in the shared archive.  Please take care to not make changes that
-// adversely affect cold start time by changing the oop access order
-// that is specified in dump.cpp MarkAndMoveOrderedReadOnly and
-// MarkAndMoveOrderedReadWrite closures.
-instanceKlassHandle SystemDictionary::load_shared_class(
-                 instanceKlassHandle ik, Handle class_loader, TRAPS) {
-  assert(class_loader.is_null(), "non-null classloader for shared class?");
-  if (ik.not_null()) {
-    instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-    symbolHandle class_name(THREAD, ik->name());
-
-    // Found the class, now load the superclass and interfaces.  If they
-    // are shared, add them to the main system dictionary and reset
-    // their hierarchy references (supers, subs, and interfaces).
-
-    if (ik->super() != NULL) {
-      symbolHandle cn(THREAD, ik->super()->klass_part()->name());
-      resolve_super_or_fail(class_name, cn,
-                            class_loader, Handle(), true, CHECK_(nh));
-    }
-
-    objArrayHandle interfaces (THREAD, ik->local_interfaces());
-    int num_interfaces = interfaces->length();
-    for (int index = 0; index < num_interfaces; index++) {
-      klassOop k = klassOop(interfaces->obj_at(index));
-
-      // Note: can not use instanceKlass::cast here because
-      // interfaces' instanceKlass's C++ vtbls haven't been
-      // reinitialized yet (they will be once the interface classes
-      // are loaded)
-      symbolHandle name (THREAD, k->klass_part()->name());
-      resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
-    }
-
-    // Adjust methods to recover missing data.  They need addresses for
-    // interpreter entry points and their default native method address
-    // must be reset.
-
-    // Updating methods must be done under a lock so multiple
-    // threads don't update these in parallel
-    // Shared classes are all currently loaded by the bootstrap
-    // classloader, so this will never cause a deadlock on
-    // a custom class loader lock.
-
-    {
-      Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
-      check_loader_lock_contention(lockObject, THREAD);
-      ObjectLocker ol(lockObject, THREAD, true);
-
-      objArrayHandle methods (THREAD, ik->methods());
-      int num_methods = methods->length();
-      for (int index2 = 0; index2 < num_methods; ++index2) {
-        methodHandle m(THREAD, methodOop(methods->obj_at(index2)));
-        m()->link_method(m, CHECK_(nh));
-      }
-    }
-
-    if (TraceClassLoading) {
-      ResourceMark rm;
-      tty->print("[Loaded %s", ik->external_name());
-      tty->print(" from shared objects file");
-      tty->print_cr("]");
-    }
-    // notify a class loaded from shared object
-    ClassLoadingService::notify_class_loaded(instanceKlass::cast(ik()), 
-                                             true /* shared class */);
-  }
-  return ik;
-}
-
-
-
-instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS) {
-  instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-  if (class_loader.is_null()) {
-    // Search the shared system dictionary for classes preloaded into the
-    // shared spaces.
-    instanceKlassHandle k;
-    k = load_shared_class(class_name, class_loader, THREAD);
-
-    if (k.is_null()) {
-      // Use VM class loader
-      k = ClassLoader::load_classfile(class_name, CHECK_(nh));
-    }
-
-    // find_or_define_instance_class may return a different k
-    if (!k.is_null()) {
-      k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
-    }
-    return k;
-  } else {
-    // Use user specified class loader to load class. Call loadClass operation on class_loader.
-    ResourceMark rm(THREAD);
-      
-    Handle s = java_lang_String::create_from_symbol(class_name, CHECK_(nh));
-    // Translate to external class name format, i.e., convert '/' chars to '.'
-    Handle string = java_lang_String::externalize_classname(s, CHECK_(nh));
-
-    JavaValue result(T_OBJECT);
-
-    KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());
-
-    // UnsyncloadClass option means don't synchronize loadClass() calls.
-    // loadClassInternal() is synchronized and public loadClass(String) is not.
-    // This flag is for diagnostic purposes only. It is risky to call
-    // custom class loaders without synchronization.
-    // WARNING If a custom class loader does NOT synchronizer findClass, or callers of
-    // findClass, this flag risks unexpected timing bugs in the field.
-    // Do NOT assume this will be supported in future releases.
-    if (!UnsyncloadClass && has_loadClassInternal()) {
-      JavaCalls::call_special(&result, 
-                              class_loader, 
-                              spec_klass,
-                              vmSymbolHandles::loadClassInternal_name(),
-                              vmSymbolHandles::string_class_signature(), 
-                              string,
-                              CHECK_(nh));
-    } else {
-      JavaCalls::call_virtual(&result, 
-                              class_loader, 
-                              spec_klass,
-                              vmSymbolHandles::loadClass_name(),
-                              vmSymbolHandles::string_class_signature(), 
-                              string,
-                              CHECK_(nh));
-    }
-
-    assert(result.get_type() == T_OBJECT, "just checking");
-    oop obj = (oop) result.get_jobject();
-
-    // Primitive classes return null since forName() can not be
-    // used to obtain any of the Class objects representing primitives or void
-    if ((obj != NULL) && !(java_lang_Class::is_primitive(obj))) {      
-      instanceKlassHandle k = 
-                instanceKlassHandle(THREAD, java_lang_Class::as_klassOop(obj));
-      // For user defined Java class loaders, check that the name returned is
-      // the same as that requested.  This check is done for the bootstrap
-      // loader when parsing the class file.
-      if (class_name() == k->name()) {
-        return k;
-      }
-    }
-    // Class is not found or has the wrong name, return NULL
-    return nh;
-  }
-}
-
-void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
-
-  Handle class_loader_h(THREAD, k->class_loader());
-
-  // for bootstrap classloader don't acquire lock
-  if (!class_loader_h.is_null()) {
-    assert(ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, 
-         compute_loader_lock_object(class_loader_h, THREAD)),
-         "define called without lock");
-  }
-
-
-  // Check class-loading constraints. Throw exception if violation is detected.
-  // Grabs and releases SystemDictionary_lock
-  // The check_constraints/find_class call and update_dictionary sequence
-  // must be "atomic" for a specific class/classloader pair so we never
-  // define two different instanceKlasses for that class/classloader pair.
-  // Existing classloaders will call define_instance_class with the
-  // classloader lock held
-  // Parallel classloaders will call find_or_define_instance_class
-  // which will require a token to perform the define class
-  symbolHandle name_h(THREAD, k->name());
-  unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h);
-  int d_index = dictionary()->hash_to_index(d_hash);
-  check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK);
-
-  // Register class just loaded with class loader (placed in Vector)
-  // Note we do this before updating the dictionary, as this can
-  // fail with an OutOfMemoryError (if it does, we will *not* put this
-  // class in the dictionary and will not update the class hierarchy).
-  if (k->class_loader() != NULL) {
-    methodHandle m(THREAD, Universe::loader_addClass_method());
-    JavaValue result(T_VOID);
-    JavaCallArguments args(class_loader_h);
-    args.push_oop(Handle(THREAD, k->java_mirror()));
-    JavaCalls::call(&result, m, &args, CHECK);
-  }
-
-  // Add the new class. We need recompile lock during update of CHA.
-  {
-    unsigned int p_hash = placeholders()->compute_hash(name_h, class_loader_h);
-    int p_index = placeholders()->hash_to_index(p_hash);
-
-    MutexLocker mu_r(Compile_lock, THREAD);                    
-
-    // Add to class hierarchy, initialize vtables, and do possible
-    // deoptimizations.
-    add_to_hierarchy(k, CHECK); // No exception, but can block
-
-    // Add to systemDictionary - so other classes can see it.
-    // Grabs and releases SystemDictionary_lock
-    update_dictionary(d_index, d_hash, p_index, p_hash,
-                      k, class_loader_h, THREAD);
-  }
-  k->eager_initialize(THREAD);
-
-  // notify jvmti
-  if (JvmtiExport::should_post_class_load()) {
-      assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
-      JvmtiExport::post_class_load((JavaThread *) THREAD, k());
-
-  }
-#ifdef JVMPI_SUPPORT
-  // notify jvmpi
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_CLASS_LOAD)) {
-    jvmpi::post_class_load_event(Klass::cast(k())->java_mirror());
-  }
-#endif // JVMPI_SUPPORT
-}
-
-// Support parallel classloading
-// Initial implementation for bootstrap classloader
-// For future:
-// For custom class loaders that support parallel classloading,
-// in case they do not synchronize around
-// FindLoadedClass/DefineClass calls, we check for parallel
-// loading for them, wait if a defineClass is in progress
-// and return the initial requestor's results
-// For better performance, the class loaders should synchronize
-// findClass(), i.e. FindLoadedClass/DefineClass or they
-// potentially waste time reading and parsing the bytestream.
-// Note: VM callers should ensure consistency of k/class_name,class_loader
-// Defines k under (class_name, class_loader). If another thread is already
-// defining the same class, this thread waits on SystemDictionary_lock and
-// returns the other thread's result; otherwise this thread becomes the
-// definer via the DEFINE_CLASS placeholder token.
-instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle class_name, Handle class_loader, instanceKlassHandle k, TRAPS) {
-
-  instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-
-  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
-  int d_index = dictionary()->hash_to_index(d_hash);
-
-// Hold SD lock around find_class and placeholder creation for DEFINE_CLASS
-  unsigned int p_hash = placeholders()->compute_hash(class_name, class_loader);
-  int p_index = placeholders()->hash_to_index(p_hash);
-  PlaceholderEntry* probe;
-
-  { 
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    // First check if class already defined
-    klassOop check = find_class(d_index, d_hash, class_name, class_loader);
-    if (check != NULL) {
-      return(instanceKlassHandle(THREAD, check));
-    }
-
-    // Acquire define token for this class/classloader
-    symbolHandle nullsymbolHandle;
-    probe = placeholders()->find_and_add(p_index, p_hash, class_name, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD); 
-    // Check if another thread defining in parallel
-    if (probe->definer() == NULL) {
-      // Thread will define the class
-      probe->set_definer(THREAD);
-    } else {
-      // Wait for defining thread to finish and return results
-      while (probe->definer() != NULL) {
-        SystemDictionary_lock->wait();
-      }
-      // Definer published its result in the placeholder entry (if successful).
-      if (probe->instanceKlass() != NULL) {
-        probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
-        return(instanceKlassHandle(THREAD, probe->instanceKlass()));
-      } else {
-        // If definer had an error, try again as any new thread would
-        probe->set_definer(THREAD);
-#ifdef ASSERT
-        klassOop check = find_class(d_index, d_hash, class_name, class_loader);
-        assert(check == NULL, "definer missed recording success");
-#endif
-      }
-    }
-  }
-
-  // This thread won the define token: do the actual definition (may throw).
-  define_instance_class(k, THREAD);
-
-  Handle linkage_exception = Handle(); // null handle
-
-  // definer must notify any waiting threads
-  {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, class_name, class_loader);
-    assert(probe != NULL, "DEFINE_INSTANCE placeholder lost?");
-    if (probe != NULL) {
-      if (HAS_PENDING_EXCEPTION) {
-        // Defer the throw: record it and leave probe->instanceKlass() NULL so
-        // waiters retry the definition themselves.
-        linkage_exception = Handle(THREAD,PENDING_EXCEPTION);
-        CLEAR_PENDING_EXCEPTION;
-      } else {
-        probe->set_instanceKlass(k());
-      }
-      probe->set_definer(NULL);
-      probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
-      SystemDictionary_lock->notify_all();
-    }
-  }
-
-  // Can't throw exception while holding lock due to rank ordering
-  if (linkage_exception() != NULL) {
-    THROW_OOP_(linkage_exception(), nh); // throws exception and returns
-  }
-
-  return k;
-}
-
-// Returns the object to synchronize on while loading with class_loader:
-// the loader itself, or the private VM lock object for the null (bootstrap) loader.
-Handle SystemDictionary::compute_loader_lock_object(Handle class_loader, TRAPS) {
-  // If class_loader is NULL we synchronize on _system_loader_lock_obj
-  if (class_loader.is_null()) {
-    return Handle(THREAD, _system_loader_lock_obj);
-  } else {
-    return class_loader;
-  }
-}
-
-// This method is added to check how often we have to wait to grab loader
-// lock. The results are being recorded in the performance counters defined in
-// ClassLoader::_sync_systemLoaderLockContentionRate and
-// ClassLoader::_sync_nonSystemLoaderLockContentionRate. 
-void SystemDictionary::check_loader_lock_contention(Handle loader_lock, TRAPS) {
-  // Counters only exist when performance data collection is enabled.
-  if (!UsePerfData) {
-    return;
-  }
-
-  assert(!loader_lock.is_null(), "NULL lock object");
-
-  if (ObjectSynchronizer::query_lock_ownership((JavaThread*)THREAD, loader_lock)
-      == ObjectSynchronizer::owner_other) {
-    // contention will likely happen, so increment the corresponding 
-    // contention counter.
-    if (loader_lock() == _system_loader_lock_obj) {
-      ClassLoader::sync_systemLoaderLockContentionRate()->inc();
-    } else {
-      ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
-    }
-  }
-} 
-  
-// ----------------------------------------------------------------------------
-// Lookup
-
-// Looks up a loaded class in the dictionary; caller must hold
-// SystemDictionary_lock or be at a safepoint, and must pass the index/hash
-// it precomputed for (class_name, class_loader).
-klassOop SystemDictionary::find_class(int index, unsigned int hash,
-                                      symbolHandle class_name,
-                                      Handle class_loader) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  assert (index == dictionary()->index_for(class_name, class_loader),
-          "incorrect index?");
-
-  klassOop k = dictionary()->find_class(index, hash, class_name, class_loader);
-  return k;
-}
-
-
-// Basic find on classes in the midst of being loaded.
-// Returns the class-name symbol from the placeholder table, or NULL.
-symbolOop SystemDictionary::find_placeholder(int index, unsigned int hash,
-                                             symbolHandle class_name,
-                                             Handle class_loader) {
-  assert_locked_or_safepoint(SystemDictionary_lock);
-
-  return placeholders()->find_entry(index, hash, class_name, class_loader);
-}
-
-
-// Used for assertions and verification only.
-// Checks the loaded-class dictionary first, then the placeholder table.
-oop SystemDictionary::find_class_or_placeholder(symbolHandle class_name, 
-                                                Handle class_loader) {
-  #ifndef ASSERT
-  // In product builds this lookup is only permitted during verification passes.
-  guarantee(VerifyBeforeGC   || 
-            VerifyDuringGC   || 
-            VerifyBeforeExit ||
-            VerifyAfterGC, "too expensive"); 
-  #endif
-  assert_locked_or_safepoint(SystemDictionary_lock);
-  symbolOop class_name_ = class_name();
-  oop class_loader_ = class_loader();
-
-  // First look in the loaded class array
-  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
-  int d_index = dictionary()->hash_to_index(d_hash);
-  oop lookup = find_class(d_index, d_hash, class_name, class_loader);
-
-  if (lookup == NULL) {
-    // Next try the placeholders
-    unsigned int p_hash = placeholders()->compute_hash(class_name,class_loader);
-    int p_index = placeholders()->hash_to_index(p_hash);
-    lookup = find_placeholder(p_index, p_hash, class_name, class_loader);
-  }
-
-  return lookup;
-}
-
-
-// Get the next class in the dictionary.
-klassOop SystemDictionary::try_get_next_class() {
-  return dictionary()->try_get_next_class();
-}
-
-
-// ----------------------------------------------------------------------------
-// Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Recompile_lock
-// is held, to ensure that the compiler is not using the class hierarchy, and that deoptimization will kick in
-// before a new class is used.
-
-void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
-  assert(k.not_null(), "just checking");
-  // Link into hierarchy. Make sure the vtables are initialized before linking into the hierarchy.
-  k->append_to_sibling_list();                    // add to superklass/sibling list
-  k->process_interfaces(THREAD);                  // handle all "implements" declarations  
-  k->set_init_state(instanceKlass::loaded);
-  // Now flush all code that depended on old class hierarchy.
-  // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
-  // Also, first reinitialize vtable because it may have gotten out of synch 
-  // while the new class wasn't connected to the class hierarchy.     
-  Universe::flush_dependents_on(k);
-}
-
-
-// ----------------------------------------------------------------------------
-// GC support
-
-// Following roots during mark-sweep is separated in two phases. 
-//
-// The first phase follows preloaded classes and all other system 
-// classes, since these will never get unloaded anyway.
-//
-// The second phase removes (unloads) unreachable classes from the
-// system dictionary and follows the remaining classes' contents.
-
-// Phase-one GC root following: visit only roots that can never be unloaded
-// (system loader, preloaded classes, placeholders, constraints).
-void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
-  // Follow preloaded classes/mirrors and system loader object
-  blk->do_oop(&_java_system_loader);
-  preloaded_oops_do(blk);
-  always_strong_classes_do(blk);
-}
-
-
-// Visits the always-strong class roots: system classes, placeholders,
-// loader-constraint symbols and resolution-error symbols.
-void SystemDictionary::always_strong_classes_do(OopClosure* blk) {
-  // Follow all system classes and temporary placeholders in dictionary
-  dictionary()->always_strong_classes_do(blk);
-  
-  // Placeholders. These are *always* strong roots, as they
-  // represent classes we're actively loading.
-  placeholders_do(blk);  
-
-  // Loader constraints. We must keep the symbolOop used in the name alive.
-  constraints()->always_strong_classes_do(blk);
-
-  // Resolution errors keep the symbolOop for the error alive
-  resolution_errors()->always_strong_classes_do(blk);
-}
-
-
-// Applies the closure to every oop held by the placeholder table.
-void SystemDictionary::placeholders_do(OopClosure* blk) {
-  placeholders()->oops_do(blk);
-}
-
-
-// Phase-two GC support: removes dead classes from the dictionary and purges
-// constraint/resolution-error entries for dead loaders.
-// Returns true if any class was unloaded.
-bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
-  bool result = dictionary()->do_unloading(is_alive);
-  constraints()->purge_loader_constraints(is_alive);
-  resolution_errors()->purge_resolution_errors(is_alive);
-  return result;
-}
-
-
-// Applies the closure to every oop root owned by the SystemDictionary
-// (used e.g. to adjust pointers after a moving collection).
-void SystemDictionary::oops_do(OopClosure* f) {
-  // Adjust preloaded classes/mirrors and system loader object
-  f->do_oop(&_java_system_loader);
-  preloaded_oops_do(f);
-
-  lazily_loaded_oops_do(f);
-
-  // Adjust dictionary
-  dictionary()->oops_do(f);
-
-  // Partially loaded classes
-  placeholders()->oops_do(f);
-
-  // Adjust constraint table
-  constraints()->oops_do(f);
-
-  // Adjust resolution error table
-  resolution_errors()->oops_do(f);
-}
-
-
-// Visits every statically-preloaded klass root (well-known classes, box
-// classes, reflection support classes) plus the system loader lock object.
-// Must be kept in sync with the fields set by initialize_preloaded_classes.
-void SystemDictionary::preloaded_oops_do(OopClosure* f) {
-  f->do_oop((oop*) &_string_klass);
-  f->do_oop((oop*) &_object_klass);
-  f->do_oop((oop*) &_class_klass);
-  f->do_oop((oop*) &_cloneable_klass);
-  f->do_oop((oop*) &_classloader_klass);
-  f->do_oop((oop*) &_serializable_klass);
-  f->do_oop((oop*) &_system_klass);
-
-  f->do_oop((oop*) &_throwable_klass);
-  f->do_oop((oop*) &_error_klass);
-  f->do_oop((oop*) &_threaddeath_klass);
-  f->do_oop((oop*) &_exception_klass);
-  f->do_oop((oop*) &_runtime_exception_klass);
-  f->do_oop((oop*) &_classNotFoundException_klass);
-  f->do_oop((oop*) &_noClassDefFoundError_klass);
-  f->do_oop((oop*) &_linkageError_klass);
-  f->do_oop((oop*) &_classCastException_klass);
-  f->do_oop((oop*) &_arrayStoreException_klass);
-  f->do_oop((oop*) &_virtualMachineError_klass);
-  f->do_oop((oop*) &_outOfMemoryError_klass);
-  f->do_oop((oop*) &_StackOverflowError_klass);
-  f->do_oop((oop*) &_illegalMonitorStateException_klass);
-  f->do_oop((oop*) &_protectionDomain_klass);
-  f->do_oop((oop*) &_AccessControlContext_klass);
-
-  f->do_oop((oop*) &_reference_klass);
-  f->do_oop((oop*) &_soft_reference_klass);
-  f->do_oop((oop*) &_weak_reference_klass);
-  f->do_oop((oop*) &_final_reference_klass);
-  f->do_oop((oop*) &_phantom_reference_klass);
-  f->do_oop((oop*) &_finalizer_klass);
-  
-  f->do_oop((oop*) &_thread_klass);
-  f->do_oop((oop*) &_threadGroup_klass);
-  f->do_oop((oop*) &_properties_klass);      
-  f->do_oop((oop*) &_reflect_accessible_object_klass);      
-  f->do_oop((oop*) &_reflect_field_klass);      
-  f->do_oop((oop*) &_reflect_method_klass);      
-  f->do_oop((oop*) &_reflect_constructor_klass);      
-  f->do_oop((oop*) &_reflect_magic_klass);
-  f->do_oop((oop*) &_reflect_method_accessor_klass);
-  f->do_oop((oop*) &_reflect_constructor_accessor_klass);
-  f->do_oop((oop*) &_reflect_delegating_classloader_klass);
-  f->do_oop((oop*) &_reflect_constant_pool_klass);
-  f->do_oop((oop*) &_reflect_unsafe_static_field_accessor_impl_klass);
-
-  f->do_oop((oop*) &_stringBuffer_klass);
-  f->do_oop((oop*) &_vector_klass);
-  f->do_oop((oop*) &_hashtable_klass);
-
-  f->do_oop((oop*) &_stackTraceElement_klass);
-
-  f->do_oop((oop*) &_java_nio_Buffer_klass);
-
-  f->do_oop((oop*) &_sun_misc_AtomicLongCSImpl_klass);
-
-  f->do_oop((oop*) &_boolean_klass);
-  f->do_oop((oop*) &_char_klass);
-  f->do_oop((oop*) &_float_klass);
-  f->do_oop((oop*) &_double_klass);
-  f->do_oop((oop*) &_byte_klass);
-  f->do_oop((oop*) &_short_klass);
-  f->do_oop((oop*) &_int_klass);
-  f->do_oop((oop*) &_long_klass);
-  {
-    // Box klasses are indexed by BasicType; only T_BOOLEAN..T_LONG are set.
-    for (int i = 0; i < T_VOID+1; i++) {
-      if (_box_klasses[i] != NULL) {
-	assert(i >= T_BOOLEAN, "checking");
-	f->do_oop((oop*) &_box_klasses[i]);
-      }
-    }
-  }
-
-  // Do the basic type mirrors.  (These are shared with Universe::oops_do.)
-  shared_oops_do(f);
-  
-  f->do_oop((oop*) &_system_loader_lock_obj); 
-  FilteredFieldsMap::klasses_oops_do(f); 
-}
-
-// Visits the basic-type mirror roots; shared with Universe::oops_do and
-// with the shared-heap (CDS) restore path.
-void SystemDictionary::shared_oops_do(OopClosure* f) {
-  f->do_oop((oop*) &_int_mirror);
-  f->do_oop((oop*) &_float_mirror);
-  f->do_oop((oop*) &_double_mirror);
-  f->do_oop((oop*) &_byte_mirror);
-  f->do_oop((oop*) &_bool_mirror);
-  f->do_oop((oop*) &_char_mirror);
-  f->do_oop((oop*) &_long_mirror);
-  f->do_oop((oop*) &_short_mirror);
-  f->do_oop((oop*) &_void_mirror);
-
-  // It's important to iterate over these guys even if they are null,
-  // since that's how shared heaps are restored.
-  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
-    f->do_oop((oop*) &_mirrors[i]);
-  }
-  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
-}
-
-// Visits roots for classes loaded on demand (see
-// load_abstract_ownable_synchronizer_klass); may be NULL before first use.
-void SystemDictionary::lazily_loaded_oops_do(OopClosure* f) {
-  f->do_oop((oop*) &_abstract_ownable_synchronizer_klass);
-}
-
-// Just the classes from defining class loaders
-// Don't iterate over placeholders
-void SystemDictionary::classes_do(void f(klassOop)) {
-  dictionary()->classes_do(f);
-}
-
-// Added for initialize_itable_for_klass
-//   Just the classes from defining class loaders
-// Don't iterate over placeholders
-// CHECK propagates any exception raised by f back to the caller.
-void SystemDictionary::classes_do(void f(klassOop, TRAPS), TRAPS) {
-  dictionary()->classes_do(f, CHECK);
-}
-
-//   All classes, and their class loaders
-// Don't iterate over placeholders
-void SystemDictionary::classes_do(void f(klassOop, oop)) {
-  dictionary()->classes_do(f);
-}
-
-//   All classes, and their class loaders
-//   (added for helpers that use HandleMarks and ResourceMarks)
-// Don't iterate over placeholders
-void SystemDictionary::classes_do(void f(klassOop, oop, TRAPS), TRAPS) {
-  dictionary()->classes_do(f, CHECK);
-}
-
-// Iterates over (class name, loader) pairs currently in the placeholder table.
-void SystemDictionary::placeholders_do(void f(symbolOop, oop)) {
-  placeholders()->entries_do(f);
-}
-
-// Applies f to every method of every class in the dictionary.
-void SystemDictionary::methods_do(void f(methodOop)) {
-  dictionary()->methods_do(f);
-}
-
-// ----------------------------------------------------------------------------
-// Lazily load klasses
-
-// Lazily resolves java.util.concurrent.locks.AbstractOwnableSynchronizer
-// the first time it is needed (JDK 1.6+ only).
-void SystemDictionary::load_abstract_ownable_synchronizer_klass(TRAPS) {
-  assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
-
-  // if multiple threads calling this function, only one thread will load
-  // the class.  The other threads will find the loaded version once the
-  // class is loaded.
-  klassOop aos = _abstract_ownable_synchronizer_klass;
-  if (aos == NULL) {
-    klassOop k = resolve_or_fail(vmSymbolHandles::java_util_concurrent_locks_AbstractOwnableSynchronizer(), true, CHECK);
-    // Force a fence to prevent any read before the write completes
-    OrderAccess::fence();
-    _abstract_ownable_synchronizer_klass = k;
-  }
-}
-
-// ----------------------------------------------------------------------------
-// Initialization
-
-// One-time VM-startup initialization: allocates the dictionary, placeholder,
-// constraint and resolution-error tables, then preloads the well-known classes.
-void SystemDictionary::initialize(TRAPS) {
-  // Allocate arrays
-  assert(dictionary() == NULL,
-         "SystemDictionary should only be initialized once");
-  _dictionary = new Dictionary(_nof_buckets);
-  _placeholders = new PlaceholderTable(_nof_buckets);
-  _number_of_modifications = 0;
-  _loader_constraints = new LoaderConstraintTable(_loader_constraint_size);
-  _resolution_errors = new ResolutionErrorTable(_resolution_error_size);
-
-  // Allocate private object used as system class loader lock
-  _system_loader_lock_obj = oopFactory::new_system_objArray(0, CHECK);
-  // Initialize basic classes
-  initialize_preloaded_classes(CHECK);
-}
-
-
-// Resolves every well-known class eagerly at startup. resolve_or_fail
-// aborts VM startup if a required class is missing; resolve_or_null is used
-// for classes that may legitimately be absent in older JDKs.
-void SystemDictionary::initialize_preloaded_classes(TRAPS) {
-  assert(_object_klass == NULL, "preloaded classes should only be initialized once");
-  // Preload commonly used klasses
-  _object_klass            = resolve_or_fail(vmSymbolHandles::java_lang_Object(),                true, CHECK);
-  _string_klass            = resolve_or_fail(vmSymbolHandles::java_lang_String(),                true, CHECK);  
-  _class_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Class(),                 true, CHECK);
-  debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(_class_klass));
-
-  // Fixup mirrors for classes loaded before java.lang.Class
-  initialize_basic_type_mirrors(CHECK);
-  Universe::fixup_mirrors(CHECK);
-
-  _cloneable_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Cloneable(),             true, CHECK);
-  _classloader_klass       = resolve_or_fail(vmSymbolHandles::java_lang_ClassLoader(),           true, CHECK);
-  _serializable_klass      = resolve_or_fail(vmSymbolHandles::java_io_Serializable(),            true, CHECK);
-  _system_klass            = resolve_or_fail(vmSymbolHandles::java_lang_System(),                true, CHECK);  
-
-  _throwable_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Throwable(),             true, CHECK);
-  _error_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Error(),                 true, CHECK);
-  _threaddeath_klass       = resolve_or_fail(vmSymbolHandles::java_lang_ThreadDeath(),           true, CHECK);
-  _exception_klass         = resolve_or_fail(vmSymbolHandles::java_lang_Exception(),             true, CHECK);
-  _runtime_exception_klass = resolve_or_fail(vmSymbolHandles::java_lang_RuntimeException(),      true, CHECK);
-  _protectionDomain_klass  = resolve_or_fail(vmSymbolHandles::java_security_ProtectionDomain(),  true, CHECK);
-  _AccessControlContext_klass = resolve_or_fail(vmSymbolHandles::java_security_AccessControlContext(),  true, CHECK);
-  _classNotFoundException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassNotFoundException(),  true, CHECK);
-  _noClassDefFoundError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_NoClassDefFoundError(),  true, CHECK);  
-  _linkageError_klass   = resolve_or_fail(vmSymbolHandles::java_lang_LinkageError(),  true, CHECK);  
-  _classCastException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ClassCastException(),   true, CHECK);  
-  _arrayStoreException_klass = resolve_or_fail(vmSymbolHandles::java_lang_ArrayStoreException(),   true, CHECK);  
-  _virtualMachineError_klass = resolve_or_fail(vmSymbolHandles::java_lang_VirtualMachineError(),   true, CHECK);  
-  _outOfMemoryError_klass  = resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(),      true, CHECK);  
-  _StackOverflowError_klass = resolve_or_fail(vmSymbolHandles::java_lang_StackOverflowError(),   true, CHECK);  
-  _illegalMonitorStateException_klass = resolve_or_fail(vmSymbolHandles::java_lang_IllegalMonitorStateException(),   true, CHECK);  
-
-  // Preload ref klasses and set reference types
-  _reference_klass         = resolve_or_fail(vmSymbolHandles::java_lang_ref_Reference(),         true, CHECK);
-  instanceKlass::cast(_reference_klass)->set_reference_type(REF_OTHER);
-  instanceRefKlass::update_nonstatic_oop_maps(_reference_klass);
-
-  _soft_reference_klass    = resolve_or_fail(vmSymbolHandles::java_lang_ref_SoftReference(),     true, CHECK);
-  instanceKlass::cast(_soft_reference_klass)->set_reference_type(REF_SOFT);
-  _weak_reference_klass    = resolve_or_fail(vmSymbolHandles::java_lang_ref_WeakReference(),     true, CHECK);
-  instanceKlass::cast(_weak_reference_klass)->set_reference_type(REF_WEAK);
-  _final_reference_klass   = resolve_or_fail(vmSymbolHandles::java_lang_ref_FinalReference(),    true, CHECK);
-  instanceKlass::cast(_final_reference_klass)->set_reference_type(REF_FINAL);
-  _phantom_reference_klass = resolve_or_fail(vmSymbolHandles::java_lang_ref_PhantomReference(),  true, CHECK);
-  instanceKlass::cast(_phantom_reference_klass)->set_reference_type(REF_PHANTOM);
-  _finalizer_klass         = resolve_or_fail(vmSymbolHandles::java_lang_ref_Finalizer(),         true, CHECK);
-
-  _thread_klass           = resolve_or_fail(vmSymbolHandles::java_lang_Thread(),                true, CHECK);
-  _threadGroup_klass      = resolve_or_fail(vmSymbolHandles::java_lang_ThreadGroup(),           true, CHECK);
-  _properties_klass       = resolve_or_fail(vmSymbolHandles::java_util_Properties(),            true, CHECK);  
-  _reflect_accessible_object_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_AccessibleObject(),  true, CHECK);  
-  _reflect_field_klass    = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Field(),         true, CHECK);  
-  _reflect_method_klass   = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(),        true, CHECK);  
-  _reflect_constructor_klass = resolve_or_fail(vmSymbolHandles::java_lang_reflect_Constructor(),   true, CHECK);  
-  // Universe::is_gte_jdk14x_version() is not set up by this point.
-  // It's okay if these turn out to be NULL in non-1.4 JDKs.
-  _reflect_magic_klass    = resolve_or_null(vmSymbolHandles::sun_reflect_MagicAccessorImpl(),         CHECK);
-  _reflect_method_accessor_klass = resolve_or_null(vmSymbolHandles::sun_reflect_MethodAccessorImpl(),     CHECK);
-  _reflect_constructor_accessor_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstructorAccessorImpl(),     CHECK);
-  _reflect_delegating_classloader_klass = resolve_or_null(vmSymbolHandles::sun_reflect_DelegatingClassLoader(),     CHECK);
-  _reflect_constant_pool_klass = resolve_or_null(vmSymbolHandles::sun_reflect_ConstantPool(),         CHECK);
-  _reflect_unsafe_static_field_accessor_impl_klass = resolve_or_null(vmSymbolHandles::sun_reflect_UnsafeStaticFieldAccessorImpl(), CHECK);
-
-  _vector_klass           = resolve_or_fail(vmSymbolHandles::java_util_Vector(),                true, CHECK);  
-  _hashtable_klass        = resolve_or_fail(vmSymbolHandles::java_util_Hashtable(),             true, CHECK);  
-  _stringBuffer_klass     = resolve_or_fail(vmSymbolHandles::java_lang_StringBuffer(),          true, CHECK);  
-
-  // It's NULL in non-1.4 JDKs.
-  _stackTraceElement_klass = resolve_or_null(vmSymbolHandles::java_lang_StackTraceElement(),          CHECK);
-
-  // Universe::is_gte_jdk14x_version() is not set up by this point.
-  // It's okay if this turns out to be NULL in non-1.4 JDKs.
-  _java_nio_Buffer_klass   = resolve_or_null(vmSymbolHandles::java_nio_Buffer(),                 CHECK);
-
-  // If this class isn't present, it won't be referenced.
-  _sun_misc_AtomicLongCSImpl_klass = resolve_or_null(vmSymbolHandles::sun_misc_AtomicLongCSImpl(),     CHECK);
-
-  // Preload boxing klasses
-  _boolean_klass           = resolve_or_fail(vmSymbolHandles::java_lang_Boolean(),               true, CHECK);
-  _char_klass              = resolve_or_fail(vmSymbolHandles::java_lang_Character(),             true, CHECK);
-  _float_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Float(),                 true, CHECK);
-  _double_klass            = resolve_or_fail(vmSymbolHandles::java_lang_Double(),                true, CHECK);
-  _byte_klass              = resolve_or_fail(vmSymbolHandles::java_lang_Byte(),                  true, CHECK);
-  _short_klass             = resolve_or_fail(vmSymbolHandles::java_lang_Short(),                 true, CHECK);
-  _int_klass               = resolve_or_fail(vmSymbolHandles::java_lang_Integer(),               true, CHECK);
-  _long_klass              = resolve_or_fail(vmSymbolHandles::java_lang_Long(),                  true, CHECK);
-
-  _box_klasses[T_BOOLEAN] = _boolean_klass;
-  _box_klasses[T_CHAR]    = _char_klass;
-  _box_klasses[T_FLOAT]   = _float_klass;
-  _box_klasses[T_DOUBLE]  = _double_klass;
-  _box_klasses[T_BYTE]    = _byte_klass;
-  _box_klasses[T_SHORT]   = _short_klass;
-  _box_klasses[T_INT]     = _int_klass;
-  _box_klasses[T_LONG]    = _long_klass;
-  //_box_klasses[T_OBJECT]  = _object_klass;
-  //_box_klasses[T_ARRAY]   = _object_klass;
-
-  { // Compute whether we should use loadClass or loadClassInternal when loading classes.
-    methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
-    _has_loadClassInternal = (method != NULL);
-  }
-
-  { // Compute whether we should use checkPackageAccess or NOT
-    methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
-    _has_checkPackageAccess = (method != NULL); 
-  }
-}
-
-// Creates the java.lang.Class mirrors for the primitive types, or verifies
-// them when they were restored from a shared (CDS) heap.
-void SystemDictionary::initialize_basic_type_mirrors(TRAPS) { 
-  if (UseSharedSpaces) {
-    assert(_int_mirror != NULL, "already loaded");
-    assert(_void_mirror == _mirrors[T_VOID], "consistently loaded");
-    return;
-  }
-
-  assert(_int_mirror==NULL, "basic type mirrors already initialized");
-
-  _int_mirror     = java_lang_Class::create_basic_type_mirror("int",    T_INT,     CHECK);
-  _float_mirror   = java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
-  _double_mirror  = java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
-  _byte_mirror    = java_lang_Class::create_basic_type_mirror("byte",   T_BYTE,    CHECK);
-  _bool_mirror    = java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
-  _char_mirror    = java_lang_Class::create_basic_type_mirror("char",   T_CHAR,    CHECK);
-  _long_mirror    = java_lang_Class::create_basic_type_mirror("long",   T_LONG,    CHECK);
-  _short_mirror   = java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
-  _void_mirror    = java_lang_Class::create_basic_type_mirror("void",   T_VOID,    CHECK);
-
-  _mirrors[T_INT]     = _int_mirror;
-  _mirrors[T_FLOAT]   = _float_mirror;
-  _mirrors[T_DOUBLE]  = _double_mirror;
-  _mirrors[T_BYTE]    = _byte_mirror;
-  _mirrors[T_BOOLEAN] = _bool_mirror;
-  _mirrors[T_CHAR]    = _char_mirror;
-  _mirrors[T_LONG]    = _long_mirror;
-  _mirrors[T_SHORT]   = _short_mirror;
-  _mirrors[T_VOID]    = _void_mirror;
-  //_mirrors[T_OBJECT]  = instanceKlass::cast(_object_klass)->java_mirror();
-  //_mirrors[T_ARRAY]   = instanceKlass::cast(_object_klass)->java_mirror();
-}
-
-
-// Tells if a given klass is a box (wrapper class, such as java.lang.Integer).
-// If so, returns the basic type it holds.  If not, returns T_OBJECT.
-BasicType SystemDictionary::box_klass_type(klassOop k) {
-  assert(k != NULL, "");
-  // Linear scan over the small, fixed _box_klasses table.
-  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
-    if (_box_klasses[i] == k)
-      return (BasicType)i;
-  }
-  return T_OBJECT;
-}
-
-// Constraints on class loaders. The details of the algorithm can be
-// found in the OOPSLA'98 paper "Dynamic Class Loading in the Java
-// Virtual Machine" by Sheng Liang and Gilad Bracha.  The basic idea is
-// that the system dictionary needs to maintain a set of constraints that
-// must be satisfied by all classes in the dictionary.
-// if defining is true, then LinkageError if already in systemDictionary
-// if initiating loader, then ok if instanceKlass matches existing entry
-
-void SystemDictionary::check_constraints(int d_index, unsigned int d_hash,
-                                         instanceKlassHandle k,
-                                         Handle class_loader, bool defining, 
-                                         TRAPS) {
-  // A non-NULL value doubles as the format string for the LinkageError message.
-  const char *linkage_error = NULL;
-  {
-    symbolHandle name (THREAD, k->name());
-    MutexLocker mu(SystemDictionary_lock, THREAD);         
-
-    klassOop check = find_class(d_index, d_hash, name, class_loader);
-    if (check != (klassOop)NULL) { 
-      // if different instanceKlass - duplicate class definition,
-      // else - ok, class loaded by a different thread in parallel,
-      // we should only have found it if it was done loading and ok to use 
-      // system dictionary only holds instance classes, placeholders
-      // also holds array classes
-      
-      assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary");
-      if ((defining == true) || (k() != check)) {
-        linkage_error = "loader (instance of  %s): attempted  duplicate class "
-	  "definition for name: \"%s\"";
-      } else {
-        return;
-      }
-    }
-
-#ifdef ASSERT
-    unsigned int p_hash = placeholders()->compute_hash(name, class_loader);
-    int p_index = placeholders()->hash_to_index(p_hash);
-    symbolOop ph_check = find_placeholder(p_index, p_hash, name, class_loader);
-    assert(ph_check == NULL || ph_check == name(), "invalid symbol");
-#endif
-
-    if (linkage_error == NULL) {
-      if (constraints()->check_or_update(k, class_loader, name) == false) {
-	linkage_error = "loader constraint violation: loader (instance of %s)"
-	  " previously initiated loading for a different type with name \"%s\"";
-      }
-    }
-  }
-
-  // Throw error now if needed (cannot throw while holding 
-  // SystemDictionary_lock because of rank ordering)
-
-  if (linkage_error) {
-    ResourceMark rm(THREAD);
-    const char* class_loader_name = loader_name(class_loader());
-    char* type_name = k->name()->as_C_string();
-    size_t buflen = strlen(linkage_error) + strlen(class_loader_name) +
-      strlen(type_name);
-    char* buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, buflen);
-    jio_snprintf(buf, buflen, linkage_error, class_loader_name, type_name);
-    THROW_MSG(vmSymbols::java_lang_LinkageError(), buf);
-  }
-}
-
-
-// Update system dictionary - done after check_constraint and add_to_hierarchy 
-// have been called.
-void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
-                                         int p_index, unsigned int p_hash,
-                                         instanceKlassHandle k, 
-                                         Handle class_loader, 
-                                         TRAPS) {
-  // Compile_lock prevents systemDictionary updates during compilations
-  assert_locked_or_safepoint(Compile_lock);
-  symbolHandle name (THREAD, k->name());
-
-  {
-  MutexLocker mu1(SystemDictionary_lock, THREAD);           
-
-  // See whether biased locking is enabled and if so set it for this
-  // klass.
-  // Note that this must be done past the last potential blocking
-  // point / safepoint. We enable biased locking lazily using a
-  // VM_Operation to iterate the SystemDictionary and installing the
-  // biasable mark word into each instanceKlass's prototype header.
-  // To avoid race conditions where we accidentally miss enabling the
-  // optimization for one class in the process of being added to the
-  // dictionary, we must not safepoint after the test of
-  // BiasedLocking::enabled().
-  if (UseBiasedLocking && BiasedLocking::enabled()) {
-    // Set biased locking bit for all loaded classes; it will be
-    // cleared if revocation occurs too often for this type
-    // NOTE that we must only do this when the class is initially
-    // defined, not each time it is referenced from a new class loader
-    if (k->class_loader() == class_loader()) {
-      k->set_prototype_header(markOopDesc::biased_locking_prototype());
-    }
-  }
-
-  // Check for a placeholder. If there, remove it and make a
-  // new system dictionary entry.
-  placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
-  klassOop sd_check = find_class(d_index, d_hash, name, class_loader);
-  if (sd_check == NULL) {
-    dictionary()->add_klass(name, class_loader, k);
-    notice_modification();
-  }
-#ifdef ASSERT
-  sd_check = find_class(d_index, d_hash, name, class_loader);
-  assert (sd_check != NULL, "should have entry in system dictionary");
-// Changed to allow PH to remain to complete class circularity checking
-// while only one thread can define a class at one time, multiple
-// classes can resolve the superclass for a class at one time, 
-// and the placeholder is used to track that
-//  symbolOop ph_check = find_placeholder(p_index, p_hash, name, class_loader);
-//  assert (ph_check == NULL, "should not have a placeholder entry");
-#endif
-    SystemDictionary_lock->notify_all();
-  }
-}
-
-
-// Finds a class either loaded by class_loader or already pinned to this
-// loader by a loader constraint; returns NULL if neither applies.
-// Protection-domain checks are deliberately skipped.
-klassOop SystemDictionary::find_constrained_instance_or_array_klass(
-                    symbolHandle class_name, Handle class_loader, TRAPS) {
-
-  // First see if it has been loaded directly.
-  // Force the protection domain to be null.  (This removes protection checks.)
-  Handle no_protection_domain;
-  klassOop klass = find_instance_or_array_klass(class_name, class_loader,
-                                                no_protection_domain, CHECK_NULL);
-  if (klass != NULL)
-    return klass;
-
-  // Now look to see if it has been loaded elsewhere, and is subject to
-  // a loader constraint that would require this loader to return the
-  // klass that is already loaded.
-  if (FieldType::is_array(class_name())) {
-    // Array classes are hard because their klassOops are not kept in the
-    // constraint table. The array klass may be constrained, but the elem class
-    // may not be. 
-    jint dimension;
-    symbolOop object_key;
-    BasicType t = FieldType::get_array_info(class_name(), &dimension,
-                                            &object_key, CHECK_(NULL));
-    if (t != T_OBJECT) {
-      // Primitive element type: the array klass comes from the Universe.
-      klass = Universe::typeArrayKlassObj(t);
-    } else {
-      symbolHandle elem_name(THREAD, object_key);
-      klass = constraints()->find_constrained_elem_klass(class_name, elem_name, class_loader, THREAD);
-    }
-    if (klass != NULL) {
-      klass = Klass::cast(klass)->array_klass_or_null(dimension);
-    }
-  } else {
-    // Non-array classes are easy: simply check the constraint table.
-    klass = constraints()->find_constrained_klass(class_name, class_loader);
-  }
-      
-  return klass;
-}
-
-
-// Records the constraint that class_loader1 and class_loader2 must resolve
-// class_name to the same klass. Returns false if the constraint is already
-// violated by classes each loader has loaded.
-bool SystemDictionary::add_loader_constraint(symbolHandle class_name,
-                                             Handle class_loader1,
-                                             Handle class_loader2, 
-					     Thread* THREAD) {
-  unsigned int d_hash1 = dictionary()->compute_hash(class_name, class_loader1);
-  int d_index1 = dictionary()->hash_to_index(d_hash1);
-
-  unsigned int d_hash2 = dictionary()->compute_hash(class_name, class_loader2);
-  int d_index2 = dictionary()->hash_to_index(d_hash2);
-
-  {
-    MutexLocker mu_s(SystemDictionary_lock, THREAD);
-
-    // Better never do a GC while we're holding these oops
-    No_Safepoint_Verifier nosafepoint;
-
-    klassOop klass1 = find_class(d_index1, d_hash1, class_name, class_loader1);
-    klassOop klass2 = find_class(d_index2, d_hash2, class_name, class_loader2);
-    return constraints()->add_entry(class_name, klass1, class_loader1,
-				    klass2, class_loader2);
-  }
-}
-
-// Add entry to resolution error table to record the error when the first
-// attempt to resolve a reference to a class has failed.
-void SystemDictionary::add_resolution_error(constantPoolHandle pool, int which, symbolHandle error) {
-  unsigned int hash = resolution_errors()->compute_hash(pool, which);
-  int index = resolution_errors()->hash_to_index(hash);
-  { 
-    MutexLocker ml(SystemDictionary_lock, Thread::current());
-    resolution_errors()->add_entry(index, hash, pool, which, error);
-  }
-}
-
-// Lookup resolution error table. Returns error if found, otherwise NULL.
-symbolOop SystemDictionary::find_resolution_error(constantPoolHandle pool, int which) {
-  unsigned int hash = resolution_errors()->compute_hash(pool, which);
-  int index = resolution_errors()->hash_to_index(hash);
-  { 
-    MutexLocker ml(SystemDictionary_lock, Thread::current());
-    ResolutionErrorEntry* entry = resolution_errors()->find_entry(index, hash, pool, which);
-    return (entry != NULL) ? entry->error() : (symbolOop)NULL;
-  }
-}
-
-
-// Make sure all class components (including arrays) in the given
-// signature will be resolved to the same class in both loaders.
-// Returns the name of the type that failed a loader constraint check, or
-// NULL if no constraint failed. The returned C string needs cleaning up
-// with a ResourceMark in the caller
-char* SystemDictionary::check_signature_loaders(symbolHandle signature,
-                                               Handle loader1, Handle loader2,
-                                               bool is_method, TRAPS)  {
-  // Nothing to do if loaders are the same. 
-  if (loader1() == loader2()) {
-    return NULL;
-  }
-  
-  SignatureStream sig_strm(signature, is_method);
-  while (!sig_strm.is_done()) {
-    if (sig_strm.is_object()) {
-      symbolOop s = sig_strm.as_symbol(CHECK_NULL);
-      symbolHandle sig (THREAD, s);
-      if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
-	return sig()->as_C_string();
-      }
-    }
-    sig_strm.next();
-  }
-  return NULL;
-}
-
-
-// Since the identity hash code for symbols changes when the symbols are
-// moved from the regular perm gen (hash in the mark word) to the shared
-// spaces (hash is the address), the classes loaded into the dictionary
-// may be in the wrong buckets.
-
-void SystemDictionary::reorder_dictionary() {
-  dictionary()->reorder_dictionary();
-}
-
-
-void SystemDictionary::copy_buckets(char** top, char* end) {
-  dictionary()->copy_buckets(top, end);
-}
-
-
-void SystemDictionary::copy_table(char** top, char* end) {
-  dictionary()->copy_table(top, end);
-}
-
-
-void SystemDictionary::reverse() {
-  dictionary()->reverse();
-}
-
-int SystemDictionary::number_of_classes() {
-  return dictionary()->number_of_entries();
-}
-
-
-// ----------------------------------------------------------------------------
-#ifndef PRODUCT
-
-void SystemDictionary::print() {
-  dictionary()->print();
-
-  // Placeholders
-  GCMutexLocker mu(SystemDictionary_lock);
-  placeholders()->print();
-
-  // loader constraints - print under SD_lock
-  constraints()->print();
-}
-
-#endif
-
-void SystemDictionary::verify() {
-  guarantee(dictionary() != NULL, "Verify of system dictionary failed");
-  guarantee(constraints() != NULL,
-            "Verify of loader constraints failed");
-  guarantee(dictionary()->number_of_entries() >= 0 &&
-            placeholders()->number_of_entries() >= 0,
-            "Verify of system dictionary failed");
-
-  // Verify dictionary
-  dictionary()->verify();
-
-  GCMutexLocker mu(SystemDictionary_lock);
-  placeholders()->verify();
-
-  // Verify constraint table
-  guarantee(constraints() != NULL, "Verify of loader constraints failed");
-  constraints()->verify(dictionary());
-}
-
-
-void SystemDictionary::verify_obj_klass_present(Handle obj,
-                                                symbolHandle class_name,
-                                                Handle class_loader) {
-  GCMutexLocker mu(SystemDictionary_lock);
-  oop probe = find_class_or_placeholder(class_name, class_loader);
-  if (probe == NULL) {
-    probe = SystemDictionary::find_shared_class(class_name);
-  }
-  guarantee(probe != NULL && 
-            (!probe->is_klass() || probe == obj()), 
-                     "Loaded klasses should be in SystemDictionary");
-}
-
-#ifndef PRODUCT
-
-// statistics code
-class ClassStatistics: AllStatic {
- private:
-  static int nclasses;        // number of classes
-  static int nmethods;        // number of methods
-  static int nmethoddata;     // number of methodData    
-  static int class_size;      // size of class objects in words
-  static int method_size;     // size of method objects in words
-  static int debug_size;      // size of debug info in methods
-  static int methoddata_size; // size of methodData objects in words
-
-  static void do_class(klassOop k) {
-    nclasses++;
-    class_size += k->size();
-    if (k->klass_part()->oop_is_instance()) {
-      instanceKlass* ik = (instanceKlass*)k->klass_part();
-      class_size += ik->methods()->size();
-      class_size += ik->constants()->size();
-      class_size += ik->local_interfaces()->size();
-      class_size += ik->transitive_interfaces()->size();
-      // We do not have to count implementors, since we only store one!      
-      class_size += ik->fields()->size();
-    }
-  }
-
-  static void do_method(methodOop m) {
-    nmethods++;
-    method_size += m->size();
-    // class loader uses same objArray for empty vectors, so don't count these
-    if (m->exception_table()->length() != 0)   method_size += m->exception_table()->size();
-    if (m->has_stackmap_table()) {
-      method_size += m->stackmap_data()->size();
-    }
-
-    methodDataOop mdo = m->method_data();
-    if (mdo != NULL) {
-      nmethoddata++;
-      methoddata_size += mdo->size();
-    }
-  }
-
- public:
-  static void print() {
-    SystemDictionary::classes_do(do_class);
-    SystemDictionary::methods_do(do_method);
-    tty->print_cr("Class statistics:");
-    tty->print_cr("%d classes (%d bytes)", nclasses, class_size * oopSize);
-    tty->print_cr("%d methods (%d bytes = %d base + %d debug info)", nmethods, 
-                  (method_size + debug_size) * oopSize, method_size * oopSize, debug_size * oopSize);
-    tty->print_cr("%d methoddata (%d bytes)", nmethoddata, methoddata_size * oopSize);
-  }
-};
-
-
-int ClassStatistics::nclasses        = 0;  
-int ClassStatistics::nmethods        = 0;
-int ClassStatistics::nmethoddata     = 0;
-int ClassStatistics::class_size      = 0;
-int ClassStatistics::method_size     = 0; 
-int ClassStatistics::debug_size      = 0;
-int ClassStatistics::methoddata_size = 0;
-
-void SystemDictionary::print_class_statistics() {
-  ResourceMark rm;
-  ClassStatistics::print();
-}
-
-
-class MethodStatistics: AllStatic {
- public:
-  enum {
-    max_parameter_size = 10
-  };
- private:
-
-  static int _number_of_methods;
-  static int _number_of_final_methods;
-  static int _number_of_static_methods;
-  static int _number_of_native_methods;
-  static int _number_of_synchronized_methods;
-  static int _number_of_profiled_methods;
-  static int _number_of_bytecodes;
-  static int _parameter_size_profile[max_parameter_size];
-  static int _bytecodes_profile[Bytecodes::number_of_java_codes];
-
-  static void initialize() {
-    _number_of_methods        = 0;
-    _number_of_final_methods  = 0;
-    _number_of_static_methods = 0;
-    _number_of_native_methods = 0;
-    _number_of_synchronized_methods = 0;
-    _number_of_profiled_methods = 0;
-    _number_of_bytecodes      = 0;
-    for (int i = 0; i < max_parameter_size             ; i++) _parameter_size_profile[i] = 0;
-    for (int j = 0; j < Bytecodes::number_of_java_codes; j++) _bytecodes_profile     [j] = 0;
-  };
-
-  static void do_method(methodOop m) {
-    _number_of_methods++;
-    // collect flag info
-    if (m->is_final()       ) _number_of_final_methods++;
-    if (m->is_static()      ) _number_of_static_methods++;
-    if (m->is_native()      ) _number_of_native_methods++;
-    if (m->is_synchronized()) _number_of_synchronized_methods++;
-    if (m->method_data() != NULL) _number_of_profiled_methods++;
-    // collect parameter size info (add one for receiver, if any)
-    _parameter_size_profile[MIN2(m->size_of_parameters() + (m->is_static() ? 0 : 1), max_parameter_size - 1)]++;
-    // collect bytecodes info
-    { 
-      Thread *thread = Thread::current();
-      HandleMark hm(thread);
-      BytecodeStream s(methodHandle(thread, m));
-      Bytecodes::Code c;
-      while ((c = s.next()) >= 0) {
-        _number_of_bytecodes++;
-        _bytecodes_profile[c]++;
-      }
-    }
-  }
-
- public:
-  static void print() {
-    initialize();
-    SystemDictionary::methods_do(do_method);
-    // generate output
-    tty->cr();
-    tty->print_cr("Method statistics (static):");
-    // flag distribution
-    tty->cr();
-    tty->print_cr("%6d final        methods  %6.1f%%", _number_of_final_methods       , _number_of_final_methods        * 100.0F / _number_of_methods);
-    tty->print_cr("%6d static       methods  %6.1f%%", _number_of_static_methods      , _number_of_static_methods       * 100.0F / _number_of_methods);
-    tty->print_cr("%6d native       methods  %6.1f%%", _number_of_native_methods      , _number_of_native_methods       * 100.0F / _number_of_methods);
-    tty->print_cr("%6d synchronized methods  %6.1f%%", _number_of_synchronized_methods, _number_of_synchronized_methods * 100.0F / _number_of_methods);
-    tty->print_cr("%6d profiled     methods  %6.1f%%", _number_of_profiled_methods, _number_of_profiled_methods * 100.0F / _number_of_methods);
-    // parameter size profile
-    tty->cr();
-    { int tot = 0;
-      int avg = 0;
-      for (int i = 0; i < max_parameter_size; i++) {
-        int n = _parameter_size_profile[i];
-        tot += n;
-        avg += n*i;
-        tty->print_cr("parameter size = %1d: %6d methods  %5.1f%%", i, n, n * 100.0F / _number_of_methods);
-      }
-      assert(tot == _number_of_methods, "should be the same");
-      tty->print_cr("                    %6d methods  100.0%%", _number_of_methods);
-      tty->print_cr("(average parameter size = %3.1f including receiver, if any)", (float)avg / _number_of_methods);
-    }
-    // bytecodes profile
-    tty->cr();
-    { int tot = 0;
-      for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
-        if (Bytecodes::is_defined(i)) {
-          Bytecodes::Code c = Bytecodes::cast(i);
-          int n = _bytecodes_profile[c];
-          tot += n;
-          tty->print_cr("%9d  %7.3f%%  %s", n, n * 100.0F / _number_of_bytecodes, Bytecodes::name(c));
-        }
-      }
-      assert(tot == _number_of_bytecodes, "should be the same");
-      tty->print_cr("%9d  100.000%%", _number_of_bytecodes);
-    }
-    tty->cr();
-  }
-};
-
-int MethodStatistics::_number_of_methods;
-int MethodStatistics::_number_of_final_methods;
-int MethodStatistics::_number_of_static_methods;
-int MethodStatistics::_number_of_native_methods;
-int MethodStatistics::_number_of_synchronized_methods;
-int MethodStatistics::_number_of_profiled_methods;
-int MethodStatistics::_number_of_bytecodes;
-int MethodStatistics::_parameter_size_profile[MethodStatistics::max_parameter_size];
-int MethodStatistics::_bytecodes_profile[Bytecodes::number_of_java_codes];
-
-
-void SystemDictionary::print_method_statistics() {
-  MethodStatistics::print();
-}
-
-#endif // PRODUCT
--- a/hotspot/src/share/vm/memory/systemDictionary.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,610 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)systemDictionary.hpp	1.153 07/05/05 17:05:56 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// The system dictionary stores all loaded classes and maps:
-//
-//   [class name,class loader] -> class   i.e.  [symbolOop,oop] -> klassOop
-//
-// Classes are loaded lazily. The default VM class loader is
-// represented as NULL.
-
-// The underlying data structure is an open hash table with a fixed number
-// of buckets. During loading the loader object is locked, (for the VM loader 
-// a private lock object is used). Class loading can thus be done concurrently,
-// but only by different loaders.
-//
-// During loading a placeholder (name, loader) is temporarily placed in
-// a side data structure, and is used to detect ClassCircularityErrors
-// and to perform verification during GC.  A GC can occur in the midst
-// of class loading, as we call out to Java, have to take locks, etc.
-//
-// When class loading is finished, a new entry is added to the system
-// dictionary and the place holder is removed. Note that the protection
-// domain field of the system dictionary has not yet been filled in when
-// the "real" system dictionary entry is created.
-//
-// Clients of this class who are interested in finding if a class has
-// been completely loaded -- not classes in the process of being loaded --
-// can read the SystemDictionary unlocked. This is safe because
-//    - entries are only deleted at safepoints  
-//    - readers cannot come to a safepoint while actively examining
-//         an entry  (an entry cannot be deleted from under a reader) 
-//    - entries must be fully formed before they are available to concurrent
-//         readers (we must ensure write ordering)
-//
-// Note that placeholders are deleted at any time, as they are removed
-// when a class is completely loaded. Therefore, readers as well as writers
-// of placeholders must hold the SystemDictionary_lock.
-// 
-
-class Dictionary;
-class PlaceholderTable;
-class LoaderConstraintTable;
-class HashtableBucket;
-class ResolutionErrorTable;
-
-class SystemDictionary : AllStatic {
-  friend class VMStructs;
-  friend class CompactingPermGenGen;
-  NOT_PRODUCT(friend class instanceKlassKlass;)
-
- public:
-  // Returns a class with a given class name and class loader.  Loads the
-  // class if needed. If not found a NoClassDefFoundError or a
-  // ClassNotFoundException is thrown, depending on the value on the
-  // throw_error flag.  For most uses the throw_error argument should be set
-  // to true.
-
-  static klassOop resolve_or_fail(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
-  // Convenient call for null loader and protection domain.
-  static klassOop resolve_or_fail(symbolHandle class_name, bool throw_error, TRAPS);
-private:
-  // handle error translation for resolve_or_null results
-  static klassOop handle_resolution_exception(symbolHandle class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS);
-
-public:
-
-  // Returns a class with a given class name and class loader.
-  // Loads the class if needed. If not found NULL is returned.
-  static klassOop resolve_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
-  // Version with null loader and protection domain
-  static klassOop resolve_or_null(symbolHandle class_name, TRAPS);
-
-  // Resolve a superclass or superinterface. Called from ClassFileParser, 
-  // parse_interfaces, resolve_instance_class_or_null, load_shared_class
-  // "child_name" is the class whose super class or interface is being resolved.
-  static klassOop resolve_super_or_fail(symbolHandle child_name,
-                                        symbolHandle class_name,
-                                        Handle class_loader,
-                                        Handle protection_domain,
-                                        bool is_superclass,
-                                        TRAPS);
-
-  // Parse new stream. This won't update the system dictionary or
-  // class hierarchy, simply parse the stream. Used by JVMTI RedefineClasses.
-  static klassOop parse_stream(symbolHandle class_name,
-                               Handle class_loader,
-                               Handle protection_domain,
-                               ClassFileStream* st,
-                               TRAPS);
-                               
-  // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
-  static klassOop resolve_from_stream(symbolHandle class_name, Handle class_loader, Handle protection_domain, ClassFileStream* st, TRAPS);
-  
-  // Lookup an already loaded class. If not found NULL is returned.
-  static klassOop find(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
-
-  // Lookup an already loaded instance or array class.
-  // Do not make any queries to class loaders; consult only the cache.
-  // If not found NULL is returned.
-  static klassOop find_instance_or_array_klass(symbolHandle class_name,
-					       Handle class_loader,
-					       Handle protection_domain,
-					       TRAPS);
-
-  // Lookup an instance or array class that has already been loaded
-  // either into the given class loader, or else into another class
-  // loader that is constrained (via loader constraints) to produce
-  // a consistent class.  Do not take protection domains into account.
-  // Do not make any queries to class loaders; consult only the cache.
-  // Return NULL if the class is not found.
-  //
-  // This function is a strict superset of find_instance_or_array_klass.
-  // This function (the unchecked version) makes a conservative prediction
-  // of the result of the checked version, assuming successful lookup.
-  // If both functions return non-null, they must return the same value.
-  // Also, the unchecked version may sometimes be non-null where the
-  // checked version is null.  This can occur in several ways:
-  //   1. No query has yet been made to the class loader.
-  //   2. The class loader was queried, but chose not to delegate.
-  //   3. ClassLoader.checkPackageAccess rejected a proposed protection domain.
-  //   4. Loading was attempted, but there was a linkage error of some sort.
-  // In all of these cases, the loader constraints on this type are
-  // satisfied, and it is safe for classes in the given class loader
-  // to manipulate strongly-typed values of the found class, subject
-  // to local linkage and access checks.
-  static klassOop find_constrained_instance_or_array_klass(symbolHandle class_name,
-                                                           Handle class_loader,
-                                                           TRAPS);
-  
-  // Iterate over all klasses in dictionary
-  //   Just the classes from defining class loaders
-  static void classes_do(void f(klassOop));
-  // Added for initialize_itable_for_klass to handle exceptions
-  static void classes_do(void f(klassOop, TRAPS), TRAPS);
-  //   All classes, and their class loaders
-  static void classes_do(void f(klassOop, oop));
-  //   All classes, and their class loaders
-  //   (added for helpers that use HandleMarks and ResourceMarks)
-  static void classes_do(void f(klassOop, oop, TRAPS), TRAPS);
-  // All entries in the placeholder table and their class loaders
-  static void placeholders_do(void f(symbolOop, oop));
-
-  // Iterate over all methods in all klasses in dictionary
-  static void methods_do(void f(methodOop));
-
-  // Garbage collection support
-
-  // This method applies "blk->do_oop" to all the pointers to "system"
-  // classes and loaders.
-  static void always_strong_oops_do(OopClosure* blk);
-  static void always_strong_classes_do(OopClosure* blk);
-  // This method applies "blk->do_oop" to all the placeholders.
-  static void placeholders_do(OopClosure* blk);
-
-  // Unload (that is, break root links to) all unmarked classes and
-  // loaders.  Returns "true" iff something was unloaded.
-  static bool do_unloading(BoolObjectClosure* is_alive);
-
-  // Applies "f->do_oop" to all root oops in the system dictionary.
-  static void oops_do(OopClosure* f);
-
-  // Applies "f->do_oop" to root oops that are loaded from a shared heap.
-  static void shared_oops_do(OopClosure* f);
-
-  // System loader lock
-  static oop system_loader_lock()	    { return _system_loader_lock_obj; }
-
-private:
-  //    Traverses preloaded oops: various system classes.  These are
-  //    guaranteed to be in the perm gen.
-  static void preloaded_oops_do(OopClosure* f);
-  static void lazily_loaded_oops_do(OopClosure* f);
-
-public:
-  // Sharing support.
-  static void reorder_dictionary();
-  static void copy_buckets(char** top, char* end);
-  static void copy_table(char** top, char* end);
-  static void reverse();
-  static void set_shared_dictionary(HashtableBucket* t, int length,
-                                    int number_of_entries);
-  // Printing
-  static void print()                   PRODUCT_RETURN;
-  static void print_class_statistics()  PRODUCT_RETURN;
-  static void print_method_statistics() PRODUCT_RETURN;
-
-  // Number of contained klasses
-  // This is both fully loaded classes and classes in the process
-  // of being loaded
-  static int number_of_classes();
-
-  // Monotonically increasing counter which grows as classes are
-  // loaded or modifications such as hot-swapping or setting/removing
-  // of breakpoints are performed
-  static inline int number_of_modifications()     { assert_locked_or_safepoint(Compile_lock); return _number_of_modifications; }
-  // Needed by evolution and breakpoint code
-  static inline void notice_modification()        { assert_locked_or_safepoint(Compile_lock); ++_number_of_modifications;      }
-
-  // Verification
-  static void verify();
-
-#ifdef ASSERT
-  static bool is_internal_format(symbolHandle class_name);
-#endif
-
-  // Verify class is in dictionary
-  static void verify_obj_klass_present(Handle obj,
-                                       symbolHandle class_name,
-                                       Handle class_loader);
-
-  // Initialization
-  static void initialize(TRAPS);
-
-  // Fast access to commonly used classes (preloaded)
-  static klassOop check_klass(klassOop k) {
-    assert(k != NULL, "preloaded klass not initialized"); 
-    return k;
-  }
-
-public:
-  static klassOop object_klass()            { return check_klass(_object_klass); }
-  static klassOop string_klass()            { return check_klass(_string_klass); }
-  static klassOop class_klass()             { return check_klass(_class_klass); }
-  static klassOop cloneable_klass()         { return check_klass(_cloneable_klass); }
-  static klassOop classloader_klass()       { return check_klass(_classloader_klass); }
-  static klassOop serializable_klass()      { return check_klass(_serializable_klass); }
-  static klassOop system_klass()            { return check_klass(_system_klass); }
-
-  static klassOop throwable_klass()         { return check_klass(_throwable_klass); }
-  static klassOop error_klass()             { return check_klass(_error_klass); }
-  static klassOop threaddeath_klass()       { return check_klass(_threaddeath_klass); }
-  static klassOop exception_klass()         { return check_klass(_exception_klass); }
-  static klassOop runtime_exception_klass() { return check_klass(_runtime_exception_klass); }
-  static klassOop classNotFoundException_klass() { return check_klass(_classNotFoundException_klass); }
-  static klassOop noClassDefFoundError_klass()   { return check_klass(_noClassDefFoundError_klass); }
-  static klassOop linkageError_klass()       { return check_klass(_linkageError_klass); }
-  static klassOop ClassCastException_klass() { return check_klass(_classCastException_klass); }
-  static klassOop ArrayStoreException_klass() { return check_klass(_arrayStoreException_klass); }
-  static klassOop virtualMachineError_klass()  { return check_klass(_virtualMachineError_klass); }
-  static klassOop OutOfMemoryError_klass()  { return check_klass(_outOfMemoryError_klass); }
-  static klassOop StackOverflowError_klass() { return check_klass(_StackOverflowError_klass); }
-  static klassOop IllegalMonitorStateException_klass() { return check_klass(_illegalMonitorStateException_klass); }
-  static klassOop protectionDomain_klass()  { return check_klass(_protectionDomain_klass); }
-  static klassOop AccessControlContext_klass() { return check_klass(_AccessControlContext_klass); }
-  static klassOop reference_klass()         { return check_klass(_reference_klass); }
-  static klassOop soft_reference_klass()    { return check_klass(_soft_reference_klass); }
-  static klassOop weak_reference_klass()    { return check_klass(_weak_reference_klass); }
-  static klassOop final_reference_klass()   { return check_klass(_final_reference_klass); }
-  static klassOop phantom_reference_klass() { return check_klass(_phantom_reference_klass); }
-  static klassOop finalizer_klass()         { return check_klass(_finalizer_klass); }
-  
-  static klassOop thread_klass()            { return check_klass(_thread_klass); }
-  static klassOop threadGroup_klass()       { return check_klass(_threadGroup_klass); }
-  static klassOop properties_klass()        { return check_klass(_properties_klass); }  
-  static klassOop reflect_accessible_object_klass() { return check_klass(_reflect_accessible_object_klass); }
-  static klassOop reflect_field_klass()     { return check_klass(_reflect_field_klass); }
-  static klassOop reflect_method_klass()    { return check_klass(_reflect_method_klass); }
-  static klassOop reflect_constructor_klass() { return check_klass(_reflect_constructor_klass); }
-  static klassOop reflect_method_accessor_klass() { 
-    assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only");
-    return check_klass(_reflect_method_accessor_klass);
-  }
-  static klassOop reflect_constructor_accessor_klass() {
-    assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only");
-    return check_klass(_reflect_constructor_accessor_klass);
-  }
-  // NOTE: needed too early in bootstrapping process to have checks based on JDK version
-  static klassOop reflect_magic_klass()     { return _reflect_magic_klass; }
-  static klassOop reflect_delegating_classloader_klass() { return _reflect_delegating_classloader_klass; }
-  static klassOop reflect_constant_pool_klass() {
-    assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
-    return _reflect_constant_pool_klass;
-  }
-  static klassOop reflect_unsafe_static_field_accessor_impl_klass() {
-    assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
-    return _reflect_unsafe_static_field_accessor_impl_klass;
-  }
-
-  static klassOop vector_klass()            { return check_klass(_vector_klass); }
-  static klassOop hashtable_klass()         { return check_klass(_hashtable_klass); }
-  static klassOop stringBuffer_klass()      { return check_klass(_stringBuffer_klass); }
-  static klassOop stackTraceElement_klass() { return check_klass(_stackTraceElement_klass); }
-
-  static klassOop java_nio_Buffer_klass()   { return check_klass(_java_nio_Buffer_klass); }
-
-  static klassOop sun_misc_AtomicLongCSImpl_klass() { return _sun_misc_AtomicLongCSImpl_klass; }
-
-  static klassOop boolean_klass()           { return check_klass(_boolean_klass); }
-  static klassOop char_klass()              { return check_klass(_char_klass); }
-  static klassOop float_klass()             { return check_klass(_float_klass); }
-  static klassOop double_klass()            { return check_klass(_double_klass); }
-  static klassOop byte_klass()              { return check_klass(_byte_klass); }
-  static klassOop short_klass()             { return check_klass(_short_klass); }
-  static klassOop int_klass()               { return check_klass(_int_klass); }
-  static klassOop long_klass()              { return check_klass(_long_klass); } 
-
-  static klassOop box_klass(BasicType t) {
-    assert((uint)t < T_VOID+1, "range check");
-    return check_klass(_box_klasses[t]);
-  }
-  static BasicType box_klass_type(klassOop k);  // inverse of box_klass
-
-  // methods returning lazily loaded klasses
-  // The corresponding method to load the class must be called before calling them.
-  static klassOop abstract_ownable_synchronizer_klass() { return check_klass(_abstract_ownable_synchronizer_klass); }
-
-  static void load_abstract_ownable_synchronizer_klass(TRAPS);
-
-private:
-  // Tells whether ClassLoader.loadClassInternal is present
-  static bool has_loadClassInternal()       { return _has_loadClassInternal; }
-
-public:
-  // Tells whether ClassLoader.checkPackageAccess is present
-  static bool has_checkPackageAccess()      { return _has_checkPackageAccess; }
-
-  static bool class_klass_loaded()          { return _class_klass != NULL; }
-  static bool cloneable_klass_loaded()      { return _cloneable_klass != NULL; }
-  
-  // Returns default system loader
-  static oop java_system_loader();
-
-  // Compute the default system loader
-  static void compute_java_system_loader(TRAPS);
-
-private:
-  // Mirrors for primitive classes (created eagerly)
-  static oop check_mirror(oop m) {
-    assert(m != NULL, "mirror not initialized"); 
-    return m;
-  }
-
-public:
-  static oop int_mirror()                   { return check_mirror(_int_mirror); }
-  static oop float_mirror()                 { return check_mirror(_float_mirror); }
-  static oop double_mirror()                { return check_mirror(_double_mirror); }
-  static oop byte_mirror()                  { return check_mirror(_byte_mirror); }
-  static oop bool_mirror()                  { return check_mirror(_bool_mirror); }
-  static oop char_mirror()                  { return check_mirror(_char_mirror); }
-  static oop long_mirror()                  { return check_mirror(_long_mirror); }
-  static oop short_mirror()                 { return check_mirror(_short_mirror); }
-  static oop void_mirror()                  { return check_mirror(_void_mirror); }
-
-  static oop java_mirror(BasicType t) {
-    assert((uint)t < T_VOID+1, "range check");
-    return check_mirror(_mirrors[t]);
-  }
-  // Note:  java_lang_Class::primitive_type is the inverse of java_mirror
-
-  // Check class loader constraints
-  static bool add_loader_constraint(symbolHandle name, Handle loader1,
-                                    Handle loader2, TRAPS);
-  static char* check_signature_loaders(symbolHandle signature, Handle loader1,
-				       Handle loader2, bool is_method, TRAPS);
-
-  // Utility for printing loader "name" as part of tracing constraints
-  static const char* loader_name(oop loader) {
-    return ((loader) == NULL ? "<bootloader>" : 
-	    instanceKlass::cast((loader)->klass())->name()->as_C_string() );
-  }
-
-  // Record the error when the first attempt to resolve a reference from a constant
-  // pool entry to a class fails.
-  static void add_resolution_error(constantPoolHandle pool, int which, symbolHandle error);
-  static symbolOop find_resolution_error(constantPoolHandle pool, int which);
-
- private:
-
-  enum Constants {
-    _loader_constraint_size = 107,                     // number of entries in constraint table
-    _resolution_error_size  = 107,		       // number of entries in resolution error table
-    _nof_buckets            = 1009                     // number of buckets in hash table
-  };
-
-
-  // Static variables
-
-  // Hashtable holding loaded classes.
-  static Dictionary*            _dictionary;
-
-  // Hashtable holding placeholders for classes being loaded.
-  static PlaceholderTable*       _placeholders;
-
-  // Hashtable holding classes from the shared archive.
-  static Dictionary*             _shared_dictionary;
-
-  // Monotonically increasing counter which grows with
-  // _number_of_classes as well as hot-swapping and breakpoint setting
-  // and removal.
-  static int                     _number_of_modifications;
-
-  // Lock object for system class loader
-  static oop                     _system_loader_lock_obj;
-
-  // Constraints on class loaders
-  static LoaderConstraintTable*  _loader_constraints;
-
-  // Resolution errors
-  static ResolutionErrorTable*	 _resolution_errors;
-
-public:
-  // for VM_CounterDecay iteration support
-  friend class CounterDecay;
-  static klassOop try_get_next_class();
-
-private:
-  static void validate_protection_domain(instanceKlassHandle klass,
-                                         Handle class_loader,
-                                         Handle protection_domain, TRAPS);
-
-  friend class VM_PopulateDumpSharedSpace;
-  friend class TraversePlaceholdersClosure;
-  static Dictionary*         dictionary() { return _dictionary; }
-  static Dictionary*         shared_dictionary() { return _shared_dictionary; }
-  static PlaceholderTable*   placeholders() { return _placeholders; }
-  static LoaderConstraintTable* constraints() { return _loader_constraints; }
-  static ResolutionErrorTable* resolution_errors() { return _resolution_errors; }
-
-  // Basic loading operations
-  static klassOop resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
-  static klassOop resolve_array_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
-  static instanceKlassHandle handle_parallel_super_load(symbolHandle class_name, symbolHandle supername, Handle class_loader, Handle protection_domain, Handle lockObject, TRAPS);
-  // Wait on SystemDictionary_lock; unlocks lockObject before 
-  // waiting; relocks lockObject with correct recursion count
-  // after waiting, but before reentering SystemDictionary_lock
-  // to preserve lock order semantics.
-  static void double_lock_wait(Handle lockObject, TRAPS);
-  static void define_instance_class(instanceKlassHandle k, TRAPS);
-  static instanceKlassHandle find_or_define_instance_class(symbolHandle class_name, 
-                                                Handle class_loader, 
-                                                instanceKlassHandle k, TRAPS);
-  static instanceKlassHandle load_shared_class(symbolHandle class_name,
-                                               Handle class_loader, TRAPS);
-  static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
-                                               Handle class_loader, TRAPS);
-  static instanceKlassHandle load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS);
-  static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
-  static void check_loader_lock_contention(Handle loader_lock, TRAPS);
-
-  static klassOop find_shared_class(symbolHandle class_name);
-
-  // Setup link to hierarchy
-  static void add_to_hierarchy(instanceKlassHandle k, TRAPS);  
- 
-private:
-  // We pass in the hashtable index so we can calculate it outside of
-  // the SystemDictionary_lock.   
-
-  // Basic find on loaded classes 
-  static klassOop find_class(int index, unsigned int hash,
-                             symbolHandle name, Handle loader);
-
-  // Basic find on classes in the midst of being loaded
-  static symbolOop find_placeholder(int index, unsigned int hash,
-                                    symbolHandle name, Handle loader);
-
-  // Basic find operation of loaded classes and classes in the midst
-  // of loading;  used for assertions and verification only.
-  static oop find_class_or_placeholder(symbolHandle class_name,
-                                       Handle class_loader);
-
-  // Updating entry in dictionary
-  // Add a completely loaded class 
-  static void add_klass(int index, symbolHandle class_name,
-                        Handle class_loader, KlassHandle obj);
-
-  // Add a placeholder for a class being loaded
-  static void add_placeholder(int index, 
-                              symbolHandle class_name, 
-                              Handle class_loader);
-  static void remove_placeholder(int index,
-                                 symbolHandle class_name, 
-                                 Handle class_loader);
-
-  // Performs cleanups after resolve_super_or_fail. This typically needs
-  // to be called on failure.
-  // Won't throw, but can block.
-  static void resolution_cleanups(symbolHandle class_name,
-                                  Handle class_loader,
-                                  TRAPS);
-  
-  // Initialization
-  static void initialize_preloaded_classes(TRAPS);
-  static void initialize_basic_type_mirrors(TRAPS);
-    
-  // Class loader constraints
-  static void check_constraints(int index, unsigned int hash,
-                                instanceKlassHandle k, Handle loader, 
-                                bool defining, TRAPS);
-  static void update_dictionary(int d_index, unsigned int d_hash,
-                                int p_index, unsigned int p_hash,
-                                instanceKlassHandle k, Handle loader, TRAPS);
-
-  // Variables holding commonly used klasses (preloaded)
-  static klassOop _object_klass;
-  static klassOop _string_klass;
-  static klassOop _class_klass;
-  static klassOop _cloneable_klass;
-  static klassOop _classloader_klass;
-  static klassOop _serializable_klass;
-  static klassOop _system_klass;
-  
-  static klassOop _throwable_klass;
-  static klassOop _error_klass;
-  static klassOop _threaddeath_klass;
-  static klassOop _exception_klass;
-  static klassOop _runtime_exception_klass;
-  static klassOop _classNotFoundException_klass;
-  static klassOop _noClassDefFoundError_klass;
-  static klassOop _linkageError_klass;
-  static klassOop _classCastException_klass;
-  static klassOop _arrayStoreException_klass;
-  static klassOop _virtualMachineError_klass;
-  static klassOop _outOfMemoryError_klass;
-  static klassOop _StackOverflowError_klass;
-  static klassOop _illegalMonitorStateException_klass;
-  static klassOop _protectionDomain_klass;
-  static klassOop _AccessControlContext_klass;
-  static klassOop _reference_klass;
-  static klassOop _soft_reference_klass;
-  static klassOop _weak_reference_klass;
-  static klassOop _final_reference_klass;
-  static klassOop _phantom_reference_klass;
-  static klassOop _finalizer_klass;
-
-  static klassOop _thread_klass;
-  static klassOop _threadGroup_klass;
-  static klassOop _properties_klass;      
-  static klassOop _reflect_accessible_object_klass;
-  static klassOop _reflect_field_klass;
-  static klassOop _reflect_method_klass;
-  static klassOop _reflect_constructor_klass;
-  // 1.4 reflection implementation
-  static klassOop _reflect_magic_klass;
-  static klassOop _reflect_method_accessor_klass;
-  static klassOop _reflect_constructor_accessor_klass;
-  static klassOop _reflect_delegating_classloader_klass;
-  // 1.5 annotations implementation
-  static klassOop _reflect_constant_pool_klass;
-  static klassOop _reflect_unsafe_static_field_accessor_impl_klass;
-
-  static klassOop _stringBuffer_klass;
-  static klassOop _vector_klass;
-  static klassOop _hashtable_klass;
-
-  static klassOop _stackTraceElement_klass;
-
-  static klassOop _java_nio_Buffer_klass;
-
-  static klassOop _sun_misc_AtomicLongCSImpl_klass;
-
-  // Lazily loaded klasses
-  static volatile klassOop _abstract_ownable_synchronizer_klass;
-
-  // Box klasses
-  static klassOop _boolean_klass;
-  static klassOop _char_klass;
-  static klassOop _float_klass;
-  static klassOop _double_klass;
-  static klassOop _byte_klass;
-  static klassOop _short_klass;
-  static klassOop _int_klass;
-  static klassOop _long_klass;
-
-  // table of same
-  static klassOop _box_klasses[T_VOID+1];
-
-  static oop  _java_system_loader;
-
-  static bool _has_loadClassInternal;
-  static bool _has_checkPackageAccess;
-
-  // Primitive classes
-  static oop _int_mirror;
-  static oop _float_mirror;
-  static oop _double_mirror;
-  static oop _byte_mirror;
-  static oop _bool_mirror;
-  static oop _char_mirror;
-  static oop _long_mirror;
-  static oop _short_mirror;
-  static oop _void_mirror;
-
-  // table of same
-  static oop _mirrors[T_VOID+1];
-};
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)tenuredGeneration.cpp	1.45 07/05/05 17:05:56 JVM"
+#pragma ident "@(#)tenuredGeneration.cpp	1.46 07/05/17 15:55:16 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -89,13 +89,6 @@
   _shrink_factor = 0;
   _capacity_at_prologue = 0;
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_ARENA_NEW)) {
-    jvmpi::post_arena_new_event(Universe::heap()->addr_to_arena_id(bottom),
-				name());
-  }
-#endif // JVMPI_SUPPORT
-
   _gc_stats = new GCStats();
 
   // initialize performance counters
@@ -133,16 +126,6 @@
   return "tenured generation";
 }
 
-#ifdef JVMPI_SUPPORT
-int TenuredGeneration::addr_to_arena_id(void* addr) {
-  if (_the_space->is_in_reserved(addr)) {
-    return 0;
-  } else {
-    return -1;
-  }
-}
-#endif // JVMPI_SUPPORT
-
 void TenuredGeneration::compute_new_size() {
   assert(_shrink_factor <= 100, "invalid shrink factor");
   size_t current_shrink_factor = _shrink_factor;
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)tenuredGeneration.hpp	1.25 07/05/05 17:05:55 JVM"
+#pragma ident "@(#)tenuredGeneration.hpp	1.26 07/05/17 15:55:18 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -63,11 +63,6 @@
   bool must_be_youngest() const { return false; }
   bool must_be_oldest() const { return true; }
 
-#ifdef JVMPI_SUPPORT
-  // override 
-  int addr_to_arena_id(void* addr); 
-#endif // JVMPI_SUPPORT
-
   // Does a "full" (forced) collection invoked on this generation collect
   // all younger generations as well? Note that this is a
   // hack to allow the collection of the younger gen first if the flag is
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)universe.cpp	1.357 07/05/05 17:05:57 JVM"
+#pragma ident "@(#)universe.cpp	1.358 07/05/17 15:55:21 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -75,14 +75,6 @@
 oop Universe::_virtual_machine_error_instance         = NULL;
 oop Universe::_vm_exception                           = NULL;
 oop Universe::_emptySymbol                            = NULL;
-#ifdef JVMPI_SUPPORT
-Universe::JVMPIState Universe::_jvmpi_alloc_event_enabled       = _jvmpi_disabled;
-bool Universe::_jvmpi_move_event_enabled              = false;
-bool Universe::_jvmpi_jni_global_alloc_event_enabled      = false;
-bool Universe::_jvmpi_jni_global_free_event_enabled       = false;
-bool Universe::_jvmpi_jni_weak_global_alloc_event_enabled = false;
-bool Universe::_jvmpi_jni_weak_global_free_event_enabled  = false;
-#endif // JVMPI_SUPPORT
 
 // These variables are guarded by FullGCALot_lock.
 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
@@ -1189,48 +1181,6 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-void Universe::jvmpi_object_alloc(oop obj, size_t bytesize) {
-  JavaThread *calling_thread = JavaThread::active();
-  assert(calling_thread != NULL, "must be posting for another thread");
-  if (calling_thread == NULL) return; // robustness
-
-  GrowableArray<DeferredObjAllocEvent *>* deferred_list =
-    calling_thread->deferred_obj_alloc_events();
-
-  if (deferred_list == NULL) {
-    jvmpi::post_object_alloc_event(obj, bytesize,
-      heap()->addr_to_arena_id(obj), 0);
-  } else {
-    DeferredObjAllocEvent *node =
-      new DeferredObjAllocEvent(obj, bytesize, heap()->addr_to_arena_id(obj));
-    deferred_list->append(node);
-  }
-}
-
-
-void Universe::jvmpi_object_move(oop from, oop to) {
-  CollectedHeap* ch = heap();
-  jvmpi::post_object_move_event(from, ch->addr_to_arena_id(from),
-				to, ch->addr_to_arena_id(to));
-}
-
-
-void Universe::jvmpi_post_deferred_obj_alloc_events(
-  GrowableArray<DeferredObjAllocEvent *>* deferred_list) {
-
-  for (int i = 0; i < deferred_list->length(); i++) {
-    DeferredObjAllocEvent *node = deferred_list->at(i);
-    assert(node != NULL, "expected DeferredObjAllocEvent node");
-    jvmpi::post_object_alloc_event(node->get_oop(), node->bytesize(),
-      node->arena_id(), 0);
-
-    delete node;
-  }
-}
-#endif // JVMPI_SUPPORT
-
-
 void CommonMethodOopCache::init(klassOop k, methodOop m, TRAPS) {
   if (!UseSharedSpaces) {
     _klass = k;
--- a/hotspot/src/share/vm/memory/universe.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/memory/universe.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)universe.hpp	1.181 07/05/05 17:05:57 JVM"
+#pragma ident "@(#)universe.hpp	1.182 07/05/17 15:55:24 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -108,15 +108,6 @@
   friend void  universe2_init();
   friend bool  universe_post_init();
 
-#ifdef JVMPI_SUPPORT
- public:
-  enum JVMPIState {
-    _jvmpi_disabled = 0,
-    _jvmpi_enabled = 1,
-    _jvmpi_disabling = 2
-  };
-#endif // JVMPI_SUPPORT
-
  private:
   // Known classes in the VM
   static klassOop _boolArrayKlassObj;
@@ -200,16 +191,6 @@
   // otherwise return the given default error.
   static oop	    gen_out_of_memory_error(oop default_err);
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support (cached values for performance)
-  static JVMPIState _jvmpi_alloc_event_enabled;
-  static bool       _jvmpi_move_event_enabled;
-  static bool       _jvmpi_jni_global_alloc_event_enabled;
-  static bool       _jvmpi_jni_global_free_event_enabled;
-  static bool       _jvmpi_jni_weak_global_alloc_event_enabled;
-  static bool       _jvmpi_jni_weak_global_free_event_enabled;
-#endif // JVMPI_SUPPORT
-
   // Historic gc information
   static size_t _heap_capacity_at_last_gc;
   static size_t _heap_used_at_last_gc;
@@ -308,26 +289,6 @@
   // The particular choice of collected heap.
   static CollectedHeap* heap() { return _collectedHeap; }
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support
-  static bool jvmpi_slow_allocation()                       { return (_jvmpi_alloc_event_enabled != _jvmpi_disabled); }
-  static bool jvmpi_alloc_event_enabled()                   { return (_jvmpi_alloc_event_enabled == _jvmpi_enabled); }
-  static void set_jvmpi_alloc_event_enabled(JVMPIState b)   { _jvmpi_alloc_event_enabled = b;   }
-
-  static bool jvmpi_move_event_enabled()                    { return _jvmpi_move_event_enabled; }
-  static void set_jvmpi_move_event_enabled(bool b)          { _jvmpi_move_event_enabled = b;    }
-
-  static bool jvmpi_jni_global_alloc_event_enabled()                 { return _jvmpi_jni_global_alloc_event_enabled; }
-  static void set_jvmpi_jni_global_alloc_event_enabled(bool b)       { _jvmpi_jni_global_alloc_event_enabled = b;    }
-  static bool jvmpi_jni_global_free_event_enabled()                  { return _jvmpi_jni_global_free_event_enabled; }
-  static void set_jvmpi_jni_global_free_event_enabled(bool b)        { _jvmpi_jni_global_free_event_enabled = b;    }
-
-  static bool jvmpi_jni_weak_global_alloc_event_enabled()            { return _jvmpi_jni_weak_global_alloc_event_enabled; }
-  static void set_jvmpi_jni_weak_global_alloc_event_enabled(bool b)  { _jvmpi_jni_weak_global_alloc_event_enabled = b;    }
-  static bool jvmpi_jni_weak_global_free_event_enabled()             { return _jvmpi_jni_weak_global_free_event_enabled; }
-  static void set_jvmpi_jni_weak_global_free_event_enabled(bool b)   { _jvmpi_jni_weak_global_free_event_enabled = b;    }
-#endif // JVMPI_SUPPORT
-
   // Historic gc information
   static size_t get_heap_capacity_at_last_gc()         { return _heap_capacity_at_last_gc; }
   static size_t get_heap_free_at_last_gc()             { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }
@@ -400,16 +361,6 @@
 
   // Compiler support
   static int base_vtable_size()               { return _base_vtable_size; }
-
-#ifdef JVMPI_SUPPORT
-  // jvmpi support
-  static void jvmpi_object_alloc(oop obj, size_t bytesize);
-  static void jvmpi_object_move(oop from, oop to);
-
-  // deferred JVM/PI OBJECT_ALLOC event support:
-  static void jvmpi_post_deferred_obj_alloc_events(
-    GrowableArray<DeferredObjAllocEvent *>* deferred_list);
-#endif // JVMPI_SUPPORT
 };
 
 class DeferredObjAllocEvent : public CHeapObj {
--- a/hotspot/src/share/vm/memory/vmSymbols.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vmSymbols.cpp	1.27 07/05/05 17:05:57 JVM"
-#endif
-/*
- * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_vmSymbols.cpp.incl"
-
-
-symbolOop vmSymbols::_symbols[vmSymbols::vm_symbols_terminating_enum];
-
-symbolOop vmSymbols::_type_signatures[T_VOID+1] = { NULL /*, NULL...*/ };
-
-#define VM_SYMBOL_INITIALIZE(name, string) { \
-  symbolOop sym = oopFactory::new_symbol(string, CHECK);  \
-  _symbols[VM_SYMBOL_ENUM_NAME(name)] = sym; }
-
-
-void vmSymbols::initialize(TRAPS) {
-  if (!UseSharedSpaces) {
-    VM_SYMBOLS_DO(VM_SYMBOL_INITIALIZE)
-
-      _type_signatures[T_BYTE]    = byte_signature();
-    _type_signatures[T_CHAR]    = char_signature();
-    _type_signatures[T_DOUBLE]  = double_signature();
-    _type_signatures[T_FLOAT]   = float_signature();
-    _type_signatures[T_INT]     = int_signature();
-    _type_signatures[T_LONG]    = long_signature();
-    _type_signatures[T_SHORT]   = short_signature();
-    _type_signatures[T_BOOLEAN] = bool_signature();
-    _type_signatures[T_VOID]    = void_signature();
-    // no single signatures for T_OBJECT or T_ARRAY
-  }
-}
-
-
-void vmSymbols::oops_do(OopClosure* f, bool do_all) {
-  for (int index = 0; index < vm_symbols_terminating_enum; index++) {
-    f->do_oop((oop*) &_symbols[index]);
-  }
-  for (int i = 0; i < T_VOID+1; i++) {
-    if (_type_signatures[i] != NULL) {
-      assert(i >= T_BOOLEAN, "checking");
-      f->do_oop((oop*)&_type_signatures[i]);
-    } else if (do_all) {
-      f->do_oop((oop*)&_type_signatures[i]);
-    }
-  }
-}
-
-
-BasicType vmSymbols::signature_type(symbolOop s) {
-  assert(s != NULL, "checking");
-  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
-    if (s == _type_signatures[i]) {
-      return (BasicType)i;
-    }
-  }
-  return T_OBJECT;
-}
-
-
-#define VM_INTRINSIC_INITIALIZE(id, klass, name, sig) #id,
-static const char* vm_intrinsic_name_table[] = {
-  NULL,
-  VM_INTRINSICS_DO(VM_INTRINSIC_INITIALIZE)
-  NULL
-};
-
-const char* vmIntrinsics::name_at(int raw_id) {
-  if ((uint)raw_id < (uint)_vm_intrinsics_terminating_enum)
-    return vm_intrinsic_name_table[raw_id];
-  else
-    return NULL;
-}
--- a/hotspot/src/share/vm/memory/vmSymbols.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,853 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)vmSymbols.hpp	1.161 07/05/05 17:05:57 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// The classes vmSymbols and vmSymbolHandles are a name spaces for fast lookup of 
-// symbols commonly used in the VM. The first class return a symbolOop, while the
-// second class returns a SymbolHandle. The underlying data structure is shared
-// between the two classes.
-//
-// Sample usage:
-//
-//   symbolOop obj       = vmSymbols::java_lang_Object()();
-//   SymbolHandle handle = vmSymbolHandles::java_lang_Object();
-
-
-// Mapping function names to values. New entries should be added below.
-
-#define VM_SYMBOLS_DO(template)                                                                   \
-  /* commonly used class names */                                                                 \
-  template(java_lang_System,                          "java/lang/System")                         \
-  template(java_lang_Object,                          "java/lang/Object")                         \
-  template(java_lang_Class,                           "java/lang/Class")                          \
-  template(java_lang_String,                          "java/lang/String")                         \
-  template(java_lang_Thread,                          "java/lang/Thread")                         \
-  template(java_lang_ThreadGroup,                     "java/lang/ThreadGroup")                    \
-  template(java_lang_Cloneable,                       "java/lang/Cloneable")                      \
-  template(java_lang_Throwable,                       "java/lang/Throwable")                      \
-  template(java_lang_ClassLoader,                     "java/lang/ClassLoader")                    \
-  template(java_lang_ClassLoader_NativeLibrary,       "java/lang/ClassLoader\x024NativeLibrary")  \
-  template(java_lang_ThreadDeath,                     "java/lang/ThreadDeath")                    \
-  template(java_lang_Boolean,                         "java/lang/Boolean")                        \
-  template(java_lang_Character,                       "java/lang/Character")                      \
-  template(java_lang_Float,                           "java/lang/Float")                          \
-  template(java_lang_Double,                          "java/lang/Double")                         \
-  template(java_lang_Byte,                            "java/lang/Byte")                           \
-  template(java_lang_Short,                           "java/lang/Short")                          \
-  template(java_lang_Integer,                         "java/lang/Integer")                        \
-  template(java_lang_Long,                            "java/lang/Long")                           \
-  template(java_lang_Shutdown,                        "java/lang/Shutdown")                       \
-  template(java_lang_ref_Reference,                   "java/lang/ref/Reference")                  \
-  template(java_lang_ref_SoftReference,               "java/lang/ref/SoftReference")              \
-  template(java_lang_ref_WeakReference,               "java/lang/ref/WeakReference")              \
-  template(java_lang_ref_FinalReference,              "java/lang/ref/FinalReference")             \
-  template(java_lang_ref_PhantomReference,            "java/lang/ref/PhantomReference")           \
-  template(java_lang_ref_Finalizer,                   "java/lang/ref/Finalizer")                  \
-  template(java_lang_reflect_AccessibleObject,        "java/lang/reflect/AccessibleObject")       \
-  template(java_lang_reflect_Method,                  "java/lang/reflect/Method")                 \
-  template(java_lang_reflect_Constructor,             "java/lang/reflect/Constructor")            \
-  template(java_lang_reflect_Field,                   "java/lang/reflect/Field")                  \
-  template(java_lang_StringBuffer,                    "java/lang/StringBuffer")                   \
-  template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
-  template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
-  template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
-  template(java_io_OutputStream,                      "java/io/OutputStream")                     \
-  template(java_io_Reader,                            "java/io/Reader")                           \
-  template(java_io_BufferedReader,                    "java/io/BufferedReader")                   \
-  template(java_io_FileInputStream,                   "java/io/FileInputStream")                  \
-  template(java_io_ByteArrayInputStream,              "java/io/ByteArrayInputStream")             \
-  template(java_io_Serializable,                      "java/io/Serializable")                     \
-  template(java_util_Properties,                      "java/util/Properties")                     \
-  template(java_util_Vector,                          "java/util/Vector")                         \
-  template(java_util_AbstractList,                    "java/util/AbstractList")                   \
-  template(java_util_Hashtable,                       "java/util/Hashtable")                      \
-  template(java_lang_Compiler,                        "java/lang/Compiler")                       \
-  template(sun_misc_Signal,                           "sun/misc/Signal")                          \
-  template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
-                                                                                                  \
-  /* class file format tags */                                                                    \
-  template(tag_source_file,                           "SourceFile")                               \
-  template(tag_inner_classes,                         "InnerClasses")                             \
-  template(tag_constant_value,                        "ConstantValue")                            \
-  template(tag_code,                                  "Code")                                     \
-  template(tag_exceptions,                            "Exceptions")                               \
-  template(tag_line_number_table,                     "LineNumberTable")                          \
-  template(tag_local_variable_table,                  "LocalVariableTable")                       \
-  template(tag_local_variable_type_table,             "LocalVariableTypeTable")                   \
-  template(tag_stack_map_table,                       "StackMapTable")                            \
-  template(tag_synthetic,                             "Synthetic")                                \
-  template(tag_deprecated,                            "Deprecated")                               \
-  template(tag_source_debug_extension,                "SourceDebugExtension")                     \
-  template(tag_signature,                             "Signature")                                \
-  template(tag_runtime_visible_annotations,           "RuntimeVisibleAnnotations")                \
-  template(tag_runtime_invisible_annotations,         "RuntimeInvisibleAnnotations")              \
-  template(tag_runtime_visible_parameter_annotations, "RuntimeVisibleParameterAnnotations")       \
-  template(tag_runtime_invisible_parameter_annotations,"RuntimeInvisibleParameterAnnotations")    \
-  template(tag_annotation_default,                    "AnnotationDefault")                        \
-  template(tag_enclosing_method,                      "EnclosingMethod")                          \
-                                                                                                  \
-  /* exception klasses: at least all exceptions thrown by the VM have entries here */             \
-  template(java_lang_ArithmeticException,             "java/lang/ArithmeticException")            \
-  template(java_lang_ArrayIndexOutOfBoundsException,  "java/lang/ArrayIndexOutOfBoundsException") \
-  template(java_lang_ArrayStoreException,             "java/lang/ArrayStoreException")            \
-  template(java_lang_ClassCastException,              "java/lang/ClassCastException")             \
-  template(java_lang_ClassNotFoundException,          "java/lang/ClassNotFoundException")         \
-  template(java_lang_CloneNotSupportedException,      "java/lang/CloneNotSupportedException")     \
-  template(java_lang_IllegalAccessException,          "java/lang/IllegalAccessException")         \
-  template(java_lang_IllegalArgumentException,        "java/lang/IllegalArgumentException")       \
-  template(java_lang_IllegalMonitorStateException,    "java/lang/IllegalMonitorStateException")   \
-  template(java_lang_IllegalThreadStateException,     "java/lang/IllegalThreadStateException")    \
-  template(java_lang_IndexOutOfBoundsException,       "java/lang/IndexOutOfBoundsException")      \
-  template(java_lang_InstantiationException,          "java/lang/InstantiationException")         \
-  template(java_lang_InstantiationError,              "java/lang/InstantiationError")             \
-  template(java_lang_InterruptedException,            "java/lang/InterruptedException")           \
-  template(java_lang_LinkageError,                    "java/lang/LinkageError")                   \
-  template(java_lang_NegativeArraySizeException,      "java/lang/NegativeArraySizeException")     \
-  template(java_lang_NoSuchFieldException,            "java/lang/NoSuchFieldException")           \
-  template(java_lang_NoSuchMethodException,           "java/lang/NoSuchMethodException")          \
-  template(java_lang_NullPointerException,            "java/lang/NullPointerException")           \
-  template(java_lang_StringIndexOutOfBoundsException, "java/lang/StringIndexOutOfBoundsException")\
-  template(java_lang_InvalidClassException,           "java/lang/InvalidClassException")          \
-  template(java_lang_reflect_InvocationTargetException, "java/lang/reflect/InvocationTargetException") \
-  template(java_lang_Exception,                       "java/lang/Exception")                      \
-  template(java_lang_RuntimeException,                "java/lang/RuntimeException")               \
-  template(java_io_IOException,                       "java/io/IOException")                      \
-  template(java_security_PrivilegedActionException,   "java/security/PrivilegedActionException")  \
-                                                                                                  \
-  /* error klasses: at least all errors thrown by the VM have entries here */                     \
-  template(java_lang_AbstractMethodError,             "java/lang/AbstractMethodError")            \
-  template(java_lang_ClassCircularityError,           "java/lang/ClassCircularityError")          \
-  template(java_lang_ClassFormatError,                "java/lang/ClassFormatError")               \
-  template(java_lang_UnsupportedClassVersionError,    "java/lang/UnsupportedClassVersionError")   \
-  template(java_lang_Error,                           "java/lang/Error")                          \
-  template(java_lang_ExceptionInInitializerError,     "java/lang/ExceptionInInitializerError")    \
-  template(java_lang_IllegalAccessError,              "java/lang/IllegalAccessError")             \
-  template(java_lang_IncompatibleClassChangeError,    "java/lang/IncompatibleClassChangeError")   \
-  template(java_lang_InternalError,                   "java/lang/InternalError")                  \
-  template(java_lang_NoClassDefFoundError,            "java/lang/NoClassDefFoundError")           \
-  template(java_lang_NoSuchFieldError,                "java/lang/NoSuchFieldError")               \
-  template(java_lang_NoSuchMethodError,               "java/lang/NoSuchMethodError")              \
-  template(java_lang_OutOfMemoryError,                "java/lang/OutOfMemoryError")               \
-  template(java_lang_UnsatisfiedLinkError,            "java/lang/UnsatisfiedLinkError")           \
-  template(java_lang_VerifyError,                     "java/lang/VerifyError")                    \
-  template(java_lang_SecurityException,               "java/lang/SecurityException")              \
-  template(java_lang_VirtualMachineError,             "java/lang/VirtualMachineError")            \
-  template(java_lang_StackOverflowError,              "java/lang/StackOverflowError")             \
-  template(java_lang_StackTraceElement,               "java/lang/StackTraceElement")              \
-                                                                                                  \
-  /* handling native inlining */                                                                  \
-  template(java_lang_Integer_reverseBytes_name,      "reverseBytes")                              \
-  template(java_lang_Integer_reverseBytes_signature, "(I)I")                                      \
-  template(java_lang_Long_reverseBytes_name,         "reverseBytes")                              \
-  template(java_lang_Long_reverseBytes_signature,    "(J)J")                                      \
-  template(java_lang_Math,                            "java/lang/Math")                           \
-  template(java_lang_StrictMath,                      "java/lang/StrictMath")                     \
-  template(java_lang_Math_abs_name,                   "abs")                                      \
-  template(java_lang_Math_abs_signature,              "(D)D")                                     \
-  template(java_lang_Math_sin_name,                   "sin")                                      \
-  template(java_lang_Math_sin_signature,              "(D)D")                                     \
-  template(java_lang_Math_cos_name,                   "cos")                                      \
-  template(java_lang_Math_cos_signature,              "(D)D")                                     \
-  template(java_lang_Math_tan_name,                   "tan")                                      \
-  template(java_lang_Math_tan_signature,              "(D)D")                                     \
-  template(java_lang_Math_atan2_name,                 "atan2")                                    \
-  template(java_lang_Math_atan2_signature,            "(DD)D")                                    \
-  template(java_lang_Math_sqrt_name,                  "sqrt")                                     \
-  template(java_lang_Math_sqrt_signature,             "(D)D")                                     \
-  template(java_lang_Math_exp_name,                   "exp")                                      \
-  template(java_lang_Math_exp_signature,              "(D)D")                                     \
-  template(java_lang_Math_log_name,                   "log")                                      \
-  template(java_lang_Math_log_signature,              "(D)D")                                     \
-  template(java_lang_Math_log10_name,                 "log10")                                    \
-  template(java_lang_Math_log10_signature,            "(D)D")                                     \
-  template(java_lang_Math_pow_name,                   "pow")                                      \
-  template(java_lang_Math_pow_signature,              "(DD)D")                                    \
-  template(java_lang_Float_floatToRawIntBits_name,    "floatToRawIntBits")                        \
-  template(java_lang_Float_floatToIntBits_name,       "floatToIntBits")                           \
-  template(java_lang_Float_intBitsToFloat_name,       "intBitsToFloat")                           \
-  template(java_lang_Double_doubleToRawLongBits_name, "doubleToRawLongBits")                      \
-  template(java_lang_Double_doubleToLongBits_name,    "doubleToLongBits")                         \
-  template(java_lang_Double_longBitsToDouble_name,    "longBitsToDouble")                         \
-  template(java_lang_System_arraycopy_name,           "arraycopy")                                \
-  template(java_lang_System_arraycopy_signature,      "(Ljava/lang/Object;ILjava/lang/Object;II)V") \
-  template(java_lang_System_currentTimeMillis_name,   "currentTimeMillis")                        \
-  template(java_lang_System_currentTimeMillis_signature, "()J")                                   \
-  template(java_lang_System_nanoTime_name,             "nanoTime")                                \
-  template(java_lang_System_nanoTime_signature,       "()J")                                      \
-  template(java_lang_Object_hashCode_name,            "hashCode")                                 \
-  template(java_lang_Object_hashCode_signature,       "()I")                                      \
-  template(java_lang_String_hashCode_name,            "hashCode")                                 \
-  template(java_lang_String_hashCode_signature,       "()I")                                      \
-  template(java_lang_System_identityHashCode_name,    "identityHashCode")                         \
-  template(java_lang_System_identityHashCode_signature,"(Ljava/lang/Object;)I")                   \
-  template(java_lang_Thread_currentThread_name,       "currentThread")                            \
-  template(java_lang_Thread_currentThread_signature,  "()Ljava/lang/Thread;")                     \
-  template(java_lang_Thread_isInterrupted_name,       "isInterrupted")                            \
-  template(java_lang_Thread_isInterrupted_signature,  "(Z)Z")                                     \
-  template(java_lang_Class_isAssignableFrom_name,     "isAssignableFrom")                         \
-  template(java_lang_Class_isAssignableFrom_signature,"(Ljava/lang/Class;)Z")                     \
-  template(java_lang_Class_isInstance_name,           "isInstance")                               \
-  template(java_lang_Class_isInstance_signature,      "(Ljava/lang/Object;)Z")                    \
-  template(java_lang_Class_getModifiers_name,         "getModifiers")                             \
-  template(java_lang_Class_getModifiers_signature,    "()I")                                      \
-  template(java_lang_Object_getClass_name,            "getClass")                                 \
-  template(java_lang_Object_getClass_signature,       "()Ljava/lang/Class;")                      \
-  template(java_lang_String_compareTo_name,           "compareTo")                                \
-  template(java_lang_String_compareTo_signature,      "(Ljava/lang/String;)I")                    \
-  template(java_util_Vector_elementAt_name,           "elementAt")                                \
-  template(java_util_Vector_elementAt_signature,      "(I)Ljava/lang/Object;")                    \
-  template(java_nio_Buffer,                           "java/nio/Buffer")                          \
-  template(java_nio_Buffer_checkIndex_name,           "checkIndex")                               \
-  template(java_nio_Buffer_checkIndex_signature,      "(I)I")                                     \
-  template(sun_misc_AtomicLongCSImpl,                 "sun/misc/AtomicLongCSImpl")                \
-  template(sun_misc_AtomicLongCSImpl_attemptUpdate_name, "attemptUpdate")                         \
-  template(sun_misc_AtomicLongCSImpl_attemptUpdate_signature, "(JJ)Z")                            \
-  template(sun_misc_AtomicLongCSImpl_value_name, "value")                                         \
-  template(java_util_concurrent_locks_AbstractOwnableSynchronizer,   "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
-                                                                                                  \
-  /* support for Unsafe class */                                                                  \
-  template(sun_misc_Unsafe,                           "sun/misc/Unsafe")                          \
-                                                                                                  \
-  template(sun_misc_Unsafe_getObject_name,            "getObject")                                \
-  template(sun_misc_Unsafe_getBoolean_name,           "getBoolean")                               \
-  template(sun_misc_Unsafe_getByte_name,              "getByte")                                  \
-  template(sun_misc_Unsafe_getShort_name,             "getShort")                                 \
-  template(sun_misc_Unsafe_getChar_name,              "getChar")                                  \
-  template(sun_misc_Unsafe_getInt_name,               "getInt")                                   \
-  template(sun_misc_Unsafe_getLong_name,              "getLong")                                  \
-  template(sun_misc_Unsafe_getFloat_name,             "getFloat")                                 \
-  template(sun_misc_Unsafe_getDouble_name,            "getDouble")                                \
-  template(sun_misc_Unsafe_getAddress_name,           "getAddress")                               \
-                                                                                                  \
-  template(sun_misc_Unsafe_putObject_name,            "putObject")                                \
-  template(sun_misc_Unsafe_putBoolean_name,           "putBoolean")                               \
-  template(sun_misc_Unsafe_putByte_name,              "putByte")                                  \
-  template(sun_misc_Unsafe_putShort_name,             "putShort")                                 \
-  template(sun_misc_Unsafe_putChar_name,              "putChar")                                  \
-  template(sun_misc_Unsafe_putInt_name,               "putInt")                                   \
-  template(sun_misc_Unsafe_putLong_name,              "putLong")                                  \
-  template(sun_misc_Unsafe_putFloat_name,             "putFloat")                                 \
-  template(sun_misc_Unsafe_putDouble_name,            "putDouble")                                \
-  template(sun_misc_Unsafe_putAddress_name,           "putAddress")                               \
-                                                                                                  \
-  template(sun_misc_Unsafe_getObjectVolatile_name,    "getObjectVolatile")                        \
-  template(sun_misc_Unsafe_getBooleanVolatile_name,   "getBooleanVolatile")                       \
-  template(sun_misc_Unsafe_getByteVolatile_name,      "getByteVolatile")                          \
-  template(sun_misc_Unsafe_getShortVolatile_name,     "getShortVolatile")                         \
-  template(sun_misc_Unsafe_getCharVolatile_name,      "getCharVolatile")                          \
-  template(sun_misc_Unsafe_getIntVolatile_name,       "getIntVolatile")                           \
-  template(sun_misc_Unsafe_getLongVolatile_name,      "getLongVolatile")                          \
-  template(sun_misc_Unsafe_getFloatVolatile_name,     "getFloatVolatile")                         \
-  template(sun_misc_Unsafe_getDoubleVolatile_name,    "getDoubleVolatile")                        \
-                                                                                                  \
-  template(sun_misc_Unsafe_putObjectVolatile_name,    "putObjectVolatile")                        \
-  template(sun_misc_Unsafe_putBooleanVolatile_name,   "putBooleanVolatile")                       \
-  template(sun_misc_Unsafe_putByteVolatile_name,      "putByteVolatile")                          \
-  template(sun_misc_Unsafe_putShortVolatile_name,     "putShortVolatile")                         \
-  template(sun_misc_Unsafe_putCharVolatile_name,      "putCharVolatile")                          \
-  template(sun_misc_Unsafe_putIntVolatile_name,       "putIntVolatile")                           \
-  template(sun_misc_Unsafe_putLongVolatile_name,      "putLongVolatile")                          \
-  template(sun_misc_Unsafe_putFloatVolatile_name,     "putFloatVolatile")                         \
-  template(sun_misc_Unsafe_putDoubleVolatile_name,    "putDoubleVolatile")                        \
-                                                                                                  \
-  template(sun_misc_Unsafe_allocateInstance_name,     "allocateInstance")                         \
-                                                                                                  \
-  template(sun_misc_Unsafe_compareAndSwapObject_name, "compareAndSwapObject")                     \
-  template(sun_misc_Unsafe_compareAndSwapLong_name,   "compareAndSwapLong")                       \
-  template(sun_misc_Unsafe_compareAndSwapInt_name,    "compareAndSwapInt")                        \
-  template(sun_misc_Unsafe_putOrderedObject_name,     "putOrderedObject")                         \
-  template(sun_misc_Unsafe_putOrderedLong_name,       "putOrderedLong")                           \
-  template(sun_misc_Unsafe_putOrderedInt_name,        "putOrderedInt")                            \
-  template(sun_misc_Unsafe_park_name,                 "park")                                     \
-  template(sun_misc_Unsafe_unpark_name,               "unpark")                                   \
-                                                                                                  \
-  template(sun_misc_Unsafe_prefetchRead_name,         "prefetchRead")                             \
-  template(sun_misc_Unsafe_prefetchWrite_name,        "prefetchWrite")                            \
-  template(sun_misc_Unsafe_prefetchReadStatic_name,   "prefetchReadStatic")                       \
-  template(sun_misc_Unsafe_prefetchWriteStatic_name,  "prefetchWriteStatic")                      \
-                                                                                                  \
-  /* %%% the following are temporary until the 1.4.0 sun.misc.Unsafe goes away */                 \
-  template(sun_misc_Unsafe_getObject_obj32_signature, "(Ljava/lang/Object;I)Ljava/lang/Object;")  \
-  template(sun_misc_Unsafe_getBoolean_obj32_signature,"(Ljava/lang/Object;I)Z")                   \
-  template(sun_misc_Unsafe_getByte_obj32_signature,   "(Ljava/lang/Object;I)B")                   \
-  template(sun_misc_Unsafe_getShort_obj32_signature,  "(Ljava/lang/Object;I)S")                   \
-  template(sun_misc_Unsafe_getChar_obj32_signature,   "(Ljava/lang/Object;I)C")                   \
-  template(sun_misc_Unsafe_getInt_obj32_signature,    "(Ljava/lang/Object;I)I")                   \
-  template(sun_misc_Unsafe_getLong_obj32_signature,   "(Ljava/lang/Object;I)J")                   \
-  template(sun_misc_Unsafe_getFloat_obj32_signature,  "(Ljava/lang/Object;I)F")                   \
-  template(sun_misc_Unsafe_getDouble_obj32_signature, "(Ljava/lang/Object;I)D")                   \
-                                                                                                  \
-  /* %%% the following are temporary until the 1.4.0 sun.misc.Unsafe goes away */                 \
-  template(sun_misc_Unsafe_putObject_obj32_signature, "(Ljava/lang/Object;ILjava/lang/Object;)V") \
-  template(sun_misc_Unsafe_putBoolean_obj32_signature,"(Ljava/lang/Object;IZ)V")                  \
-  template(sun_misc_Unsafe_putByte_obj32_signature,   "(Ljava/lang/Object;IB)V")                  \
-  template(sun_misc_Unsafe_putShort_obj32_signature,  "(Ljava/lang/Object;IS)V")                  \
-  template(sun_misc_Unsafe_putChar_obj32_signature,   "(Ljava/lang/Object;IC)V")                  \
-  template(sun_misc_Unsafe_putInt_obj32_signature,    "(Ljava/lang/Object;II)V")                  \
-  template(sun_misc_Unsafe_putLong_obj32_signature,   "(Ljava/lang/Object;IJ)V")                  \
-  template(sun_misc_Unsafe_putFloat_obj32_signature,  "(Ljava/lang/Object;IF)V")                  \
-  template(sun_misc_Unsafe_putDouble_obj32_signature, "(Ljava/lang/Object;ID)V")                  \
-                                                                                                  \
-  template(sun_misc_Unsafe_getObject_obj_signature,   "(Ljava/lang/Object;J)Ljava/lang/Object;")  \
-  template(sun_misc_Unsafe_getBoolean_obj_signature,  "(Ljava/lang/Object;J)Z")                   \
-  template(sun_misc_Unsafe_getByte_obj_signature,     "(Ljava/lang/Object;J)B")                   \
-  template(sun_misc_Unsafe_getShort_obj_signature,    "(Ljava/lang/Object;J)S")                   \
-  template(sun_misc_Unsafe_getChar_obj_signature,     "(Ljava/lang/Object;J)C")                   \
-  template(sun_misc_Unsafe_getInt_obj_signature,      "(Ljava/lang/Object;J)I")                   \
-  template(sun_misc_Unsafe_getLong_obj_signature,     "(Ljava/lang/Object;J)J")                   \
-  template(sun_misc_Unsafe_getFloat_obj_signature,    "(Ljava/lang/Object;J)F")                   \
-  template(sun_misc_Unsafe_getDouble_obj_signature,   "(Ljava/lang/Object;J)D")                   \
-                                                                                                  \
-  template(sun_misc_Unsafe_putObject_obj_signature,   "(Ljava/lang/Object;JLjava/lang/Object;)V") \
-  template(sun_misc_Unsafe_putBoolean_obj_signature,  "(Ljava/lang/Object;JZ)V")                  \
-  template(sun_misc_Unsafe_putByte_obj_signature,     "(Ljava/lang/Object;JB)V")                  \
-  template(sun_misc_Unsafe_putShort_obj_signature,    "(Ljava/lang/Object;JS)V")                  \
-  template(sun_misc_Unsafe_putChar_obj_signature,     "(Ljava/lang/Object;JC)V")                  \
-  template(sun_misc_Unsafe_putInt_obj_signature,      "(Ljava/lang/Object;JI)V")                  \
-  template(sun_misc_Unsafe_putLong_obj_signature,     "(Ljava/lang/Object;JJ)V")                  \
-  template(sun_misc_Unsafe_putFloat_obj_signature,    "(Ljava/lang/Object;JF)V")                  \
-  template(sun_misc_Unsafe_putDouble_obj_signature,   "(Ljava/lang/Object;JD)V")                  \
-                                                                                                  \
-  template(sun_misc_Unsafe_getObjectVolatile_obj_signature,   "(Ljava/lang/Object;J)Ljava/lang/Object;")  \
-  template(sun_misc_Unsafe_getBooleanVolatile_obj_signature,  "(Ljava/lang/Object;J)Z")           \
-  template(sun_misc_Unsafe_getByteVolatile_obj_signature,     "(Ljava/lang/Object;J)B")           \
-  template(sun_misc_Unsafe_getShortVolatile_obj_signature,    "(Ljava/lang/Object;J)S")           \
-  template(sun_misc_Unsafe_getCharVolatile_obj_signature,     "(Ljava/lang/Object;J)C")           \
-  template(sun_misc_Unsafe_getIntVolatile_obj_signature,      "(Ljava/lang/Object;J)I")           \
-  template(sun_misc_Unsafe_getLongVolatile_obj_signature,     "(Ljava/lang/Object;J)J")           \
-  template(sun_misc_Unsafe_getFloatVolatile_obj_signature,    "(Ljava/lang/Object;J)F")           \
-  template(sun_misc_Unsafe_getDoubleVolatile_obj_signature,   "(Ljava/lang/Object;J)D")           \
-                                                                                                  \
-  template(sun_misc_Unsafe_putObjectVolatile_obj_signature,   "(Ljava/lang/Object;JLjava/lang/Object;)V") \
-  template(sun_misc_Unsafe_putBooleanVolatile_obj_signature,  "(Ljava/lang/Object;JZ)V")          \
-  template(sun_misc_Unsafe_putByteVolatile_obj_signature,     "(Ljava/lang/Object;JB)V")          \
-  template(sun_misc_Unsafe_putShortVolatile_obj_signature,    "(Ljava/lang/Object;JS)V")          \
-  template(sun_misc_Unsafe_putCharVolatile_obj_signature,     "(Ljava/lang/Object;JC)V")          \
-  template(sun_misc_Unsafe_putIntVolatile_obj_signature,      "(Ljava/lang/Object;JI)V")          \
-  template(sun_misc_Unsafe_putLongVolatile_obj_signature,     "(Ljava/lang/Object;JJ)V")          \
-  template(sun_misc_Unsafe_putFloatVolatile_obj_signature,    "(Ljava/lang/Object;JF)V")          \
-  template(sun_misc_Unsafe_putDoubleVolatile_obj_signature,   "(Ljava/lang/Object;JD)V")          \
-                                                                                                  \
-  template(sun_misc_Unsafe_getByte_raw_signature,     "(J)B")                                     \
-  template(sun_misc_Unsafe_getShort_raw_signature,    "(J)S")                                     \
-  template(sun_misc_Unsafe_getChar_raw_signature,     "(J)C")                                     \
-  template(sun_misc_Unsafe_getInt_raw_signature,      "(J)I")                                     \
-  template(sun_misc_Unsafe_getLong_raw_signature,     "(J)J")                                     \
-  template(sun_misc_Unsafe_getFloat_raw_signature,    "(J)F")                                     \
-  template(sun_misc_Unsafe_getDouble_raw_signature,   "(J)D")                                     \
-  template(sun_misc_Unsafe_getAddress_raw_signature,  "(J)J")                                     \
-                                                                                                  \
-  template(sun_misc_Unsafe_putByte_raw_signature,     "(JB)V")                                    \
-  template(sun_misc_Unsafe_putShort_raw_signature,    "(JS)V")                                    \
-  template(sun_misc_Unsafe_putChar_raw_signature,     "(JC)V")                                    \
-  template(sun_misc_Unsafe_putInt_raw_signature,      "(JI)V")                                    \
-  template(sun_misc_Unsafe_putLong_raw_signature,     "(JJ)V")                                    \
-  template(sun_misc_Unsafe_putFloat_raw_signature,    "(JF)V")                                    \
-  template(sun_misc_Unsafe_putDouble_raw_signature,   "(JD)V")                                    \
-  template(sun_misc_Unsafe_putAddress_raw_signature,  "(JJ)V")                                    \
-                                                                                                  \
-  template(sun_misc_Unsafe_allocateInstance_signature,"(Ljava/lang/Class;)Ljava/lang/Object;")    \
-                                                                                                  \
-  template(sun_misc_Unsafe_compareAndSwapObject_obj_signature,   "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z") \
-  template(sun_misc_Unsafe_compareAndSwapLong_obj_signature,  "(Ljava/lang/Object;JJJ)Z")         \
-  template(sun_misc_Unsafe_compareAndSwapInt_obj_signature,   "(Ljava/lang/Object;JII)Z")         \
-  template(sun_misc_Unsafe_putOrderedObject_obj_signature,    "(Ljava/lang/Object;JLjava/lang/Object;)V") \
-  template(sun_misc_Unsafe_putOrderedLong_obj_signature,      "(Ljava/lang/Object;JJ)V")          \
-  template(sun_misc_Unsafe_putOrderedInt_obj_signature,       "(Ljava/lang/Object;JI)V")          \
-  template(sun_misc_Unsafe_park_signature,            "(ZJ)V")                                    \
-  template(sun_misc_Unsafe_unpark_signature,          "(Ljava/lang/Object;)V")                    \
-                                                                                                  \
-  template(sun_misc_Unsafe_prefetchRead_signature,        "(Ljava/lang/Object;J)V")               \
-  template(sun_misc_Unsafe_prefetchWrite_signature,       "(Ljava/lang/Object;J)V")               \
-  template(sun_misc_Unsafe_prefetchReadStatic_signature,  "(Ljava/lang/Object;J)V")               \
-  template(sun_misc_Unsafe_prefetchWriteStatic_signature, "(Ljava/lang/Object;J)V")               \
-                                                                                                  \
-  /* Support for reflection based on dynamic bytecode generation (JDK 1.4 and above) */           \
-                                                                                                  \
-  template(sun_reflect_FieldInfo,                     "sun/reflect/FieldInfo")                    \
-  template(sun_reflect_MethodInfo,                    "sun/reflect/MethodInfo")                   \
-  template(sun_reflect_MagicAccessorImpl,             "sun/reflect/MagicAccessorImpl")            \
-  template(sun_reflect_MethodAccessorImpl,            "sun/reflect/MethodAccessorImpl")           \
-  template(sun_reflect_ConstructorAccessorImpl,       "sun/reflect/ConstructorAccessorImpl")      \
-  template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \
-  template(sun_reflect_DelegatingClassLoader,         "sun/reflect/DelegatingClassLoader")        \
-  template(sun_reflect_Reflection,                    "sun/reflect/Reflection")                   \
-  template(checkedExceptions_name,                    "checkedExceptions")                        \
-  template(clazz_name,                                "clazz")                                    \
-  template(exceptionTypes_name,                       "exceptionTypes")                           \
-  template(getClassAccessFlags_name,                  "getClassAccessFlags")                      \
-  template(getClassAccessFlags_signature,             "(Ljava/lang/Class;)I")                     \
-  template(modifiers_name,                            "modifiers")                                \
-  template(newConstructor_name,                       "newConstructor")                           \
-  template(newConstructor_signature,                  "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Constructor;") \
-  template(newField_name,                             "newField")                                 \
-  template(newField_signature,                        "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \
-  template(newMethod_name,                            "newMethod")                                \
-  template(newMethod_signature,                       "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \
-  template(override_name,                             "override")                                 \
-  template(parameterTypes_name,                       "parameterTypes")                           \
-  template(returnType_name,                           "returnType")                               \
-  template(signature_name,                            "signature")                                \
-  template(slot_name,                                 "slot")                                     \
-                                                                                                  \
-  /* Support for annotations (JDK 1.5 and above) */                                               \
-                                                                                                  \
-  template(annotations_name,                          "annotations")                              \
-  template(parameter_annotations_name,                "parameterAnnotations")                     \
-  template(annotation_default_name,                   "annotationDefault")                        \
-  template(sun_reflect_ConstantPool,                  "sun/reflect/ConstantPool")                 \
-  template(constantPoolOop_name,                      "constantPoolOop")                          \
-  template(sun_reflect_UnsafeStaticFieldAccessorImpl, "sun/reflect/UnsafeStaticFieldAccessorImpl")\
-  template(base_name,                                 "base")                                     \
-                                                                                                  \
-  /* common method names */                                                                       \
-  template(object_initializer_name,                   "<init>")                                   \
-  template(class_initializer_name,                    "<clinit>")                                 \
-  template(println_name,                              "println")                                  \
-  template(printStackTrace_name,                      "printStackTrace")                          \
-  template(main_name,                                 "main")                                     \
-  template(name_name,                                 "name")                                     \
-  template(priority_name,                             "priority")                                 \
-  template(stillborn_name,                            "stillborn")                                \
-  template(group_name,                                "group")                                    \
-  template(daemon_name,                               "daemon")                                   \
-  template(eetop_name,                                "eetop")                                    \
-  template(thread_status_name,                        "threadStatus")                             \
-  template(run_method_name,                           "run")                                      \
-  template(exit_method_name,                          "exit")                                     \
-  template(add_method_name,                           "add")                                     \
-  template(parent_name,                               "parent")                                   \
-  template(threads_name,                              "threads")                                  \
-  template(groups_name,                               "groups")                                   \
-  template(maxPriority_name,                          "maxPriority")                              \
-  template(destroyed_name,                            "destroyed")                                \
-  template(vmAllowSuspension_name,                    "vmAllowSuspension")                        \
-  template(nthreads_name,                             "nthreads")                                 \
-  template(ngroups_name,                              "ngroups")                                  \
-  template(shutdown_method_name,                      "shutdown")                                 \
-  template(finalize_method_name,                      "finalize")                                 \
-  template(register_method_name,                      "register")                                 \
-  template(reference_lock_name,                       "lock")                                     \
-  template(reference_discovered_name,                 "discovered")                               \
-  template(run_finalizers_on_exit_name,               "runFinalizersOnExit")                      \
-  template(uncaughtException_name,                    "uncaughtException")                        \
-  template(dispatchUncaughtException_name,            "dispatchUncaughtException")                \
-  template(initializeSystemClass_name,                "initializeSystemClass")                    \
-  template(loadClass_name,                            "loadClass")                                \
-  template(loadClassInternal_name,                    "loadClassInternal")                        \
-  template(put_name,                                  "put")                                      \
-  template(type_name,                                 "type")                                     \
-  template(findNative_name,                           "findNative")                               \
-  template(deadChild_name,                            "deadChild")                                \
-  template(invoke_name,                               "invoke")                                   \
-  template(addClass_name,                             "addClass")                                 \
-  template(getFromClass_name,                         "getFromClass")                             \
-  template(dispatch_name,                             "dispatch")                                 \
-  template(getSystemClassLoader_name,                 "getSystemClassLoader")                     \
-  template(fillInStackTrace_name,                     "fillInStackTrace")                         \
-  template(getCause_name,                             "getCause")                                 \
-  template(initCause_name,                            "initCause")                                \
-  template(setProperty_name,                          "setProperty")                              \
-  template(getProperty_name,                          "getProperty")                              \
-  template(context_name,                              "context")                                  \
-  template(privilegedContext_name,                    "privilegedContext")                        \
-  template(contextClassLoader_name,		      "contextClassLoader")			  \
-  template(inheritedAccessControlContext_name,        "inheritedAccessControlContext")            \
-  template(isPrivileged_name,                         "isPrivileged")                             \
-  template(compareTo_name,                            "compareTo")                                \
-  template(indexOf_name,                              "indexOf")                                  \
-  template(clone_name,                                 "clone")                                   \
-  template(wait_name,                                 "wait")                                     \
-  template(checkPackageAccess_name,                   "checkPackageAccess")                       \
-  template(stackSize_name,                            "stackSize")                                \
-  template(thread_id_name,                            "tid")                                      \
-  template(newInstance0_name,                         "newInstance0")                             \
-  template(limit_name,                                "limit")                                    \
-  template(forName_name,                              "forName")                                  \
-  template(forName0_name,                             "forName0")                                 \
-  template(isJavaIdentifierStart_name,                "isJavaIdentifierStart")                    \
-  template(isJavaIdentifierPart_name,                 "isJavaIdentifierPart")                     \
-  template(exclusive_owner_thread_name,               "exclusiveOwnerThread")                     \
-  template(park_blocker_name,                         "parkBlocker")                              \
-  template(park_event_name,                           "nativeParkEventPointer")                   \
-                                                                                                  \
-  /* common signatures names */                                                                   \
-  template(void_method_signature,                     "()V")                                      \
-  template(int_void_signature,                        "(I)V")                                     \
-  template(int_bool_signature,                        "(I)Z")                                     \
-  template(float_int_signature,                       "(F)I")                                     \
-  template(double_long_signature,                     "(D)J")                                     \
-  template(int_float_signature,                       "(I)F")                                     \
-  template(long_double_signature,                     "(J)D")                                     \
-  template(byte_signature,                            "B")                                        \
-  template(char_signature,                            "C")                                        \
-  template(double_signature,                          "D")                                        \
-  template(float_signature,                           "F")                                        \
-  template(int_signature,                             "I")                                        \
-  template(long_signature,                            "J")                                        \
-  template(short_signature,                           "S")                                        \
-  template(bool_signature,                            "Z")                                        \
-  template(void_signature,                            "V")                                        \
-  template(byte_array_signature,                      "[B")                                       \
-  template(char_array_signature,                      "[C")                                       \
-  template(register_method_signature,                 "(Ljava/lang/Object;)V")                    \
-  template(string_void_signature,                     "(Ljava/lang/String;)V")                    \
-  template(string_int_signature,                      "(Ljava/lang/String;)I")                    \
-  template(throwable_void_signature,                  "(Ljava/lang/Throwable;)V")                 \
-  template(void_throwable_signature,                  "()Ljava/lang/Throwable;")                  \
-  template(throwable_throwable_signature,             "(Ljava/lang/Throwable;)Ljava/lang/Throwable;")             \
-  template(class_void_signature,                      "(Ljava/lang/Class;)V")                     \
-  template(throwable_string_void_signature,           "(Ljava/lang/Throwable;Ljava/lang/String;)V")               \
-  template(string_array_void_signature,               "([Ljava/lang/String;)V")                                   \
-  template(string_array_string_array_void_signature,  "([Ljava/lang/String;[Ljava/lang/String;)V")                \
-  template(thread_throwable_void_signature,           "(Ljava/lang/Thread;Ljava/lang/Throwable;)V")               \
-  template(thread_void_signature,                     "(Ljava/lang/Thread;)V")                                    \
-  template(threadgroup_runnable_void_signature,       "(Ljava/lang/ThreadGroup;Ljava/lang/Runnable;)V")           \
-  template(threadgroup_string_void_signature,         "(Ljava/lang/ThreadGroup;Ljava/lang/String;)V")             \
-  template(string_class_signature,                    "(Ljava/lang/String;)Ljava/lang/Class;")                    \
-  template(object_object_object_signature,            "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
-  template(string_string_string_signature,            "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;") \
-  template(string_string_signature,                   "(Ljava/lang/String;)Ljava/lang/String;")                   \
-  template(classloader_string_long_signature,         "(Ljava/lang/ClassLoader;Ljava/lang/String;)J")             \
-  template(byte_array_void_signature,                 "([B)V")                                                    \
-  template(char_array_void_signature,                 "([C)V")                                                    \
-  template(int_int_void_signature,                    "(II)V")                                                    \
-  template(void_classloader_signature,                "()Ljava/lang/ClassLoader;")                                \
-  template(void_object_signature,                     "()Ljava/lang/Object;")                                     \
-  template(void_class_signature,                      "()Ljava/lang/Class;")                                      \
-  template(object_array_object_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
-  template(exception_void_signature,                  "(Ljava/lang/Exception;)V")                                 \
-  template(protectiondomain_signature,                "[Ljava/security/ProtectionDomain;")                        \
-  template(accesscontrolcontext_signature,            "Ljava/security/AccessControlContext;")                     \
-  template(class_protectiondomain_signature,          "(Ljava/lang/Class;Ljava/security/ProtectionDomain;)V")     \
-  template(thread_signature,                          "Ljava/lang/Thread;")                                       \
-  template(thread_array_signature,                    "[Ljava/lang/Thread;")                                      \
-  template(threadgroup_signature,                     "Ljava/lang/ThreadGroup;")                                  \
-  template(threadgroup_array_signature,               "[Ljava/lang/ThreadGroup;")                                 \
-  template(class_array_signature,                     "[Ljava/lang/Class;")                                       \
-  template(classloader_signature,		      "Ljava/lang/ClassLoader;")				  \
-  template(object_signature,                          "Ljava/lang/Object;")                                       \
-  template(class_signature,                           "Ljava/lang/Class;")                                        \
-  template(string_signature,                          "Ljava/lang/String;")                                       \
-  template(reference_signature,                       "Ljava/lang/ref/Reference;")                                \
-                                                                                                                  \
-  /* returned by the C1 compiler in case there's not enough memory to allocate a new symbol*/                     \
-  template(dummy_symbol_oop,                          "illegal symbol")                                           \
-                                                                                                                  \
-  /* used by ClassFormatError when class name is not known yet */                                                 \
-  template(unknown_class_name,                        "<Unknown>")                                                \
-                                                                                                                  \
-  /* JVM monitoring and management support */                                                                     \
-  template(java_lang_StackTraceElement_array,          "[Ljava/lang/StackTraceElement;")                          \
-  template(java_lang_management_ThreadState,           "java/lang/management/ThreadState")                        \
-  template(java_lang_management_MemoryUsage,           "java/lang/management/MemoryUsage")                        \
-  template(java_lang_management_ThreadInfo,            "java/lang/management/ThreadInfo")                         \
-  template(sun_management_ManagementFactory,           "sun/management/ManagementFactory")                        \
-  template(sun_management_Sensor,                      "sun/management/Sensor")                                   \
-  template(sun_management_Agent,                       "sun/management/Agent")                                    \
-  template(createMemoryPoolMBean_name,                 "createMemoryPoolMBean")                                   \
-  template(createMemoryManagerMBean_name,              "createMemoryManagerMBean")                                \
-  template(createGarbageCollectorMBean_name,           "createGarbageCollectorMBean")                             \
-  template(createMemoryPoolMBean_signature,            "(Ljava/lang/String;ZJJ)Ljava/lang/management/MemoryPoolMBean;") \
-  template(createMemoryManagerMBean_signature,         "(Ljava/lang/String;)Ljava/lang/management/MemoryManagerMBean;") \
-  template(createGarbageCollectorMBean_signature,      "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/management/GarbageCollectorMBean;") \
-  template(trigger_name,                               "trigger")                                                 \
-  template(clear_name,                                 "clear")                                                   \
-  template(trigger_method_signature,                   "(ILjava/lang/management/MemoryUsage;)V")                                                 \
-  template(startAgent_name,                            "startAgent")                                              \
-  template(java_lang_management_ThreadInfo_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;)V") \
-  template(java_lang_management_ThreadInfo_with_locks_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;[Ljava/lang/Object;[I[Ljava/lang/Object;)V") \
-  template(long_long_long_long_void_signature,         "(JJJJ)V")                                                 \
-                                                                                                                  \
-  template(java_lang_management_MemoryPoolMXBean,      "java/lang/management/MemoryPoolMXBean")                   \
-  template(java_lang_management_MemoryManagerMXBean,   "java/lang/management/MemoryManagerMXBean")                \
-  template(java_lang_management_GarbageCollectorMXBean,"java/lang/management/GarbageCollectorMXBean")             \
-  template(createMemoryPool_name,                      "createMemoryPool")                                        \
-  template(createMemoryManager_name,                   "createMemoryManager")                                     \
-  template(createGarbageCollector_name,                "createGarbageCollector")                                  \
-  template(createMemoryPool_signature,                 "(Ljava/lang/String;ZJJ)Ljava/lang/management/MemoryPoolMXBean;") \
-  template(createMemoryManager_signature,              "(Ljava/lang/String;)Ljava/lang/management/MemoryManagerMXBean;") \
-  template(createGarbageCollector_signature,           "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/management/GarbageCollectorMXBean;") \
-  template(addThreadDumpForMonitors_name,              "addThreadDumpForMonitors")                                \
-  template(addThreadDumpForSynchronizers_name,         "addThreadDumpForSynchronizers")                           \
-  template(addThreadDumpForMonitors_signature,         "(Ljava/lang/management/ThreadInfo;[Ljava/lang/Object;[I)V") \
-  template(addThreadDumpForSynchronizers_signature,    "(Ljava/lang/management/ThreadInfo;[Ljava/lang/Object;)V")   \
-														  \
-  /* JVMTI/java.lang.instrument support and VM Attach mechanism */						  \
-  template(sun_misc_VMSupport,			       "sun/misc/VMSupport")					  \
-  template(appendToClassPathForInstrumentation_name,   "appendToClassPathForInstrumentation")			  \
-  template(appendToClassPathForInstrumentation_signature, "(Ljava/lang/String;)V")				  \
-  template(serializePropertiesToByteArray_name,	       "serializePropertiesToByteArray")			  \
-  template(serializePropertiesToByteArray_signature,   "()[B")							  \
-  template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")			  \
-  template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
- 
-                
-// Name macros
-
-#define VM_SYMBOL_ENUM_NAME(name)    name##_enum
-#define VM_SYMBOL_ENUM(name, string) VM_SYMBOL_ENUM_NAME(name),
-
-
-// Declaration macros
-
-#define VM_SYMBOL_DECLARE(name, string) \
-  static symbolOop name() { return _symbols[VM_SYMBOL_ENUM_NAME(name)]; }
-
-#define VM_SYMBOL_HANDLE_DECLARE(name, string) \
-  static symbolHandle name() { return symbol_handle_at(vmSymbols::VM_SYMBOL_ENUM_NAME(name)); }
-
-
-// Class vmSymbols
-
-class vmSymbols: AllStatic {
- friend class vmSymbolHandles;
- public:
-  // enum for figuring positions and size of array holding symbolOops
-  enum {
-    VM_SYMBOLS_DO(VM_SYMBOL_ENUM)
-    vm_symbols_terminating_enum
-  };
-
- private:
-  // The symbol array
-  static symbolOop _symbols[];
-
-  // Field signatures indexed by BasicType.
-  static symbolOop _type_signatures[T_VOID+1];
-
- public:
-  // Initialization
-  static void initialize(TRAPS);
-  // Accessing
-  VM_SYMBOLS_DO(VM_SYMBOL_DECLARE)
-  // GC support
-  static void oops_do(OopClosure* f, bool do_all = false);
-
-  static symbolOop type_signature(BasicType t) {
-    assert((uint)t < T_VOID+1, "range check");
-    assert(_type_signatures[t] != NULL, "domain check");
-    return _type_signatures[t];
-  }
-  // inverse of type_signature; returns T_OBJECT if s is not recognized
-  static BasicType signature_type(symbolOop s);
-};
-
-
-// Class vmSymbolHandles
-
-class vmSymbolHandles: AllStatic {
-  friend class vmIntrinsics;
-  friend class ciObjectFactory;
-  static int symbol_handle_count() { return (int)vmSymbols::vm_symbols_terminating_enum; }
-  static symbolHandle symbol_handle_at(int n) { return symbolHandle(&vmSymbols::_symbols[n], false); }
-
- public:
-  // Accessing
-  VM_SYMBOLS_DO(VM_SYMBOL_HANDLE_DECLARE)
-
-  static symbolHandle type_signature(BasicType t) {
-    assert(vmSymbols::type_signature(t) != NULL, "domain check");
-    return symbolHandle(&vmSymbols::_type_signatures[t], false);
-  }
-  // inverse of type_signature; returns T_OBJECT if s is not recognized
-  static BasicType signature_type(symbolHandle s) {
-    return vmSymbols::signature_type(s());
-  }
-};
-
-
-#undef VM_SYMBOL_ENUM
-#undef VM_SYMBOL_DECLARE
-#undef VM_SYMBOL_HANDLE_DECLARE
-
-// Define here intrinsics known to the runtime and the CI.
-// Each intrinsic consists of a public enum name (like _hash),
-// followed by a specification of its klass, name, and signature:
-//    template(<id>,  <klass>,  <name>,<sig>)
-//
-// If you add an intrinsic here, you must also add its name
-// and signature as VM symbols above.  The VM symbols for
-// the intrinsic name and signature are <klass>_<name>_name
-// and <klass>_<sig>_signature.
-//
-// For example:
-//    template(_hash,  java_lang_Object,  hashCode,hashCode)
-// klass      = vmSymbols::java_lang_Object()
-// name       = vmSymbols::java_lang_Object_hashCode_name()
-// signature  = vmSymbols::java_lang_Object_hashCode_signature()
-//
-// (Note that the <name> and <sig> are often, but not always, the same.)
-
-#define VM_INTRINSICS_DO(template)                                                                \
-  template(_hash,             java_lang_Object,   hashCode,hashCode)                              \
-  template(_Object_init,      java_lang_Object,   object_initializer,void_method)                 \
-  template(_getClass,         java_lang_Object,   getClass,getClass)                              \
-  template(_reverseBytes_i,   java_lang_Integer,  reverseBytes,reverseBytes)                      \
-  template(_reverseBytes_l,   java_lang_Long,     reverseBytes,reverseBytes)                      \
-  template(_identityHash,     java_lang_System,   identityHashCode,identityHashCode)              \
-  template(_dabs,             java_lang_Math,     abs,abs)                                        \
-  template(_dsin,             java_lang_Math,     sin,sin)                                        \
-  template(_dcos,             java_lang_Math,     cos,cos)                                        \
-  template(_dtan,             java_lang_Math,     tan,tan)                                        \
-  template(_datan2,           java_lang_Math,     atan2,atan2)                                    \
-  template(_dsqrt,            java_lang_Math,     sqrt,sqrt)                                      \
-  template(_dlog,             java_lang_Math,     log,log)                                        \
-  template(_dlog10,           java_lang_Math,     log10,log10)                                    \
-  template(_dpow,             java_lang_Math,     pow,pow)                                        \
-  template(_dexp,             java_lang_Math,     exp,exp)                                        \
-  template(_floatToRawIntBits,java_lang_Float,    floatToRawIntBits,float_int)                    \
-  template(_floatToIntBits,   java_lang_Float,    floatToIntBits,float_int)                       \
-  template(_intBitsToFloat,   java_lang_Float,    intBitsToFloat,int_float)                       \
-  template(_doubleToRawLongBits,java_lang_Double, doubleToRawLongBits,double_long)                \
-  template(_doubleToLongBits, java_lang_Double,   doubleToLongBits,double_long)                   \
-  template(_longBitsToDouble, java_lang_Double,   longBitsToDouble,long_double)                   \
-  template(_arraycopy,        java_lang_System,   arraycopy,arraycopy)                            \
-  template(_currentTimeMillis,java_lang_System,   currentTimeMillis,currentTimeMillis)            \
-  template(_nanoTime,         java_lang_System,   nanoTime,nanoTime)                              \
-  template(_currentThread,    java_lang_Thread,   currentThread,currentThread)                    \
-  template(_isInterrupted,    java_lang_Thread,   isInterrupted,isInterrupted)                    \
-  template(_isAssignableFrom, java_lang_Class,    isAssignableFrom,isAssignableFrom)              \
-  template(_isInstance,       java_lang_Class,    isInstance,isInstance)                          \
-  template(_getModifiers,     java_lang_Class,    getModifiers,getModifiers)                      \
-  template(_getClassAccessFlags,sun_reflect_Reflection,getClassAccessFlags,getClassAccessFlags)   \
-  template(_compareTo,        java_lang_String,   compareTo,compareTo)                            \
-  template(_indexOf,          java_lang_String,   indexOf,indexOf)                                \
-  template(_checkIndex,       java_nio_Buffer,    checkIndex,checkIndex)                          \
-  template(_attemptUpdate,    sun_misc_AtomicLongCSImpl, attemptUpdate,attemptUpdate)             \
-  /* %%% the following xxx_obj32 are temporary until the 1.4.0 sun.misc.Unsafe goes away */       \
-  template(_getObject_obj32,  sun_misc_Unsafe,    getObject,getObject_obj32)                      \
-  template(_getBoolean_obj32, sun_misc_Unsafe,    getBoolean,getBoolean_obj32)                    \
-  template(_getByte_obj32,    sun_misc_Unsafe,    getByte,getByte_obj32)                          \
-  template(_getShort_obj32,   sun_misc_Unsafe,    getShort,getShort_obj32)                        \
-  template(_getChar_obj32,    sun_misc_Unsafe,    getChar,getChar_obj32)                          \
-  template(_getInt_obj32,     sun_misc_Unsafe,    getInt,getInt_obj32)                            \
-  template(_getLong_obj32,    sun_misc_Unsafe,    getLong,getLong_obj32)                          \
-  template(_getFloat_obj32,   sun_misc_Unsafe,    getFloat,getFloat_obj32)                        \
-  template(_getDouble_obj32,  sun_misc_Unsafe,    getDouble,getDouble_obj32)                      \
-  template(_putObject_obj32,  sun_misc_Unsafe,    putObject,putObject_obj32)                      \
-  template(_putBoolean_obj32, sun_misc_Unsafe,    putBoolean,putBoolean_obj32)                    \
-  template(_putByte_obj32,    sun_misc_Unsafe,    putByte,putByte_obj32)                          \
-  template(_putShort_obj32,   sun_misc_Unsafe,    putShort,putShort_obj32)                        \
-  template(_putChar_obj32,    sun_misc_Unsafe,    putChar,putChar_obj32)                          \
-  template(_putInt_obj32,     sun_misc_Unsafe,    putInt,putInt_obj32)                            \
-  template(_putLong_obj32,    sun_misc_Unsafe,    putLong,putLong_obj32)                          \
-  template(_putFloat_obj32,   sun_misc_Unsafe,    putFloat,putFloat_obj32)                        \
-  template(_putDouble_obj32,  sun_misc_Unsafe,    putDouble,putDouble_obj32)                      \
-  template(_getObject_obj,    sun_misc_Unsafe,    getObject,getObject_obj)                        \
-  template(_getBoolean_obj,   sun_misc_Unsafe,    getBoolean,getBoolean_obj)                      \
-  template(_getByte_obj,      sun_misc_Unsafe,    getByte,getByte_obj)                            \
-  template(_getShort_obj,     sun_misc_Unsafe,    getShort,getShort_obj)                          \
-  template(_getChar_obj,      sun_misc_Unsafe,    getChar,getChar_obj)                            \
-  template(_getInt_obj,       sun_misc_Unsafe,    getInt,getInt_obj)                              \
-  template(_getLong_obj,      sun_misc_Unsafe,    getLong,getLong_obj)                            \
-  template(_getFloat_obj,     sun_misc_Unsafe,    getFloat,getFloat_obj)                          \
-  template(_getDouble_obj,    sun_misc_Unsafe,    getDouble,getDouble_obj)                        \
-  template(_putObject_obj,    sun_misc_Unsafe,    putObject,putObject_obj)                        \
-  template(_putBoolean_obj,   sun_misc_Unsafe,    putBoolean,putBoolean_obj)                      \
-  template(_putByte_obj,      sun_misc_Unsafe,    putByte,putByte_obj)                            \
-  template(_putShort_obj,     sun_misc_Unsafe,    putShort,putShort_obj)                          \
-  template(_putChar_obj,      sun_misc_Unsafe,    putChar,putChar_obj)                            \
-  template(_putInt_obj,       sun_misc_Unsafe,    putInt,putInt_obj)                              \
-  template(_putLong_obj,      sun_misc_Unsafe,    putLong,putLong_obj)                            \
-  template(_putFloat_obj,     sun_misc_Unsafe,    putFloat,putFloat_obj)                          \
-  template(_putDouble_obj,    sun_misc_Unsafe,    putDouble,putDouble_obj)                        \
-  template(_getObjectVolatile_obj,    sun_misc_Unsafe,    getObjectVolatile,getObjectVolatile_obj)   \
-  template(_getBooleanVolatile_obj,   sun_misc_Unsafe,    getBooleanVolatile,getBooleanVolatile_obj) \
-  template(_getByteVolatile_obj,      sun_misc_Unsafe,    getByteVolatile,getByteVolatile_obj)       \
-  template(_getShortVolatile_obj,     sun_misc_Unsafe,    getShortVolatile,getShortVolatile_obj)     \
-  template(_getCharVolatile_obj,      sun_misc_Unsafe,    getCharVolatile,getCharVolatile_obj)       \
-  template(_getIntVolatile_obj,       sun_misc_Unsafe,    getIntVolatile,getIntVolatile_obj)         \
-  template(_getLongVolatile_obj,      sun_misc_Unsafe,    getLongVolatile,getLongVolatile_obj)       \
-  template(_getFloatVolatile_obj,     sun_misc_Unsafe,    getFloatVolatile,getFloatVolatile_obj)     \
-  template(_getDoubleVolatile_obj,    sun_misc_Unsafe,    getDoubleVolatile,getDoubleVolatile_obj)   \
-  template(_putObjectVolatile_obj,    sun_misc_Unsafe,    putObjectVolatile,putObjectVolatile_obj)   \
-  template(_putBooleanVolatile_obj,   sun_misc_Unsafe,    putBooleanVolatile,putBooleanVolatile_obj) \
-  template(_putByteVolatile_obj,      sun_misc_Unsafe,    putByteVolatile,putByteVolatile_obj)       \
-  template(_putShortVolatile_obj,     sun_misc_Unsafe,    putShortVolatile,putShortVolatile_obj)     \
-  template(_putCharVolatile_obj,      sun_misc_Unsafe,    putCharVolatile,putCharVolatile_obj)       \
-  template(_putIntVolatile_obj,       sun_misc_Unsafe,    putIntVolatile,putIntVolatile_obj)         \
-  template(_putLongVolatile_obj,      sun_misc_Unsafe,    putLongVolatile,putLongVolatile_obj)       \
-  template(_putFloatVolatile_obj,     sun_misc_Unsafe,    putFloatVolatile,putFloatVolatile_obj)     \
-  template(_putDoubleVolatile_obj,    sun_misc_Unsafe,    putDoubleVolatile,putDoubleVolatile_obj)   \
-  template(_getByte_raw,      sun_misc_Unsafe,    getByte,getByte_raw)                            \
-  template(_getShort_raw,     sun_misc_Unsafe,    getShort,getShort_raw)                          \
-  template(_getChar_raw,      sun_misc_Unsafe,    getChar,getChar_raw)                            \
-  template(_getInt_raw,       sun_misc_Unsafe,    getInt,getInt_raw)                              \
-  template(_getLong_raw,      sun_misc_Unsafe,    getLong,getLong_raw)                            \
-  template(_getFloat_raw,     sun_misc_Unsafe,    getFloat,getFloat_raw)                          \
-  template(_getDouble_raw,    sun_misc_Unsafe,    getDouble,getDouble_raw)                        \
-  template(_getAddress_raw,   sun_misc_Unsafe,    getAddress,getAddress_raw)                      \
-  template(_putByte_raw,      sun_misc_Unsafe,    putByte,putByte_raw)                            \
-  template(_putShort_raw,     sun_misc_Unsafe,    putShort,putShort_raw)                          \
-  template(_putChar_raw,      sun_misc_Unsafe,    putChar,putChar_raw)                            \
-  template(_putInt_raw,       sun_misc_Unsafe,    putInt,putInt_raw)                              \
-  template(_putLong_raw,      sun_misc_Unsafe,    putLong,putLong_raw)                            \
-  template(_putFloat_raw,     sun_misc_Unsafe,    putFloat,putFloat_raw)                          \
-  template(_putDouble_raw,    sun_misc_Unsafe,    putDouble,putDouble_raw)                        \
-  template(_putAddress_raw,   sun_misc_Unsafe,    putAddress,putAddress_raw)                      \
-  template(_allocateInstance, sun_misc_Unsafe,    allocateInstance,allocateInstance)              \
-  template(_compareAndSwapObject_obj,  sun_misc_Unsafe,    compareAndSwapObject,compareAndSwapObject_obj)   \
-  template(_compareAndSwapLong_obj,    sun_misc_Unsafe,    compareAndSwapLong,compareAndSwapLong_obj)       \
-  template(_compareAndSwapInt_obj,     sun_misc_Unsafe,    compareAndSwapInt,compareAndSwapInt_obj)         \
-  template(_putOrderedObject_obj,  sun_misc_Unsafe,    putOrderedObject,putOrderedObject_obj)     \
-  template(_putOrderedLong_obj,    sun_misc_Unsafe,    putOrderedLong,putOrderedLong_obj)         \
-  template(_putOrderedInt_obj,     sun_misc_Unsafe,    putOrderedInt,putOrderedInt_obj)           \
-  template(_park,             sun_misc_Unsafe,    park,park)                                      \
-  template(_unpark,           sun_misc_Unsafe,    unpark,unpark)                                  \
-  template(_prefetchRead,        sun_misc_Unsafe, prefetchRead,prefetchRead)                      \
-  template(_prefetchWrite,       sun_misc_Unsafe, prefetchWrite,prefetchWrite)                    \
-  template(_prefetchReadStatic,  sun_misc_Unsafe, prefetchReadStatic,prefetchReadStatic)          \
-  template(_prefetchWriteStatic, sun_misc_Unsafe, prefetchWriteStatic,prefetchWriteStatic)        \
-
-    /*end*/
-
-#define VM_INTRINSIC_ENUM(id, klass, name, sig)  id,
-
-// VM Intrinsic ID's uniquely identify some very special methods
-class vmIntrinsics: AllStatic {
-  friend class vmSymbols;
-  friend class ciObjectFactory;
-
- public:
-  // Accessing
-  enum ID {
-    _none = 0,                      // not an intrinsic (default answer)
-    VM_INTRINSICS_DO(VM_INTRINSIC_ENUM)
-    _vm_intrinsics_terminating_enum
-  };
-
-public:
-  static ID ID_from(int raw_id) {
-    assert(raw_id >= (int)_none && raw_id < (int)_vm_intrinsics_terminating_enum,
-	   "must be a valid intrinsic ID");
-    return (ID)raw_id;
-  }
-
-  static const char* name_at(int raw_id);
-};
-
-#undef VM_INTRINSIC_ENUM
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)constantPoolOop.hpp	1.103 07/05/05 17:06:00 JVM"
+#pragma ident "@(#)constantPoolOop.hpp	1.104 07/05/17 15:55:26 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -86,10 +86,6 @@
     assert(is_within_bounds(which), "index out of bounds");
     return (jdouble*) &base()[which];
   }
- 
-#ifdef JVMPI_SUPPORT
-  friend class ObjectDumper;                // JVMPI support
-#endif // JVMPI_SUPPORT
 
  public:
   typeArrayOop tags() const                 { return _tags; }
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)instanceKlass.cpp	1.320 07/05/05 17:06:01 JVM"
+#pragma ident "@(#)instanceKlass.cpp	1.321 07/05/17 15:55:40 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -587,34 +587,10 @@
         objArrayKlassKlass* oakk =
           (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
 
-#ifdef JVMPI_SUPPORT
-        // We grab locks above and the allocate_objArray_klass() code
-        // path needs to post OBJECT_ALLOC events for the newly
-        // allocated objects. We can't post events while holding locks
-        // so we store the information on the side until after we
-        // release the locks.
-        if (Universe::jvmpi_alloc_event_enabled()) {
-          jt->set_deferred_obj_alloc_events(
-            new (ResourceObj::C_HEAP) 
-                GrowableArray<DeferredObjAllocEvent *>(1, true));
-        }
-#endif // JVMPI_SUPPORT
         klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);                  
         this_oop->set_array_klasses(k);
       }
     }
-
-#ifdef JVMPI_SUPPORT
-    GrowableArray<DeferredObjAllocEvent *>* deferred_list =
-      jt->deferred_obj_alloc_events();
-    if (deferred_list != NULL) {
-      if (deferred_list->length() > 0) {
-        Universe::jvmpi_post_deferred_obj_alloc_events(deferred_list);
-      }
-      jt->set_deferred_obj_alloc_events(NULL);
-      delete deferred_list;
-    }
-#endif // JVMPI_SUPPORT
   }
   // _this will always be set at this point
   objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
--- a/hotspot/src/share/vm/oops/klass.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/klass.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)klass.hpp	1.140 07/05/05 17:06:03 JVM"
+#pragma ident "@(#)klass.hpp	1.141 07/05/17 15:55:46 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -171,15 +171,20 @@
   //
   // For arrays, layout helper is a negative number, containing four
   // distinct bytes, as follows:
-  //    MSB:[tag, ebt, hsz, log2(esz)]:LSB
+  //    MSB:[tag, hsz, ebt, log2(esz)]:LSB
   // where:
-  //    tag is 0x80 if the elements are non-oops, 0xC0 if oops
+  //    tag is 0x80 if the elements are oops, 0xC0 if non-oops
+  //    hsz is array header size in bytes (i.e., offset of first element)
   //    ebt is the BasicType of the elements
   //    esz is the element size in bytes
-  //    hsz is array header size in bytes (i.e., offset of first element)
   // This packed word is arranged so as to be quickly unpacked by the
   // various fast paths that use the various subfields.
   //
+  // The esz bits can be used directly by a SLL instruction, without masking.
+  //
+  // Note that the array-kind tag looks like 0x00 for instance klasses,
+  // since their length in bytes is always less than 24Mb.
+  //
   // Final note:  This comes first, immediately after Klass_vtbl,
   // because it is frequently queried.
   jint        _layout_helper;
--- a/hotspot/src/share/vm/oops/methodKlass.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)methodKlass.cpp	1.118 07/05/05 17:06:05 JVM"
+#pragma ident "@(#)methodKlass.cpp	1.119 07/05/17 15:56:56 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -71,12 +71,14 @@
   m->set_constants(NULL);
   m->set_max_stack(0);
   m->set_max_locals(0);
+  m->clear_intrinsic_id_cache();
   m->set_method_data(NULL);
   m->set_interpreter_throwout_count(0);
   m->set_vtable_index(methodOopDesc::garbage_vtable_index);  
 
   // Fix and bury in methodOop 
   m->set_interpreter_entry(NULL); // sets i2i entry and from_int
+  m->set_highest_tier_compile(CompLevel_none);
   m->set_adapter_entry(NULL); 
   m->clear_code(); // from_c/from_i get set to c2i/i2i
 
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)methodOop.cpp	1.310 07/05/05 17:06:00 JVM"
+#pragma ident "@(#)methodOop.cpp	1.311 07/05/17 15:57:00 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -818,345 +818,42 @@
   return newm;
 }
 
-
-// This function must be factored out from methodOopDesc::intrinsic_id,
-// or else some compilers will take the resulting huge combined function
-// and miscompile it.  (The solx86 fastdebug build was compiling this
-// with a huge stack frame, which was defeating overflow detection!)
-vmIntrinsics::ID unsafe_intrinsic_id(symbolOop name, symbolOop signature, bool is_static) {
-  if (is_static) {
-    if        (name == vmSymbols::sun_misc_Unsafe_prefetchReadStatic_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_prefetchReadStatic_signature ()) return vmIntrinsics::_prefetchReadStatic;
-    } else if (name == vmSymbols::sun_misc_Unsafe_prefetchWriteStatic_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_prefetchWriteStatic_signature()) return vmIntrinsics::_prefetchWriteStatic;
-    }
-
-  } else {
-    if        (name == vmSymbols::sun_misc_Unsafe_getObject_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getObject_obj32_signature ()) return vmIntrinsics::_getObject_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getObject_obj_signature   ()) return vmIntrinsics::_getObject_obj;
-      // no 'raw' version of getObject
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getBoolean_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getBoolean_obj32_signature()) return vmIntrinsics::_getBoolean_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getBoolean_obj_signature  ()) return vmIntrinsics::_getBoolean_obj;
-      // no 'raw' version of getBoolean
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getByte_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getByte_obj32_signature   ()) return vmIntrinsics::_getByte_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getByte_obj_signature     ()) return vmIntrinsics::_getByte_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getByte_raw_signature     ()) return vmIntrinsics::_getByte_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getShort_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getShort_obj32_signature  ()) return vmIntrinsics::_getShort_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getShort_obj_signature    ()) return vmIntrinsics::_getShort_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getShort_raw_signature    ()) return vmIntrinsics::_getShort_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getChar_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getChar_obj32_signature   ()) return vmIntrinsics::_getChar_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getChar_obj_signature     ()) return vmIntrinsics::_getChar_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getChar_raw_signature     ()) return vmIntrinsics::_getChar_raw;
+vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
+  assert(vmIntrinsics::_none == 0, "correct coding of default case");
+  const uintptr_t max_cache_uint = right_n_bits((int)(sizeof(_intrinsic_id_cache) * BitsPerByte));
+  assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_cache_uint, "else fix cache size");
+  // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
+  // because we are not loading from core libraries
+  if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
 
-    } else if (name == vmSymbols::sun_misc_Unsafe_getInt_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getInt_obj32_signature    ()) return vmIntrinsics::_getInt_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getInt_obj_signature      ()) return vmIntrinsics::_getInt_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getInt_raw_signature      ()) return vmIntrinsics::_getInt_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getLong_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getLong_obj32_signature   ()) return vmIntrinsics::_getLong_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getLong_obj_signature     ()) return vmIntrinsics::_getLong_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getLong_raw_signature     ()) return vmIntrinsics::_getLong_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getFloat_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getFloat_obj32_signature  ()) return vmIntrinsics::_getFloat_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getFloat_obj_signature    ()) return vmIntrinsics::_getFloat_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getFloat_raw_signature    ()) return vmIntrinsics::_getFloat_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getDouble_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getDouble_obj32_signature ()) return vmIntrinsics::_getDouble_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_getDouble_obj_signature   ()) return vmIntrinsics::_getDouble_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_getDouble_raw_signature   ()) return vmIntrinsics::_getDouble_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_getAddress_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getAddress_raw_signature  ()) return vmIntrinsics::_getAddress_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putObject_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putObject_obj32_signature ()) return vmIntrinsics::_putObject_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putObject_obj_signature   ()) return vmIntrinsics::_putObject_obj;
-      // no 'raw' version of putObject
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putBoolean_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putBoolean_obj32_signature()) return vmIntrinsics::_putBoolean_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putBoolean_obj_signature  ()) return vmIntrinsics::_putBoolean_obj;
-      // no 'raw' version of putBoolean
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putByte_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putByte_obj32_signature   ()) return vmIntrinsics::_putByte_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putByte_obj_signature     ()) return vmIntrinsics::_putByte_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putByte_raw_signature     ()) return vmIntrinsics::_putByte_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putShort_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putShort_obj32_signature  ()) return vmIntrinsics::_putShort_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putShort_obj_signature    ()) return vmIntrinsics::_putShort_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putShort_raw_signature    ()) return vmIntrinsics::_putShort_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putChar_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putChar_obj32_signature   ()) return vmIntrinsics::_putChar_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putChar_obj_signature     ()) return vmIntrinsics::_putChar_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putChar_raw_signature     ()) return vmIntrinsics::_putChar_raw;
+  // see if the klass name is well-known:
+  symbolOop klass_name    = instanceKlass::cast(method_holder())->name();
+  vmSymbols::SID klass_id = vmSymbols::find_sid(klass_name);
+  if (klass_id == vmSymbols::NO_SID)  return vmIntrinsics::_none;
 
-    } else if (name == vmSymbols::sun_misc_Unsafe_putInt_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putInt_obj32_signature    ()) return vmIntrinsics::_putInt_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putInt_obj_signature      ()) return vmIntrinsics::_putInt_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putInt_raw_signature      ()) return vmIntrinsics::_putInt_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putLong_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putLong_obj32_signature   ()) return vmIntrinsics::_putLong_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putLong_obj_signature     ()) return vmIntrinsics::_putLong_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putLong_raw_signature     ()) return vmIntrinsics::_putLong_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putFloat_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putFloat_obj32_signature  ()) return vmIntrinsics::_putFloat_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putFloat_obj_signature    ()) return vmIntrinsics::_putFloat_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putFloat_raw_signature    ()) return vmIntrinsics::_putFloat_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putDouble_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putDouble_obj32_signature ()) return vmIntrinsics::_putDouble_obj32;
-      if (signature == vmSymbols::sun_misc_Unsafe_putDouble_obj_signature   ()) return vmIntrinsics::_putDouble_obj;
-      if (signature == vmSymbols::sun_misc_Unsafe_putDouble_raw_signature   ()) return vmIntrinsics::_putDouble_raw;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putAddress_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putAddress_raw_signature  ()) return vmIntrinsics::_putAddress_raw;
+  // ditto for method and signature:
+  vmSymbols::SID  name_id = vmSymbols::find_sid(name());
+  if (name_id  == vmSymbols::NO_SID)  return vmIntrinsics::_none;
+  vmSymbols::SID   sig_id = vmSymbols::find_sid(signature());
+  if (sig_id   == vmSymbols::NO_SID)  return vmIntrinsics::_none;
+  jshort flags = access_flags().as_short();
 
-    } else if (name == vmSymbols::sun_misc_Unsafe_getObjectVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getObjectVolatile_obj_signature   ()) return vmIntrinsics::_getObjectVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getBooleanVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getBooleanVolatile_obj_signature  ()) return vmIntrinsics::_getBooleanVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getByteVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getByteVolatile_obj_signature     ()) return vmIntrinsics::_getByteVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getShortVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getShortVolatile_obj_signature    ()) return vmIntrinsics::_getShortVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getCharVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getCharVolatile_obj_signature     ()) return vmIntrinsics::_getCharVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getIntVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getIntVolatile_obj_signature      ()) return vmIntrinsics::_getIntVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getLongVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getLongVolatile_obj_signature     ()) return vmIntrinsics::_getLongVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getFloatVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getFloatVolatile_obj_signature    ()) return vmIntrinsics::_getFloatVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_getDoubleVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_getDoubleVolatile_obj_signature   ()) return vmIntrinsics::_getDoubleVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putObjectVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putObjectVolatile_obj_signature   ()) return vmIntrinsics::_putObjectVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putBooleanVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putBooleanVolatile_obj_signature  ()) return vmIntrinsics::_putBooleanVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putByteVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putByteVolatile_obj_signature     ()) return vmIntrinsics::_putByteVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putShortVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putShortVolatile_obj_signature    ()) return vmIntrinsics::_putShortVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putCharVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putCharVolatile_obj_signature     ()) return vmIntrinsics::_putCharVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putIntVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putIntVolatile_obj_signature      ()) return vmIntrinsics::_putIntVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putLongVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putLongVolatile_obj_signature     ()) return vmIntrinsics::_putLongVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putFloatVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putFloatVolatile_obj_signature    ()) return vmIntrinsics::_putFloatVolatile_obj;
-    } else if (name == vmSymbols::sun_misc_Unsafe_putDoubleVolatile_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putDoubleVolatile_obj_signature   ()) return vmIntrinsics::_putDoubleVolatile_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_allocateInstance_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_allocateInstance_signature()) return vmIntrinsics::_allocateInstance;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_compareAndSwapObject_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_compareAndSwapObject_obj_signature()) return vmIntrinsics::_compareAndSwapObject_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_compareAndSwapInt_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_compareAndSwapInt_obj_signature()) return vmIntrinsics::_compareAndSwapInt_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_compareAndSwapLong_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_compareAndSwapLong_obj_signature()) return vmIntrinsics::_compareAndSwapLong_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putOrderedObject_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putOrderedObject_obj_signature()) return vmIntrinsics::_putOrderedObject_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putOrderedInt_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putOrderedInt_obj_signature()) return vmIntrinsics::_putOrderedInt_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_putOrderedLong_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_putOrderedLong_obj_signature()) return vmIntrinsics::_putOrderedLong_obj;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_park_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_park_signature()) return vmIntrinsics::_park;
-    } else if (name == vmSymbols::sun_misc_Unsafe_unpark_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_unpark_signature()) return vmIntrinsics::_unpark;
-
-    } else if (name == vmSymbols::sun_misc_Unsafe_prefetchRead_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_prefetchRead_signature ()) return vmIntrinsics::_prefetchRead;
-    } else if (name == vmSymbols::sun_misc_Unsafe_prefetchWrite_name()) {
-      if (signature == vmSymbols::sun_misc_Unsafe_prefetchWrite_signature()) return vmIntrinsics::_prefetchWrite;
+  // A few slightly irregular cases:
+  switch (klass_id) {
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):
+    // Second chance: check in regular Math.
+    switch (name_id) {
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(min_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(max_name):
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
+      // pretend it is the corresponding method in the non-strict class:
+      klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
+      break;
     }
   }
 
-  return vmIntrinsics::_none;
-}
-
-// Factor out java.lang.Class intrinsics, since the decision tree is somewhat bulky.
-vmIntrinsics::ID class_intrinsic_id(symbolOop name, symbolOop signature) {
-  if        (name == vmSymbols::java_lang_Class_isAssignableFrom_name()) {
-    if (signature == vmSymbols::java_lang_Class_isAssignableFrom_signature()) return vmIntrinsics::_isAssignableFrom;
-  } else if (name == vmSymbols::java_lang_Class_isInstance_name()) {
-    if (signature == vmSymbols::java_lang_Class_isInstance_signature()) return vmIntrinsics::_isInstance;
-  } else if (name == vmSymbols::java_lang_Class_getModifiers_name()) {
-    if (signature == vmSymbols::java_lang_Class_getModifiers_signature()) return vmIntrinsics::_getModifiers;
-  }
-  return vmIntrinsics::_none;
-}
-
-// Factor out java.lang.Math intrinsics, since the decision tree is somewhat bulky.
-// Special case the sqrt, min, max functions because the Math and StrictMath versions
-// are identical for all cases.
-// This allows the hardware to be used for all sqrt, min, max implementations.
-vmIntrinsics::ID math_intrinsic_id(symbolOop name, symbolOop signature, bool is_strict) {
-  if        (name == vmSymbols::java_lang_Math_sqrt_name()) {
-    if (signature == vmSymbols::java_lang_Math_sqrt_signature()) return vmIntrinsics::_dsqrt;
-
-  } else if (is_strict) {
-    // No more intrinsics shared with StrictMath, so fall through...
-
-  } else if (name == vmSymbols::java_lang_Math_sin_name()) {
-    if (signature == vmSymbols::java_lang_Math_sin_signature()) return vmIntrinsics::_dsin;
-
-  } else if (name == vmSymbols::java_lang_Math_cos_name()) {
-    if (signature == vmSymbols::java_lang_Math_cos_signature()) return vmIntrinsics::_dcos;
-
-  } else if (name == vmSymbols::java_lang_Math_tan_name()) {
-    if (signature == vmSymbols::java_lang_Math_tan_signature()) return vmIntrinsics::_dtan;
-
-  } else if (name == vmSymbols::java_lang_Math_atan2_name()) {
-    if (signature == vmSymbols::java_lang_Math_atan2_signature()) return vmIntrinsics::_datan2;
-
-  } else if (name == vmSymbols::java_lang_Math_abs_name()) {
-    if (signature == vmSymbols::java_lang_Math_abs_signature()) return vmIntrinsics::_dabs;   
-
-  } else if (name == vmSymbols::java_lang_Math_exp_name()) {
-    if (signature == vmSymbols::java_lang_Math_exp_signature()) return vmIntrinsics::_dexp;  
-
-  } else if (name == vmSymbols::java_lang_Math_log_name()) {
-    if (signature == vmSymbols::java_lang_Math_log_signature()) return vmIntrinsics::_dlog;
-
-  } else if (name == vmSymbols::java_lang_Math_log10_name()) {
-    if (signature == vmSymbols::java_lang_Math_log10_signature()) return vmIntrinsics::_dlog10;
-
-  } else if (name == vmSymbols::java_lang_Math_pow_name()) {
-    if (signature == vmSymbols::java_lang_Math_pow_signature()) return vmIntrinsics::_dpow;
-  }
-  return vmIntrinsics::_none;
-}
-
-vmIntrinsics::ID methodOopDesc::intrinsic_id() const {
-  assert(vmIntrinsics::_none == 0, "correct coding of default case");
-  // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
-  // because we are not loading from core libraries
-  if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
   // return intrinsic id if any
-  symbolOop klass_name = instanceKlass::cast(method_holder())->name();
-  if (klass_name == vmSymbols::java_lang_Object() && !is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::java_lang_Object_hashCode_name()) {
-      if (signature() == vmSymbols::java_lang_Object_hashCode_signature()) return vmIntrinsics::_hash;
-    
-    } else if (name() == vmSymbols::java_lang_Object_getClass_name()) {
-      if (signature() == vmSymbols::java_lang_Object_getClass_signature()) return vmIntrinsics::_getClass;
-    } else if (name() == vmSymbols::object_initializer_name()) {
-      if (signature() == vmSymbols::void_method_signature()) return vmIntrinsics::_Object_init;
-    }
-
-    // Bytes switch optimization on specific hardware
-  } else if (klass_name == vmSymbols::java_lang_Integer() && is_static() && !is_synchronized()) {
-    if (name() == vmSymbols::java_lang_Integer_reverseBytes_name()) {
-      if (signature() == vmSymbols::java_lang_Integer_reverseBytes_signature()) return vmIntrinsics::_reverseBytes_i;
-    }
-
-  } else if (klass_name == vmSymbols::java_lang_Long() && is_static() && !is_synchronized()) {
-    if (name() == vmSymbols::java_lang_Long_reverseBytes_name()) {
-      if (signature() ==  vmSymbols::java_lang_Long_reverseBytes_signature()) return vmIntrinsics::_reverseBytes_l;
-    }
-
-  } else if ((klass_name == vmSymbols::java_lang_Math() ||
-              klass_name == vmSymbols::java_lang_StrictMath())
-             && is_static() && !is_synchronized()) {
-    vmIntrinsics::ID id = math_intrinsic_id(name(), signature(),
-                            (klass_name == vmSymbols::java_lang_StrictMath()));
-    if (id != vmIntrinsics::_none)  return id;
-  } else if (klass_name == vmSymbols::java_lang_Double() && is_static() && !is_synchronized()) {
-    if (name() == vmSymbols::java_lang_Double_longBitsToDouble_name()) {
-      if (signature() == vmSymbols::long_double_signature()) return vmIntrinsics::_longBitsToDouble;
-    } else if (name() == vmSymbols::java_lang_Double_doubleToRawLongBits_name()) {
-      if (signature() == vmSymbols::double_long_signature()) return vmIntrinsics::_doubleToRawLongBits;
-    } else if (name() == vmSymbols::java_lang_Double_doubleToLongBits_name()) {
-      if (signature() == vmSymbols::double_long_signature()) return vmIntrinsics::_doubleToLongBits;
-    }
-  } else if (klass_name == vmSymbols::java_lang_Float() && is_static() && !is_synchronized()) {
-    if (name() == vmSymbols::java_lang_Float_intBitsToFloat_name()) {
-      if (signature() == vmSymbols::int_float_signature()) return vmIntrinsics::_intBitsToFloat;
-    } else if (name() == vmSymbols::java_lang_Float_floatToRawIntBits_name()) {
-      if (signature() == vmSymbols::float_int_signature()) return vmIntrinsics::_floatToRawIntBits;
-    } else if (name() == vmSymbols::java_lang_Float_floatToIntBits_name()) {
-      if (signature() == vmSymbols::float_int_signature()) return vmIntrinsics::_floatToIntBits;
-    }
-  } else if (klass_name == vmSymbols::java_lang_System() && is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::java_lang_System_arraycopy_name()) {
-      if (signature() == vmSymbols::java_lang_System_arraycopy_signature()) return vmIntrinsics::_arraycopy;
-    } else if (name() == vmSymbols::java_lang_System_identityHashCode_name()) {
-      if (signature() == vmSymbols::java_lang_System_identityHashCode_signature()) return vmIntrinsics::_identityHash;
-    } else if (name() == vmSymbols::java_lang_System_currentTimeMillis_name()) {
-      if (signature() == vmSymbols::java_lang_System_currentTimeMillis_signature()) return vmIntrinsics::_currentTimeMillis;
-    } else if (name() == vmSymbols::java_lang_System_nanoTime_name()) {
-      if (signature() == vmSymbols::java_lang_System_nanoTime_signature()) return vmIntrinsics::_nanoTime;
-    }
-
-  } else if (klass_name == vmSymbols::java_lang_Thread() && is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::java_lang_Thread_currentThread_name()) {
-      if (signature() == vmSymbols::java_lang_Thread_currentThread_signature()) return vmIntrinsics::_currentThread;
-    }
-
-  } else if (klass_name == vmSymbols::java_lang_Thread() && !is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::java_lang_Thread_isInterrupted_name()) {
-      if (signature() == vmSymbols::java_lang_Thread_isInterrupted_signature()) return vmIntrinsics::_isInterrupted;
-    }
-
-  } else if (klass_name == vmSymbols::java_lang_Class() && !is_static() && !is_synchronized() && is_native()) {
-    vmIntrinsics::ID id = class_intrinsic_id(name(), signature());
-    if (id != vmIntrinsics::_none)  return id;
-
-  } else if (klass_name == vmSymbols::sun_reflect_Reflection() && is_static() && !is_synchronized() && is_native()) {
-    if        (name() == vmSymbols::getClassAccessFlags_name()) {
-      if (signature() == vmSymbols::getClassAccessFlags_signature()) return vmIntrinsics::_getClassAccessFlags;
-    }
-
-  } else if (klass_name == vmSymbols::java_lang_String() && !is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::compareTo_name()) {
-      if (signature() == vmSymbols::string_int_signature()) return vmIntrinsics::_compareTo;
-    } else if (name() == vmSymbols::indexOf_name()) {
-      if (signature() == vmSymbols::string_int_signature()) return vmIntrinsics::_indexOf;
-    }
-
-  } else if (klass_name == vmSymbols::java_nio_Buffer() && !is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::java_nio_Buffer_checkIndex_name()) {
-      if (signature() == vmSymbols::java_nio_Buffer_checkIndex_signature ()) return vmIntrinsics::_checkIndex;
-    }
-
-  } else if (klass_name == vmSymbols::sun_misc_AtomicLongCSImpl() && !is_static() && !is_synchronized()) {
-    if        (name() == vmSymbols::sun_misc_AtomicLongCSImpl_attemptUpdate_name()) {
-      if (signature() == vmSymbols::sun_misc_AtomicLongCSImpl_attemptUpdate_signature()) return vmIntrinsics::_attemptUpdate;
-    }
-
-  } else if (klass_name == vmSymbols::sun_misc_Unsafe() && !is_synchronized() && is_native()) {
-    vmIntrinsics::ID id = unsafe_intrinsic_id(name(), signature(), is_static());
-    if (id != vmIntrinsics::_none) return id;
-  }
-
-  return vmIntrinsics::_none;
+  return vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
 }
 
 
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)methodOop.hpp	1.218 07/05/05 17:06:07 JVM"
+#pragma ident "@(#)methodOop.hpp	1.219 07/05/17 15:57:04 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -65,9 +65,9 @@
 // | method_size             | max_stack                  |
 // | max_locals              | size_of_parameters         |
 // |------------------------------------------------------|
-// | _highest_tier_compile   | throwout_count             |
+// | intrinsic_id, highest_tier  |       (unused)         |
 // |------------------------------------------------------|
-// | parameter_info          | num_breakpoints            |
+// | throwout_count          | num_breakpoints            |
 // |------------------------------------------------------|
 // | invocation_counter                                   |
 // | backedge_counter                                     |
@@ -107,6 +107,7 @@
   u2                _max_stack;                  // Maximum number of entries on the expression stack
   u2                _max_locals;                 // Number of local variables used by this method
   u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
+  u1                _intrinsic_id_cache;         // Cache for intrinsic_id; 0 or 1+vmInt::ID
   u1                _highest_tier_compile;       // Highest compile level this method has ever seen.
   u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2                _number_of_breakpoints;      // fullspeed debugging support
@@ -222,6 +223,8 @@
   int highest_tier_compile()                     { return _highest_tier_compile;}
   void set_highest_tier_compile(int level)      { _highest_tier_compile = level;}
 
+  void clear_intrinsic_id_cache() { _intrinsic_id_cache = 0; }
+
   // Count of times method was exited via exception while interpreting
   void interpreter_throwout_increment() { 
     if (_interpreter_throwout_count < 65534) {
@@ -546,7 +549,18 @@
   void set_cached_itable_index(int index)           { instanceKlass::cast(method_holder())->set_cached_itable_index(method_idnum(), index); }
 
   // Support for inlining of intrinsic methods
-  vmIntrinsics::ID intrinsic_id() const;  // returns zero if not an intrinsic
+  vmIntrinsics::ID intrinsic_id() const { // returns zero if not an intrinsic
+    const u1& cache = _intrinsic_id_cache;
+    if (cache != 0) {
+      return (vmIntrinsics::ID)(cache - 1);
+    } else {
+      vmIntrinsics::ID id = compute_intrinsic_id();
+      *(u1*)&cache = ((u1) id) + 1;   // force the cache to be non-const
+      vmIntrinsics::verify_method(id, (methodOop) this);
+      assert((vmIntrinsics::ID)(cache - 1) == id, "proper conversion");
+      return id;
+    }
+  }
 
   // On-stack replacement support   
   bool has_osr_nmethod()                         { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
@@ -599,6 +613,9 @@
   void set_size_of_parameters(int size)          { _size_of_parameters = size; }
  private:
 
+  // Helper routine for intrinsic_id().
+  vmIntrinsics::ID compute_intrinsic_id() const;
+
   // Inlined elements
   address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
   address* signature_handler_addr() const        { return native_function_addr() + 1; }
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)objArrayKlass.cpp	1.145 07/05/05 17:06:06 JVM"
+#pragma ident "@(#)objArrayKlass.cpp	1.146 07/05/17 15:57:07 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -181,18 +181,6 @@
       ak = objArrayKlassHandle(THREAD, this_oop->higher_dimension());
       if( ak.is_null() ) {
 
-#ifdef JVMPI_SUPPORT
-        // We grab locks above and the allocate_objArray_klass() code
-        // path needs to post OBJECT_ALLOC events for the newly
-        // allocated objects. We can't post events while holding locks
-        // so we store the information on the side until after we
-        // release the locks.
-        if (Universe::jvmpi_alloc_event_enabled()) {
-          jt->set_deferred_obj_alloc_events(
-            new (ResourceObj::C_HEAP) GrowableArray<DeferredObjAllocEvent *>(dimension + 1, true));
-        }
-#endif // JVMPI_SUPPORT
-
         // Create multi-dim klass object and link them together
         klassOop new_klass = 
           objArrayKlassKlass::cast(Universe::objArrayKlassKlassObj())->
@@ -203,19 +191,6 @@
         assert(ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
       }
     }
-
-#ifdef JVMPI_SUPPORT
-    GrowableArray<DeferredObjAllocEvent *>* deferred_list =
-      jt->deferred_obj_alloc_events();
-    if (deferred_list != NULL) {
-      if (deferred_list->length() > 0) {
-        Universe::jvmpi_post_deferred_obj_alloc_events(deferred_list);
-      }
-      jt->set_deferred_obj_alloc_events(NULL);
-      // Return memory used by the GrowableArray.
-      delete deferred_list;
-    }
-#endif // JVMPI_SUPPORT
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
--- a/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)oopsHierarchy.hpp	1.30 07/05/05 17:06:08 JVM"
+#pragma ident "@(#)oopsHierarchy.hpp	1.31 07/05/17 15:57:10 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -65,10 +65,6 @@
 
 class Thread;
 typedef class   markOopDesc*		    markOop;
-#ifdef JVMPI_SUPPORT
-class _jobjectID;
-typedef struct _jobjectID * jobjectID;
-#endif // JVMPI_SUPPORT
 class PromotedObject;
 
 
@@ -135,10 +131,6 @@
   // from parNewGeneration and other things that want to get to the end of
   // an oop for stuff (like constMethodKlass.cpp, objArrayKlass.cpp)
   operator oop* () const              { return (oop *)obj(); }
-#ifdef JVMPI_SUPPORT
-  // from jvmpi.cpp
-  operator jobjectID()                { return (jobjectID)obj(); }
-#endif // JVMPI_SUPPORT
 };
 
 #define DEF_OOP(type)                                                      \
--- a/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)typeArrayKlass.cpp	1.123 07/05/05 17:06:08 JVM"
+#pragma ident "@(#)typeArrayKlass.cpp	1.124 07/05/17 15:57:12 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -162,17 +162,6 @@
     
       h_ak = objArrayKlassHandle(THREAD, h_this->higher_dimension());
       if (h_ak.is_null()) {
-#ifdef JVMPI_SUPPORT
-        // We grab locks above and the allocate_objArray_klass() code
-        // path needs to post OBJECT_ALLOC events for the newly
-        // allocated objects. We can't post events while holding locks
-        // so we store the information on the side until after we
-        // release the locks.
-        if (Universe::jvmpi_alloc_event_enabled()) {
-          jt->set_deferred_obj_alloc_events(
-            new (ResourceObj::C_HEAP) GrowableArray<DeferredObjAllocEvent *>(dimension + 1, true));
-        }
-#endif // JVMPI_SUPPORT
         klassOop oak = objArrayKlassKlass::cast(
           Universe::objArrayKlassKlassObj())->allocate_objArray_klass(
           dimension + 1, h_this, CHECK_NULL);
@@ -182,19 +171,6 @@
         assert(h_ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");    
       }       
     }
-
-#ifdef JVMPI_SUPPORT
-    GrowableArray<DeferredObjAllocEvent *>* deferred_list =
-      jt->deferred_obj_alloc_events();
-    if (deferred_list != NULL) {
-      if (deferred_list->length() > 0) {
-        Universe::jvmpi_post_deferred_obj_alloc_events(deferred_list);
-      }
-      jt->set_deferred_obj_alloc_events(NULL);
-      // Return memory used by the GrowableArray.
-      delete deferred_list;
-    }
-#endif // JVMPI_SUPPORT
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
--- a/hotspot/src/share/vm/opto/block.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/block.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)block.cpp	1.168 07/05/05 17:06:10 JVM"
+#pragma ident "@(#)block.cpp	1.169 07/05/17 15:57:15 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -90,6 +90,30 @@
 }
 
 //-----------------------------------------------------------------------------
+// Compute the size of first 'inst_cnt' instructions in this block.
+// Return the number of instructions left to compute if the block has 
+// less then 'inst_cnt' instructions.
+uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt, 
+                                    PhaseRegAlloc* ra) {
+  uint last_inst = _nodes.size();
+  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
+    uint inst_size = _nodes[j]->size(ra);
+    if( inst_size > 0 ) {
+      inst_cnt--;
+      uint sz = sum_size + inst_size;
+      if( sz <= (uint)OptoLoopAlignment ) {
+        // Compute size of instructions which fit into fetch buffer only
+        // since all inst_cnt instructions will not fit even if we align them.
+        sum_size = sz;
+      } else {
+        return 0;
+      }
+    }
+  }
+  return inst_cnt;
+}
+
+//-----------------------------------------------------------------------------
 uint Block::find_node( const Node *n ) const {
   for( uint i = 0; i < _nodes.size(); i++ ) {
     if( _nodes[i] == n )
@@ -153,8 +177,14 @@
   // Check for way-low freq
   if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
 
-  Block* guard = num_preds() >= 2 ? bbs[pred(1)->_idx] : NULL;
-  if (guard != NULL) {
+  const float epsilon = 0.05f;
+  const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
+  uint uncommon_preds = 0;
+  uint freq_preds = 0;
+  uint uncommon_for_freq_preds = 0;
+
+  for( uint i=1; i<num_preds(); i++ ) {
+    Block* guard = bbs[pred(i)->_idx];
     // Check to see if this block follows its guard 1 time out of 10000
     // or less. 
     //
@@ -165,12 +195,25 @@
     // Use an epsilon value of 5% to allow for variability in frequency 
     // predictions and floating point calculations. The net effect is
     // that guard_factor is set to 9500.
-    const float epsilon = 0.05f;
-    const float guard_factor = (1 / PROB_UNLIKELY_MAG(4)) * (1 - epsilon);
-    if (guard->_freq > _freq * guard_factor) {
-      return true;
+    //
+    // Ignore low-frequency blocks.
+    // The next check is (guard->_freq < 1.e-5 * 9500.).
+    if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
+      uncommon_preds++;
+    } else {
+      freq_preds++;
+      if( _freq < guard->_freq * guard_factor ) {
+        uncommon_for_freq_preds++;
+      }
     }
   }
+  if( num_preds() > 1 &&
+      // The block is uncommon if all preds are uncommon or
+      (uncommon_preds == (num_preds()-1) ||
+      // it is uncommon for all frequent preds.
+       uncommon_for_freq_preds == freq_preds) ) {
+    return true; 
+  }
   return false;
 }
 
@@ -264,7 +307,14 @@
 
 //=============================================================================
 //------------------------------PhaseCFG---------------------------------------
-PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) : Phase(CFG), _bbs(a), _root(r) {
+PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) : 
+  Phase(CFG), 
+  _bbs(a), 
+  _root(r) 
+#ifndef PRODUCT
+  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
+#endif
+{
   ResourceMark rm;
   // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
   // then Match it into a machine-specific Node.  Then clone the machine
@@ -510,6 +560,24 @@
   return true;
 }
 
+//------------------------------MoveEmptyToEnd---------------------------------
+// Move empty and uncommon blocks to the end.
+void PhaseCFG::MoveToEnd(Block *b, uint i) {
+  int e = b->is_Empty();
+  if (e != Block::not_empty) {
+    if (e == Block::empty_with_goto) {
+      // Remove the goto, but leave the block.
+      b->_nodes.pop();
+    }
+    // Mark this block as a connector block, which will cause it to be
+    // ignored in certain functions such as non_connector_successor().
+    b->set_connector();
+  }
+  // Move the empty block to the end, and don't recheck.
+  _blocks.remove(i);
+  _blocks.push(b);
+}
+
 //------------------------------RemoveEmpty------------------------------------
 // Remove empty basic blocks and useless branches.  
 void PhaseCFG::RemoveEmpty() {
@@ -531,13 +599,11 @@
 
     // Look for uncommon blocks and move to end.
     if( b->is_uncommon(_bbs) ) {
-      _blocks.remove(i);        // Move block to end
-      _blocks.push(b);
+      MoveToEnd(b, i);
       last--;                   // No longer check for being uncommon!
-     if( no_flip_branch(b) ) { // Fall-thru case must follow?
+      if( no_flip_branch(b) ) { // Fall-thru case must follow?
         b = _blocks[i];         // Find the fall-thru block
-        _blocks.remove(i);      // Move fall-thru case to end as well
-        _blocks.push(b);
+        MoveToEnd(b, i);
         last--;
       }
       i--;                      // backup block counter post-increment
@@ -545,24 +611,13 @@
   }
 
   // Remove empty blocks
-  uint full_blocks = 0;
   uint j1;
   last = _num_blocks;
   for( i=0; i < last; i++ ) {
     Block *b = _blocks[i];
     if (i > 0) {
-      int e = b->is_Empty();
-      if (e != Block::not_empty) {
-        if (e == Block::empty_with_goto) {
-          // Remove the goto, but leave the block.
-          b->_nodes.pop();
-        }
-        // Mark this block as a connector block, which will cause it to be ignored
-        // in certain functions such as non_connector_successor().
-        b->set_connector();
-        // Move the empty block to the end, and don't recheck.
-        _blocks.remove(i);
-        _blocks.push(b);
+      if (b->is_Empty() != Block::not_empty) {
+        MoveToEnd(b, i);
         last--;
         i--;
       }
--- a/hotspot/src/share/vm/opto/block.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/block.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)block.hpp	1.97 07/05/05 17:06:11 JVM"
+#pragma ident "@(#)block.hpp	1.98 07/05/17 15:57:17 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -163,6 +163,35 @@
   void    set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
   node_idx_t  raise_LCA_visited() const       { return _raise_LCA_visited; }
 
+  // Estimated size in bytes of first instructions in a loop. 
+  uint _first_inst_size;
+  uint first_inst_size() const     { return _first_inst_size; }
+  void set_first_inst_size(uint s) { _first_inst_size = s; }
+
+  // Compute the size of first instructions in this block.
+  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);
+
+  // Compute alignment padding if the block needs it.
+  // Align a loop if loop's padding is less or equal to padding limit
+  // or the size of first instructions in the loop > padding.
+  uint alignment_padding(int current_offset) {
+    int block_alignment = code_alignment();
+    int max_pad = block_alignment-relocInfo::addr_unit();
+    if( max_pad > 0 ) {
+      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
+      int current_alignment = current_offset & max_pad;
+      if( current_alignment != 0 ) {
+        uint padding = (block_alignment-current_alignment) & max_pad;
+        if( !head()->is_Loop() ||
+            padding <= (uint)MaxLoopPad ||
+            first_inst_size() > padding ) {
+          return padding;
+        }
+      }
+    }
+    return 0;
+  }
+
   // Connector blocks. Connector blocks are basic blocks devoid of 
   // instructions, but may have relevant non-instruction Nodes, such as
   // Phis or MergeMems. Such blocks are discovered and marked during the
@@ -187,6 +216,7 @@
       _fhrp_index(1), 
       _raise_LCA_mark(0),
       _raise_LCA_visited(0),
+      _first_inst_size(999999), 
       _connector(false) { 
     _nodes.push(headnode); 
   }
@@ -216,19 +246,19 @@
   void find_remove( const Node *n ); 
 
   // Schedule a call next in the block
-  uint sched_call(Matcher &m, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 
   // Perform basic-block local scheduling
-  Node *select(Node_List &worklist, Block_Array &bbs, int *ready_cnt, VectorSet &next_call, uint sched_slot, GrowableArray<uint> &node_latency);
+  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
   void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
   void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
-  bool schedule_local(Matcher &m, Block_Array &bbs, int *ready_cnt, VectorSet &next_call, GrowableArray<uint> &node_latency);
+  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
   // Cleanup if any code lands between a Call and his Catch
   void call_catch_cleanup(Block_Array &bbs);
   // Detect implicit-null-check opportunities.  Basically, find NULL checks 
   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   // I can generate a memory op if there is not one nearby.
-  void implicit_null_check(Block_Array &bbs, GrowableArray<uint> &latency, Node *pro, Node *val, int allowed_reasons);
+  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
 
   // Return the empty status of a block
   enum { not_empty, empty_with_goto, completely_empty };
@@ -300,6 +330,13 @@
   Block_Array _bbs;             // Map Nodes to owning Basic Block
   Block *_broot;                // Basic block of root
   uint _rpo_ctr;
+  
+  // Per node latency estimation, valid only during GCM
+  GrowableArray<uint> _node_latency;
+
+#ifndef PRODUCT
+  bool _trace_opto_pipelining;  // tracing flag
+#endif
 
   // Build dominators
   void Dominators();
@@ -311,20 +348,34 @@
   // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
   void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
  
-  // Schedule Nodes early in their basic blocks.
-  bool schedule_early(VectorSet &visited, Node_List &roots, Block_Array &bbs);
+  // Compute the (backwards) latency of a node from the uses
+  void latency_from_uses(Node *n);
+
+  // Compute the (backwards) latency of a node from a single use
+  int latency_from_use(Node *n, const Node *def, Node *use);
+
+  // Compute the (backwards) latency of a node from the uses of this instruction
+  void partial_latency_of_defs(Node *n);
 
-  // Now schedule all codes as LATE as possible.  This is the LCA in the 
-  // dominator tree of all USES of a value.  Pick the block with the least
-  // loop nesting depth that is lowest in the dominator tree.
-  void schedule_late(VectorSet &visited, Node_List &stack, GrowableArray<uint> &latency);
+  // Schedule Nodes early in their basic blocks.
+  bool schedule_early(VectorSet &visited, Node_List &roots);
+
+  // For each node, find the latest block it can be scheduled into
+  // and then select the cheapest block between the latest and earliest
+  // block to place the node.
+  void schedule_late(VectorSet &visited, Node_List &stack);
+
+  // Pick a block between early and late that is a cheaper alternative
+  // to late. Helper for schedule_late.
+  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
 
   // Compute the instruction global latency with a backwards walk
-  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack, GrowableArray<uint> &latency);
+  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);
 
   // Remove empty basic blocks
   void RemoveEmpty();
   bool MoveToNext(Block* bx, uint b_index);
+  void MoveToEnd(Block* bx, uint b_index);
 
   // Check for NeverBranch at block end.  This needs to become a GOTO to the
   // true target.  NeverBranch are treated as a conditional branch that always
@@ -340,11 +391,15 @@
   } 
 
 #ifndef PRODUCT
+  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
+
   // Debugging print of CFG
   void dump( ) const;           // CFG only
   void _dump_cfg( const Node *end, VectorSet &visited  ) const;
   void verify() const;
   void dump_headers();
+#else
+  bool trace_opto_pipelining() const { return false; }
 #endif
 };
 
--- a/hotspot/src/share/vm/opto/c2_globals.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/c2_globals.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)c2_globals.cpp	1.13 07/05/05 17:06:10 JVM"
+#pragma ident "@(#)c2_globals.cpp	1.14 07/05/17 15:57:19 JVM"
 #endif
 /*
  * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -28,4 +28,4 @@
 # include "incls/_precompiled.incl"
 # include "incls/_c2_globals.cpp.incl"
 
-C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)c2_globals.hpp	1.88 07/05/05 17:06:13 JVM"
+#pragma ident "@(#)c2_globals.hpp	1.89 07/05/17 15:57:21 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -29,7 +29,7 @@
 // Defines all globals flags used by the server compiler.
 //
 
-#define C2_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
+#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
                                                                             \
   notproduct(intx, CompileZapFirst, 0,                                      \
           "If +ZapDeadCompiledLocals, "                                     \
@@ -58,6 +58,12 @@
   product_pd(intx, OptoLoopAlignment,                                       \
           "Align inner loops to zero relative to this modulus")             \
                                                                             \
+  product(intx, MaxLoopPad, (OptoLoopAlignment-1),                          \
+          "Align a loop if padding size in bytes is less or equal to this value") \
+                                                                            \
+  product(intx, NumberOfLoopInstrToAlign, 4,                                \
+          "Number of first instructions in a loop to align")                \
+                                                                            \
   notproduct(intx, IndexSetWatch, 0,                                        \
           "Trace all operations on this IndexSet (-1 means all, 0 none)")   \
                                                                             \
@@ -205,7 +211,7 @@
   notproduct(bool, TraceLoopUnswitching, false,                             \
           "Trace loop unswitching")                                         \
                                                                             \
-  product(bool, UseSuperWord, false,                                        \
+  product(bool, UseSuperWord, true,                                         \
           "Transform scalar operations into superword operations")          \
                                                                             \
   develop(bool, SuperWordRTDepCheck, false,                                 \
@@ -236,6 +242,9 @@
   product(bool, UseOnlyInlinedBimorphic, true,                              \
           "Don't use BimorphicInlining if can't inline a second method")    \
                                                                             \
+  product(bool, InsertMemBarAfterArraycopy, true,                           \
+          "Insert memory barrier after arraycopy call")                     \
+                                                                            \
   /* controls for tier 1 compilations */                                    \
                                                                             \
   develop(bool, Tier1CountInvocations, true,                                \
@@ -322,6 +331,9 @@
   notproduct(bool, PrintLockStatistics, false,                              \
           "Print precise statistics on the dynamic lock usage")             \
                                                                             \
+  diagnostic(bool, PrintPreciseBiasedLockingStatistics, false,              \
+          "Print per-lock-site statistics of biased locking in JVM")        \
+                                                                            \
   notproduct(bool, PrintEliminateLocks, false,                              \
           "Print out when locks are eliminated")                            \
                                                                             \
@@ -334,5 +346,5 @@
   product(intx, MaxLabelRootDepth, 1100, 				    \
           "Maximum times call Label_Root to prevent stack overflow")        \
 
-C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
--- a/hotspot/src/share/vm/opto/callnode.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)callnode.hpp	1.191 07/05/05 17:06:12 JVM"
+#pragma ident "@(#)callnode.hpp	1.192 07/05/17 15:57:24 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -405,6 +405,7 @@
   const TypeFunc *_tf;        // Function type
   address      _entry_point;  // Address of method being called
   float        _cnt;          // Estimate of number of times called
+  PointsToNode::EscapeState _escape_state;
 
   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
     : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
@@ -414,6 +415,7 @@
   {
     init_class_id(Class_Call);
     init_flags(Flag_is_Call);
+    _escape_state = PointsToNode::UnknownEscape;
   }
 
   const TypeFunc* tf()        const { return _tf; }
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)cfgnode.cpp	1.259 07/05/05 17:06:09 JVM"
+#pragma ident "@(#)cfgnode.cpp	1.260 07/05/17 15:57:27 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1551,6 +1551,45 @@
     if (opt != NULL)  return opt;
   }
 
+  if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
+    // Try to undo Phi of AddP:
+    //   (Phi (AddP base base y) (AddP base2 base2 y))
+    // becomes:
+    //   newbase := (Phi base base2)
+    //   (AddP newbase newbase y)
+    //
+    // This occurs as a result of unsuccessful split_thru_phi and
+    // interferes with taking advantage of addressing modes.  See the
+    // clone_shift_expressions code in matcher.cpp
+    Node* addp = in(1);
+    const Type* type = addp->in(AddPNode::Base)->bottom_type();
+    Node* y = addp->in(AddPNode::Offset);
+    if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
+      // make sure that all the inputs are similar to the first one,
+      // i.e. AddP with base == address and same offset as first AddP
+      bool doit = true;
+      for (uint i = 2; i < req(); i++) {
+        if (in(i) == NULL ||
+            in(i)->Opcode() != Op_AddP ||
+            in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
+            in(i)->in(AddPNode::Offset) != y) {
+          doit = false;
+          break;
+        }
+        // Accumulate type for resulting Phi
+        type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+      }
+      if (doit) {
+        Node* base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
+        for (uint i = 1; i < req(); i++) {
+          base->init_req(i, in(i)->in(AddPNode::Base));
+        }
+        phase->is_IterGVN()->register_new_node_with_optimizer(base);
+        return new (phase->C, 4) AddPNode(base, base, y);
+      }
+    }
+  }
+
   // Split phis through memory merges, so that the memory merges will go away.
   // Piggy-back this transformation on the search for a unique input....
   // It will be as if the merged memory is the unique value of the phi.
--- a/hotspot/src/share/vm/opto/coalesce.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/coalesce.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)coalesce.cpp	1.194 07/05/05 17:06:15 JVM"
+#pragma ident "@(#)coalesce.cpp	1.195 07/05/17 17:43:24 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -451,7 +451,10 @@
           if( _phc.Find(m) != name ) {
             Node *copy;
             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
-            if( m->is_Mach() && m->as_Mach()->rematerialize() ) {
+            // At this point it is unsafe to extend live ranges (6550579).
+            // Rematerialize only constants as we do for Phi above.
+            if( m->is_Mach() && m->as_Mach()->is_Con() && 
+                m->as_Mach()->rematerialize() ) {
               copy = m->clone();
               // Insert the copy in the basic block, just before us
               b->_nodes.insert( l++, copy );
--- a/hotspot/src/share/vm/opto/compile.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)compile.cpp	1.630 07/05/05 17:06:14 JVM"
+#pragma ident "@(#)compile.cpp	1.631 07/05/17 15:57:33 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -110,6 +110,8 @@
       // Save it for next time:
       register_intrinsic(cg);
       return cg;
+    } else {
+      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
     }
   }
   return NULL;
@@ -119,39 +121,99 @@
 // in library_call.cpp.
 
 
-ciMethod* Compile::get_Method_invoke() {
-  assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "Should not reach here otherwise");
+#ifndef PRODUCT
+// statistics gathering...
+
+juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
+jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};
 
-  if (_Method_invoke == NULL) {
-    ciInstanceKlass* _Method =
-        env()->find_system_klass(ciSymbol::make("java/lang/reflect/Method"))->as_instance_klass();
-    if (!_Method->is_loaded())  return NULL;
-    _Method_invoke =
-      _Method->find_method(get_invoke_name(),
-                           ciSymbol::make("(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"));
-    assert(_Method_invoke != NULL, "must be able to find");
+bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
+  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
+  int oflags = _intrinsic_hist_flags[id];
+  assert(flags != 0, "what happened?");
+  if (is_virtual) {
+    flags |= _intrinsic_virtual;
+  }
+  bool changed = (flags != oflags);
+  if ((flags & _intrinsic_worked) != 0) {
+    juint count = (_intrinsic_hist_count[id] += 1);
+    if (count == 1) {
+      changed = true;           // first time
+    }
+    // increment the overall count also:
+    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
   }
-  return _Method_invoke;
+  if (changed) {
+    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
+      // Something changed about the intrinsic's virtuality.
+      if ((flags & _intrinsic_virtual) != 0) {
+        // This is the first use of this intrinsic as a virtual call.
+        if (oflags != 0) {
+          // We already saw it as a non-virtual, so note both cases.
+          flags |= _intrinsic_both;
+        }
+      } else if ((oflags & _intrinsic_both) == 0) {
+        // This is the first use of this intrinsic as a non-virtual
+        flags |= _intrinsic_both;
+      }
+    }
+    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
+  }
+  // update the overall flags also:
+  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
+  return changed;
 }
 
-
-ciSymbol* Compile::get_invoke_name() {
-  assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "Should not reach here otherwise");
-
-  if (_invoke_name == NULL) {
-    _invoke_name = ciSymbol::make("invoke");
-  }
-  return _invoke_name;
+static char* format_flags(int flags, char* buf) {
+  buf[0] = 0;
+  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
+  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
+  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
+  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
+  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
+  if (buf[0] == 0)  strcat(buf, ",");
+  assert(buf[0] == ',', "must be");
+  return &buf[1];
 }
 
-ciInstanceKlass* Compile::get_MethodAccessorImpl() {
-  assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "Should not reach here otherwise");
+void Compile::print_intrinsic_statistics() {
+  char flagsbuf[100];
+  ttyLocker ttyl;
+  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
+  tty->print_cr("Compiler intrinsic usage:");
+  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
+  if (total == 0)  total = 1;  // avoid div0 in case of no successes
+  #define PRINT_STAT_LINE(name, c, f) \
+    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
+  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
+    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
+    int   flags = _intrinsic_hist_flags[id];
+    juint count = _intrinsic_hist_count[id];
+    if ((flags | count) != 0) {
+      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
+    }
+  }
+  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
+  if (xtty != NULL)  xtty->tail("statistics");
+}
 
-  if (_MethodAccessorImpl == NULL) {
-    _MethodAccessorImpl = env()->find_system_klass(ciSymbol::make("sun/reflect/MethodAccessorImpl"))->as_instance_klass();
+void Compile::print_statistics() {
+  { ttyLocker ttyl;
+    if (xtty != NULL)  xtty->head("statistics type='opto'");
+    Parse::print_statistics();
+    PhaseCCP::print_statistics();
+    PhaseRegAlloc::print_statistics();
+    Scheduling::print_statistics();
+    PhasePeephole::print_statistics();
+    PhaseIdealLoop::print_statistics();
+    if (xtty != NULL)  xtty->tail("statistics");
   }
-  return _MethodAccessorImpl;
+  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
+    // put this under its own <statistics> element.
+    print_intrinsic_statistics();
+  }
 }
+#endif //PRODUCT
 
 // Support for bundling info
 Bundle* Compile::node_bundling(const Node *n) {
@@ -504,6 +566,7 @@
 
   // Perform escape analysis
   if (_congraph != NULL) {
+    NOT_PRODUCT( TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, TimeCompiler); )
     _congraph->compute_escape();
 #ifndef PRODUCT
     if (PrintEscapeAnalysis) {
@@ -689,14 +752,6 @@
 
   set_24_bit_selection_and_mode(Use24BitFP, false);
 
-#ifdef JVMPI_SUPPORT
-  // Capture status of JVMPI once for the compilation
-  _need_jvmpi_compiled_method_event = jvmpi::is_event_enabled(JVMPI_EVENT_COMPILED_METHOD_LOAD);
-  _need_jvmpi_method_entry_event    = jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY);
-  _need_jvmpi_method_entry2_event   = jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY2);
-  _need_jvmpi_method_exit_event     = jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT);
-#endif // JVMPI_SUPPORT
-
   _node_note_array = NULL;
   _default_node_notes = NULL;
 
@@ -777,9 +832,6 @@
   // A NULL adr_type hits in the cache right away.  Preload the right answer.
   probe_alias_cache(NULL)->_index = AliasIdxTop;
 
-  _Method_invoke = NULL;
-  _invoke_name = NULL;
-  _MethodAccessorImpl = NULL;
   _intrinsics = NULL;
   _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   register_library_intrinsics();
--- a/hotspot/src/share/vm/opto/compile.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/compile.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)compile.hpp	1.229 07/05/05 17:06:15 JVM"
+#pragma ident "@(#)compile.hpp	1.230 07/05/17 15:57:38 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -171,21 +171,10 @@
   ciEnv*                _env;                   // CI interface
   CompileLog*           _log;                   // from CompilerThread
   const char*           _failure_reason;        // for record_failure/failing pattern
-  ciMethod*             _Method_invoke;         // Method.invoke(Object, Object[])
-  ciSymbol*             _invoke_name;           // "invoke"
-  ciInstanceKlass*      _MethodAccessorImpl;    // sun.reflect.MethodAccessorImpl
   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
   ConnectionGraph*      _congraph;
 
-#ifdef JVMPI_SUPPORT
-  //                Capture JVMPI status once for the compilation.
-  bool                  _need_jvmpi_compiled_method_event;
-  bool                  _need_jvmpi_method_entry_event;
-  bool                  _need_jvmpi_method_entry2_event;
-  bool                  _need_jvmpi_method_exit_event;
-#endif // JVMPI_SUPPORT
-
   // Node management
   uint                  _unique;                // Counter for unique Node indices
   debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
@@ -349,9 +338,6 @@
   bool              failing() const             { return _env->failing() || _failure_reason != NULL; }
   const char* failure_reason() { return _failure_reason; }
   bool              failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }
-  ciMethod*         get_Method_invoke();
-  ciSymbol*         get_invoke_name();
-  ciInstanceKlass*  get_MethodAccessorImpl();
 
   void record_failure(const char* reason);
   void record_method_not_compilable(const char* reason, bool all_tiers = false) { 
@@ -373,18 +359,6 @@
     }
   }
 
-#ifdef JVMPI_SUPPORT
-  bool need_jvmpi_compiled_method_event() const { return _need_jvmpi_compiled_method_event; }
-  bool need_jvmpi_method_entry_event() const    { return _need_jvmpi_method_entry_event;    }
-  bool need_jvmpi_method_entry2_event() const   { return _need_jvmpi_method_entry2_event;   }
-  bool need_jvmpi_method_exit_event() const     { return _need_jvmpi_method_exit_event;     }
-  bool need_jvmpi_method_event() const {
-    return need_jvmpi_method_entry_event()
-        || need_jvmpi_method_entry2_event()
-        || need_jvmpi_method_exit_event();
-  }
-#endif // JVMPI_SUPPORT
-
   // Node management
   uint              unique() const              { return _unique; }
   uint         next_unique()                    { return _unique++; }
@@ -616,6 +590,10 @@
   // Determine which variable sized branches can be shortened
   void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size);
 
+  // Compute the size of first NumberOfLoopInstrToAlign instructions 
+  // at the head of a loop.
+  void compute_loop_first_inst_sizes();
+
   // Compute the information for the exception tables
   void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);
 
@@ -670,7 +648,26 @@
   CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
   void           register_intrinsic(CallGenerator* cg);                    // update fn
 
+#ifndef PRODUCT
+  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
+  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
+#endif
+
  public:
+
+  // Note:  Histogram array size is about 1 Kb.
+  enum {                        // flag bits:
+    _intrinsic_worked = 1,      // succeeded at least once
+    _intrinsic_failed = 2,      // tried it but it failed
+    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
+    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
+    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
+  };
+  // Update histogram.  Return boolean if this is a first-time occurrence.
+  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
+                                          bool is_virtual, int flags) PRODUCT_RETURN0;
+  static void print_intrinsic_statistics() PRODUCT_RETURN;
+
   // Graph verification code
   // Walk the node list, verifying that there is a one-to-one
   // correspondence between Use-Def edges and Def-Use edges
@@ -681,6 +678,9 @@
   // Print bytecodes, including the scope inlining tree
   void print_codes();
 
+  // End-of-run dumps.
+  static void print_statistics() PRODUCT_RETURN;
+
   // Dump formatted assembly
   void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
   void dump_pc(int *pcs, int pc_limit, Node *n);
--- a/hotspot/src/share/vm/opto/connode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/connode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)connode.cpp	1.216 07/05/05 17:06:10 JVM"
+#pragma ident "@(#)connode.cpp	1.217 07/05/17 15:57:42 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -495,24 +495,6 @@
         : in_type;
     } else {
       result =  my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
-      // If we are casting an unescaped object see if we can cast the type to a unique instance.
-
-      const TypeOopPtr *res_oop_type = result->isa_oopptr();
-      ConnectionGraph *cgr = Compile::current()->congraph();
-    
-      if (cgr != NULL && res_oop_type != NULL && !res_oop_type->is_instance()) {
-        Node *in1 = in(1);
-        Node *alloc = in1->is_Proj() ? in1->in(0) : NULL;
-        if (alloc != NULL && cgr->escape_state(in1, phase) == PointsToNode::NoEscape && !cgr->hidden_alias(in1)) {
-          bool found_alias = false;
-          for (DUIterator_Fast imax, i = fast_outs(imax); !found_alias && i < imax; i++) {
-            Node *use = fast_out(i);
-            found_alias = found_alias || can_cause_alias(use, phase);
-          }
-          if (!found_alias) 
-            result = res_oop_type->cast_to_instance(alloc->_idx);
-        }
-      }
     }
   }
   return result;
--- a/hotspot/src/share/vm/opto/doCall.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)doCall.cpp	1.205 07/05/05 17:06:17 JVM"
+#pragma ident "@(#)doCall.cpp	1.206 07/05/17 15:57:45 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -49,13 +49,8 @@
 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) {
   CallGenerator* cg;
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI/Dtrace currently doesn't work unless all calls are vanilla
-  if (need_jvmpi_method_event() || DTraceMethodProbes) {
-#else // !JVMPI_SUPPORT
   // Dtrace currently doesn't work unless all calls are vanilla
   if (DTraceMethodProbes) {
-#endif // JVMPI_SUPPORT
     allow_inline = false;
   }
 
@@ -709,8 +704,11 @@
   if (can_rerun_bytecode()) {
     // Do not push_ex_oop here!
     // Re-executing the bytecode will reproduce the throwing condition.
+    bool must_throw = true;
     uncommon_trap(Deoptimization::Reason_unhandled,
-                  Deoptimization::Action_none);
+                  Deoptimization::Action_none,
+                  (ciKlass*)NULL, (const char*)NULL, // default args
+                  must_throw);
     return;
   }
 
--- a/hotspot/src/share/vm/opto/escape.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/escape.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)escape.cpp	1.9 07/05/05 17:06:14 JVM"
+#pragma ident "@(#)escape.cpp	1.10 07/05/17 15:58:23 JVM"
 #endif
 /*
  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -90,7 +90,7 @@
 }
 #endif
 
-ConnectionGraph::ConnectionGraph(Compile * C) : _processed(C->comp_arena()) {
+ConnectionGraph::ConnectionGraph(Compile * C) : _processed(C->comp_arena()), _node_map(C->comp_arena()) {
   _collecting = true;
   this->_compile = C;
   const PointsToNode &dummy = PointsToNode();
@@ -196,6 +196,12 @@
     ptset.set(n->_idx);
     return;
   }
+  // we may have a Phi which has not been processed
+  if (npt._node == NULL) {
+    assert(n->is_Phi(), "unprocessed node must be a Phi");
+    record_for_escape_analysis(n);
+    npt = _nodes->at(n->_idx);
+  }
   worklist.push(n->_idx);
   while(worklist.length() > 0) {
     int ni = worklist.pop();
@@ -307,8 +313,489 @@
   }
 }
 
+//
+// Search memory chain of "mem" to find a MemNode whose address
+// is the specified alias index.  Returns the MemNode found or the
+// first non-MemNode encountered.
+// 
+Node *ConnectionGraph::find_mem(Node *mem, int alias_idx, PhaseGVN  *igvn) {
+  if (mem == NULL)
+    return mem;
+  while (mem->is_Mem()) {
+    const Type *at = igvn->type(mem->in(MemNode::Address));
+    if (at != Type::TOP) {
+      assert (at->isa_ptr() != NULL, "pointer type required.");
+      int idx = _compile->get_alias_index(at->is_ptr());
+      if (idx == alias_idx)
+        break;
+    }
+    mem = mem->in(MemNode::Memory);
+  }
+  return mem;
+}
+
+//
+// Adjust the type and inputs of an AddP which computes the
+// address of a field of an instance
+//
+void ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
+  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
+  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
+  assert(t != NULL,  "expecting oopptr");
+  assert(base_t != NULL && base_t->is_instance(), "expecting instance oopptr");
+  uint inst_id =  base_t->instance_id();
+  assert(!t->is_instance() || t->instance_id() == inst_id,
+                             "old type must be non-instance or match new type");
+  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
+  // ensure an alias index is allocated for the instance type
+  int alias_idx = _compile->get_alias_index(tinst);
+  igvn->set_type(addp, tinst);
+  // record the allocation in the node map
+  set_map(addp->_idx, get_map(base->_idx));
+  // if the Address input is not the appropriate instance type (due to intervening
+  // casts,) insert a cast
+  Node *adr = addp->in(AddPNode::Address);
+  const TypeOopPtr  *atype = igvn->type(adr)->isa_oopptr();
+  if (atype->instance_id() != inst_id) {
+    assert(!atype->is_instance(), "no conflicting instances");
+    const TypeOopPtr *new_atype = base_t->add_offset(atype->offset())->isa_oopptr();
+    Node *acast = new (_compile, 2) CastPPNode(adr, new_atype);
+    acast->set_req(0, adr->in(0));
+    igvn->set_type(acast, new_atype);
+    record_for_optimizer(acast);
+    Node *bcast = acast;
+    Node *abase = addp->in(AddPNode::Base);
+    if (abase != adr) {
+      bcast = new (_compile, 2) CastPPNode(abase, base_t);
+      bcast->set_req(0, abase->in(0));
+      igvn->set_type(bcast, base_t);
+      record_for_optimizer(bcast);
+    }
+    igvn->hash_delete(addp);
+    addp->set_req(AddPNode::Base, bcast);
+    addp->set_req(AddPNode::Address, acast);
+    igvn->hash_insert(addp);
+    record_for_optimizer(addp);
+  }
+}
+
+//
+// Create a new version of orig_phi if necessary. Returns either the newly
+// created phi or an existing phi.  Sets create_new to indicate whether a new
+// phi was created.  Cache the last newly created phi in the node map.
+//
+PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
+  Compile *C = _compile;
+  new_created = false;
+  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
+  // nothing to do if orig_phi is bottom memory or matches alias_idx
+  if (phi_alias_idx == Compile::AliasIdxBot || phi_alias_idx == alias_idx) {
+    return orig_phi;
+  }
+  // have we already created a Phi for this alias index?
+  PhiNode *result = get_map_phi(orig_phi->_idx);
+  const TypePtr *atype = C->get_adr_type(alias_idx);
+  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
+    return result;
+  }
+
+  orig_phi_worklist.append_if_missing(orig_phi);
+  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
+  set_map_phi(orig_phi->_idx, result);
+  igvn->set_type(result, result->bottom_type());
+  record_for_optimizer(result);
+  new_created = true;
+  return result;
+}
+
+//
+// Return a new version  of Memory Phi "orig_phi" with the inputs having the
+// specified alias index.
+//
+PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
+
+  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
+  Compile *C = _compile;
+  bool new_phi_created;
+  PhiNode *result =  create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
+  if (!new_phi_created) {
+    return result;
+  }
+
+  GrowableArray<PhiNode *>  phi_list;
+  GrowableArray<uint>  cur_input;
+
+  PhiNode *phi = orig_phi;
+  uint idx = 1;
+  bool finished = false;
+  while(!finished) {
+    while (idx < phi->req()) {
+      Node *mem = find_mem(phi->in(idx), alias_idx, igvn);
+      if (mem != NULL && mem->is_Phi()) {
+        PhiNode *nphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
+        if (new_phi_created) {
+          // found a phi for which we created a new split, push current one on worklist and begin
+          // processing new one
+          phi_list.push(phi);
+          cur_input.push(idx);
+          phi = mem->as_Phi();
+          result = nphi;
+          idx = 1;
+          continue;
+        } else {
+          mem = nphi;
+        }
+      }
+      result->set_req(idx++, mem);
+    }
+#ifdef ASSERT
+    // verify that the new Phi has an input for each input of the original
+    assert( phi->req() == result->req(), "must have same number of inputs.");
+    assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
+    for (uint i = 1; i < phi->req(); i++) {
+      assert((phi->in(i) == NULL) == (result->in(i) == NULL), "inputs must correspond.");
+    }
+#endif
+    // we have finished processing a Phi, see if there are any more to do
+    finished = (phi_list.length() == 0 );
+    if (!finished) {
+      phi = phi_list.pop();
+      idx = cur_input.pop();
+      PhiNode *prev_phi = get_map_phi(phi->_idx);
+      prev_phi->set_req(idx++, result);
+      result = prev_phi;
+    }
+  }
+  return result;
+}
+
+//
+//  Convert the types of unescaped object to instance types where possible,
+//  propagate the new type information through the graph, and update memory
+//  edges and MergeMem inputs to reflect the new type.  
+//
+//  We start with allocations (and calls which may be allocations)  on alloc_worklist.
+//  The processing is done in 4 phases:
+//
+//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
+//            types for the CheckCastPP for allocations where possible.
+//            Propagate the new types through users as follows:
+//               casts and Phi:  push users on alloc_worklist
+//               AddP:  cast Base and Address inputs to the instance type
+//                      push any AddP users on alloc_worklist and push any memnode
+//                      users onto memnode_worklist.
+//  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
+//            search the Memory chain for a store with the appropriate
+//            address type.  If a Phi is found, create a new version with
+//            the appropriate memory slices from each of the Phi inputs.
+//            For stores, process the users as follows:
+//               MemNode:  push on memnode_worklist
+//               MergeMem: push on mergemem_worklist
+//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
+//            moving the first node encountered of each instance type to
+//            the input corresponding to its alias index
+//            (its appropriate memory slice).
+//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
+//
+// In the following example, the CheckCastPP nodes are the cast of allocation
+// results and the allocation of node 29 is unescaped and eligible to be an
+// instance type.
+//
+// We start with:
+//
+//     7 Parm #memory
+//    10  ConI  "12"
+//    19  CheckCastPP   "Foo"
+//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
+//    29  CheckCastPP   "Foo"
+//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
+//
+//    40  StoreP  25   7  20   ... alias_index=4
+//    50  StoreP  35  40  30   ... alias_index=4
+//    60  StoreP  45  50  20   ... alias_index=4
+//    70  LoadP    _  60  30   ... alias_index=4
+//    80  Phi     75  50  60   Memory alias_index=4
+//    90  LoadP    _  80  30   ... alias_index=4
+//   100  LoadP    _  80  20   ... alias_index=4
+//
+//
+// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
+// and creating a new alias index for node 30.  This gives:
+//
+//     7 Parm #memory
+//    10  ConI  "12"
+//    19  CheckCastPP   "Foo"
+//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
+//    29  CheckCastPP   "Foo"  iid=24
+//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
+//
+//    40  StoreP  25   7  20   ... alias_index=4
+//    50  StoreP  35  40  30   ... alias_index=6
+//    60  StoreP  45  50  20   ... alias_index=4
+//    70  LoadP    _  60  30   ... alias_index=6
+//    80  Phi     75  50  60   Memory alias_index=4
+//    90  LoadP    _  80  30   ... alias_index=6
+//   100  LoadP    _  80  20   ... alias_index=4
+//
+// In phase 2, new memory inputs are computed for the loads and stores,
+// and a new version of the Phi is created.  In phase 4, the inputs to
+// node 80 are updated and then the memory nodes are updated with the
+// values computed in phase 2.  This results in:
+//
+//     7 Parm #memory
+//    10  ConI  "12"
+//    19  CheckCastPP   "Foo"
+//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
+//    29  CheckCastPP   "Foo"  iid=24
+//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
+//
+//    40  StoreP  25  7   20   ... alias_index=4
+//    50  StoreP  35  7   30   ... alias_index=6
+//    60  StoreP  45  40  20   ... alias_index=4
+//    70  LoadP    _  50  30   ... alias_index=6
+//    80  Phi     75  40  60   Memory alias_index=4
+//   120  Phi     75  50  50   Memory alias_index=6
+//    90  LoadP    _ 120  30   ... alias_index=6
+//   100  LoadP    _  80  20   ... alias_index=4
+//
+void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
+  GrowableArray<Node *>  memnode_worklist;
+  GrowableArray<Node *>  mergemem_worklist;
+  GrowableArray<PhiNode *>  orig_phis;
+  PhaseGVN  *igvn = _compile->initial_gvn();
+  uint new_index_start = (uint) _compile->num_alias_types();
+  VectorSet visited(Thread::current()->resource_area());
+  VectorSet ptset(Thread::current()->resource_area());
+
+  //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
+  //            types for the CheckCastPP for allocations where possible.
+  while (alloc_worklist.length() != 0) {
+    Node *n = alloc_worklist.pop();
+    uint ni = n->_idx;
+    if (n->is_Call()) {
+      CallNode *alloc = n->as_Call();
+      // copy escape information to call node
+      PointsToNode ptn = _nodes->at(alloc->_idx);
+      PointsToNode::EscapeState es = escape_state(alloc, igvn);
+      alloc->_escape_state = es;
+      // find CheckCastPP of call return value
+      n = alloc->proj_out(TypeFunc::Parms);
+      if (n != NULL && n->outcnt() == 1) {
+        n = n->unique_out();
+        if (n->Opcode() != Op_CheckCastPP) {
+          continue;
+        }
+      } else {
+        continue;
+      }
+      // we have an allocation or call which returns a Java object, see if it is unescaped
+      if (es != PointsToNode::NoEscape || !ptn._unique_type) {
+        continue; //  can't make a unique type
+      }
+      set_map(alloc->_idx, n);
+      set_map(n->_idx, alloc);
+      const TypeInstPtr *t = igvn->type(n)->isa_instptr();
+      // Unique types which are arrays are not currently supported.
+      // The check for AllocateArray is needed in case an array
+      // allocation is immediately cast to Object
+      if (t == NULL || alloc->is_AllocateArray())
+        continue;  // not a TypeInstPtr
+      const TypeOopPtr *tinst = t->cast_to_instance(ni);
+      igvn->hash_delete(n);
+      igvn->set_type(n,  tinst);
+      n->raise_bottom_type(tinst);
+      igvn->hash_insert(n);
+    } else if (n->is_AddP()) {
+      ptset.Clear();
+      PointsTo(ptset, n->in(AddPNode::Address), igvn);
+      assert(ptset.Size() == 1, "AddP address is unique");
+      Node *base = get_map(ptset.getelem());
+      split_AddP(n, base, igvn);
+    } else if (n->is_Phi() || n->Opcode() == Op_CastPP || n->Opcode() == Op_CheckCastPP) {
+      if (visited.test_set(n->_idx)) {
+        assert(n->is_Phi(), "loops only through Phi's");
+        continue;  // already processed
+      }
+      ptset.Clear();
+      PointsTo(ptset, n, igvn);
+      if (ptset.Size() == 1) {
+        TypeNode *tn = n->as_Type();
+        Node *val = get_map(ptset.getelem());
+        const TypeInstPtr *val_t = igvn->type(val)->isa_instptr();;
+        assert(val_t != NULL && val_t->is_instance(), "instance type expected.");
+        const TypeInstPtr *tn_t = igvn->type(tn)->isa_instptr();;
+
+        if (tn_t != NULL && val_t->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) {
+          igvn->hash_delete(tn);
+          igvn->set_type(tn, val_t);
+          tn->set_type(val_t);
+          igvn->hash_insert(tn);
+        }
+      }
+    } else {
+      continue;
+    }
+    // push users on appropriate worklist
+    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+      Node *use = n->fast_out(i);
+      if(use->is_Mem() && use->in(MemNode::Address) == n) {
+        memnode_worklist.push(use);
+      } else if (use->is_AddP() || use->is_Phi() || use->Opcode() == Op_CastPP || use->Opcode() == Op_CheckCastPP) {
+        alloc_worklist.push(use);
+      }
+    }
+
+  }
+  uint new_index_end = (uint) _compile->num_alias_types();
+
+  //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
+  //            compute new values for Memory inputs  (the Memory inputs are not
+  //            actually updated until phase 4.)
+  if (memnode_worklist.length() == 0)
+    return;  // nothing to do
+
+
+  while (memnode_worklist.length() != 0) {
+    Node *n = memnode_worklist.pop();
+    if (n->is_Phi()) {
+      assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
+      // we don't need to do anything, but the users must be pushed if we haven't processed
+      // this Phi before
+      if (visited.test_set(n->_idx))
+        continue;
+    } else {
+      assert(n->is_Mem(), "memory node required.");
+      Node *addr = n->in(MemNode::Address);
+      const Type *addr_t = igvn->type(addr);
+      if (addr_t == Type::TOP)
+        continue;
+      assert (addr_t->isa_ptr() != NULL, "pointer type required.");
+      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
+      Node *mem = find_mem(n->in(MemNode::Memory), alias_idx, igvn);
+      if (mem->is_Phi()) {
+        mem = split_memory_phi(mem->as_Phi(), alias_idx, orig_phis, igvn);
+      }
+      if (mem != n->in(MemNode::Memory))
+        set_map(n->_idx, mem);
+      if (n->is_Load()) {
+        continue;  // don't push users
+      } else if (n->is_LoadStore()) {
+        // get the memory projection
+        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+          Node *use = n->fast_out(i);
+          if (use->Opcode() == Op_SCMemProj) {
+            n = use;
+            break;
+          }
+        }
+        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
+      }
+    }
+    // push user on appropriate worklist
+    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+      Node *use = n->fast_out(i);
+      if (use->is_Phi()) {
+        memnode_worklist.push(use);
+      } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
+        memnode_worklist.push(use);
+      } else if (use->is_MergeMem()) {
+        mergemem_worklist.push(use);
+      }
+    }
+  }
+
+  //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
+  //            moving the first node encountered of each instance type to
+  //            the input corresponding to its alias index.
+  while (mergemem_worklist.length() != 0) {
+    Node *n = mergemem_worklist.pop();
+    assert(n->is_MergeMem(), "MergeMem node required.");
+    MergeMemNode *nmm = n->as_MergeMem();
+    // Note: we don't want to use MergeMemStream here because we only want to
+    //       scan inputs which exist at the start, not ones we add during processing
+    uint nslices = nmm->req();
+    igvn->hash_delete(nmm);
+    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
+      Node * mem = nmm->in(i);
+      Node * cur = NULL;
+      if (mem == NULL || mem->is_top())
+        continue;
+      while (mem->is_Mem()) {
+        const Type *at = igvn->type(mem->in(MemNode::Address));
+        if (at != Type::TOP) {
+          assert (at->isa_ptr() != NULL, "pointer type required.");
+          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
+          if (idx == i) {
+            if (cur == NULL)
+              cur = mem;
+          } else {
+            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
+              nmm->set_memory_at(idx, mem);
+            }
+          }
+        }
+        mem = mem->in(MemNode::Memory);
+      }
+      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
+      if (mem->is_Phi()) {
+        // We have encountered a Phi, we need to split the Phi for
+        // any  instance of the current type if we haven't encountered
+        //  a value of the instance along the chain.
+        for (uint ni = new_index_start; ni < new_index_end; ni++) {
+          if((uint)_compile->get_general_index(ni) == i) {
+            Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
+            if (nmm->is_empty_memory(m)) {
+              nmm->set_memory_at(ni, split_memory_phi(mem->as_Phi(), ni, orig_phis, igvn));
+            }
+          }
+        }
+      }
+    }
+    igvn->hash_insert(nmm);
+    record_for_optimizer(nmm);
+  }
+
+  //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes
+  //
+  // First update the inputs of any non-instance Phi's from
+  // which we split out an instance Phi.  Note we don't have
+// to recursively process Phi's encountered on the input memory
+  // chains as is done in split_memory_phi() since they  will
+  // also be processed here.
+  while (orig_phis.length() != 0) {
+    PhiNode *phi = orig_phis.pop();
+    int alias_idx = _compile->get_alias_index(phi->adr_type());
+    igvn->hash_delete(phi);
+    for (uint i = 1; i < phi->req(); i++) {
+      Node *mem = phi->in(i);
+      Node *new_mem = find_mem(mem, alias_idx, igvn);
+      if (mem != new_mem) {
+        phi->set_req(i, new_mem);
+      }
+    }
+    igvn->hash_insert(phi);
+    record_for_optimizer(phi);
+  }
+
+  // Update the memory inputs of MemNodes with the value we computed
+  // in Phase 2.
+  for (int i = 0; i < _nodes->length(); i++) {
+    Node *nmem = get_map(i);
+    if (nmem != NULL) {
+      Node *n = _nodes->at(i)._node;
+      if (n != NULL && n->is_Mem()) {
+        igvn->hash_delete(n);
+        n->set_req(MemNode::Memory, nmem);
+        igvn->hash_insert(n);
+        record_for_optimizer(n);
+      }
+    }
+  }
+}
+
 void ConnectionGraph::compute_escape() {
-  GrowableArray<uint>  worklist;
+  GrowableArray<int>  worklist;
+  GrowableArray<Node *>  alloc_worklist;
   VectorSet visited(Thread::current()->resource_area());
   PhaseGVN  *igvn = _compile->initial_gvn();
 
@@ -320,13 +807,39 @@
     process_phi_escape(phi, igvn);
   }
 
-  // remove deferred edges from the graph
+  VectorSet ptset(Thread::current()->resource_area());
+
+  // remove deferred edges from the graph and collect
+  // information we will need for type splitting
   for (uint ni = 0; ni < (uint)_nodes->length(); ni++) {
     PointsToNode * ptn = _nodes->adr_at(ni);
     PointsToNode::NodeType nt = ptn->node_type();
 
+    if (nt == PointsToNode::UnknownType) {
+      continue;  // not a node we are interested in
+    }
+    Node *n = ptn->_node;
     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
       remove_deferred(ni);
+      if (n->is_AddP()) {
+        // if this AddP computes an address which may point to more than one
+        // object, nothing the address points to can be a unique type.
+        Node *base = n->in(AddPNode::Base);
+        ptset.Clear();
+        PointsTo(ptset, base, igvn);
+        if (ptset.Size() > 1) {
+          for( VectorSetI j(&ptset); j.test(); ++j ) {
+            PointsToNode *ptaddr = _nodes->adr_at(j.elem);
+            ptaddr->_unique_type = false;
+          }
+        }
+      }
+    } else if (n->is_Call()) {
+        // initialize _escape_state of calls to GlobalEscape
+        n->as_Call()->_escape_state = PointsToNode::GlobalEscape;
+        // push call on alloc_worklist (allocations are calls)
+        // for processing by split_unique_types()
+        alloc_worklist.push(n);
     }
   }
   // push all GlobalEscape nodes on the worklist
@@ -367,6 +880,10 @@
     }
   }
   _collecting = false;
+
+  // Now use the escape information to create unique types for
+  // unescaped objects
+  split_unique_types(alloc_worklist);
 }
 
 Node * ConnectionGraph::skip_casts(Node *n) {
@@ -502,9 +1019,13 @@
   switch (call->Opcode()) {
     case Op_Allocate:
     {
-      TypeNode *k = call->in(AllocateNode::KlassNode)->as_Type();
-      assert(k != NULL, "TypeNode required.");
-      const TypeKlassPtr *kt = k->type()->isa_klassptr();
+      Node *k = call->in(AllocateNode::KlassNode);
+      const TypeKlassPtr *kt;
+      if (k->Opcode() == Op_LoadKlass) {
+        kt = k->as_Load()->type()->isa_klassptr();
+      } else {
+        kt = k->as_Type()->type()->isa_klassptr();
+      }
       assert(kt != NULL, "TypeKlassPtr  required.");
       ciKlass* cik = kt->klass();
       ciInstanceKlass* ciik = cik->as_instance_klass();
@@ -776,6 +1297,18 @@
 
       break;
     }
+    case Op_CastPP:
+    case Op_CheckCastPP:
+    {
+      ptadr->set_node_type(PointsToNode::LocalVar);
+      int ti = n->in(1)->_idx;
+      if (_nodes->at(ti).node_type() == PointsToNode::JavaObject) {
+        add_pointsto_edge(n->_idx, ti);
+      } else {
+        add_deferred_edge(n->_idx, ti);
+      }
+      break;
+    }
     default:
       ;
       // nothing to do
--- a/hotspot/src/share/vm/opto/escape.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/escape.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)escape.hpp	1.8 07/05/05 17:06:17 JVM"
+#pragma ident "@(#)escape.hpp	1.9 07/05/17 15:58:25 JVM"
 #endif
 /*
  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -69,8 +69,9 @@
 //     Phi    (pointer values)
 //     LoadP
 //     Proj  (value returned from callnodes including allocations)
+//     CheckCastPP
 //
-// The LoadP and Proj behave like variables assigned to only once.  Only
+// The LoadP, Proj and CheckCastPP behave like variables assigned to only once.  Only
 // a Phi can have multiple assignments.  Each input to a Phi is treated
 // as an assignment to it.
 //
@@ -105,9 +106,11 @@
 class  PhiNode;
 class  PhaseTransform;
 class  Type;
+class  TypePtr;
 class  VectorSet;
 
 class PointsToNode {
+friend class ConnectionGraph;
 public:
   typedef enum {
     UnknownType    = 0,
@@ -143,6 +146,7 @@
   GrowableArray<uint>* _edges;  // outgoing edges
   int                  _offset; // for fields
 
+  bool       _unique_type;       // For allocated objects, this node may be a unique type
 public:
   Node*      _node;              // Ideal node corresponding to this PointsTo node
   int        _inputs_processed;  // the number of Phi inputs that have been processed so far
@@ -150,7 +154,7 @@
                                  // creating a hidden alias
 
 
-  PointsToNode(): _offset(-1), _type(UnknownType), _escape(UnknownEscape), _edges(NULL), _node(NULL), _inputs_processed(0), _hidden_alias(false) {}
+  PointsToNode(): _offset(-1), _type(UnknownType), _escape(UnknownEscape), _edges(NULL), _node(NULL), _inputs_processed(0), _hidden_alias(false), _unique_type(true) {}
 
   EscapeState escape_state() const { return _escape; }
   NodeType node_type() const { return _type;}
@@ -250,6 +254,37 @@
   // to "ni".
   void remove_deferred(uint ni);
 
+  Node_Array _node_map; // used for bookkeeping during type splitting
+                        // Used for the following purposes:
+                        // Memory Phi    - most recent unique Phi split out
+                        //                 from this Phi
+                        // MemNode       - new memory input for this node
+                        // CheckCastPP   - allocation that this is a cast of
+                        // allocation    - CheckCastPP of the allocation
+  void split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn);
+  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created);
+  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn);
+  Node *find_mem(Node *mem, int alias_idx, PhaseGVN  *igvn);
+  // Propagate unique types created for unescaped allocated objects
+  // through the graph
+  void split_unique_types(GrowableArray<Node *>  &alloc_worklist);
+
+  // manage entries in _node_map
+  void  set_map(int idx, Node *n)        { _node_map.map(idx, n); }
+  void  set_map_phi(int idx, PhiNode *p) { _node_map.map(idx, (Node *) p); }
+  Node *get_map(int idx)                 { return _node_map[idx]; }
+  PhiNode *get_map_phi(int idx) {
+    Node *phi = _node_map[idx];
+    return (phi == NULL) ? NULL : phi->as_Phi();
+  }
+
+  // Notify optimizer that a node has been modified
+  // Note:  This assumes that escape analysis is run before
+  //        PhaseIterGVN creation
+  void record_for_optimizer(Node *n) {
+    _compile->record_for_igvn(n);
+  }
+
   // Set the escape state of a node
   void set_escape_state(uint ni, PointsToNode::EscapeState es);
 
--- a/hotspot/src/share/vm/opto/gcm.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)gcm.cpp	1.250 07/05/05 17:06:09 JVM"
+#pragma ident "@(#)gcm.cpp	1.251 07/05/17 15:58:45 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -139,7 +139,7 @@
 // Find the earliest Block any instruction can be placed in.  Some instructions
 // are pinned into Blocks.  Unpinned instructions can appear in last block in 
 // which all their inputs occur.
-bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots, Block_Array &bbs) {
+bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
   // Allocate stack with enough space to avoid frequent realloc
   Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
   // roots.push(_root); _root will be processed among C->top() inputs
@@ -168,7 +168,7 @@
           const Node *p = in0->is_block_proj();
           if (p != NULL && p != n) {    // Control from a block projection?
             // Find trailing Region
-            Block *pb = bbs[in0->_idx]; // Block-projection already has basic block
+            Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
             uint j = 0;
             if (pb->_num_succs != 1) {  // More then 1 successor?
               // Search for successor
@@ -203,7 +203,7 @@
         ++i; 
         if (in == NULL) continue;    // Ignore NULL, missing inputs
         int is_visited = visited.test_set(in->_idx);
-        if (!bbs.lookup(in->_idx)) { // Missing block selection?
+        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
           if (is_visited) {
             // assert( !visited.test(in->_idx), "did not schedule early" );
             return false;
@@ -225,7 +225,7 @@
         // any projections which depend on them.
         if (!n->pinned()) {
           // Set earliest legal block.
-          bbs.map(n->_idx, find_deepest_input(n, bbs));
+          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
         }
 
         if (nstack.is_empty()) {
@@ -786,83 +786,87 @@
     self = unvisited;
   } // End recursion loop
 
-  return (self);
+  return self;
 }
 
 //------------------------------ComputeLatenciesBackwards----------------------
 // Compute the latency of all the instructions.
-void PhaseCFG::ComputeLatenciesBackwards( VectorSet &visited, Node_List &stack, GrowableArray<uint> &node_latency ) {
+void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
 #ifndef PRODUCT
-  if (TraceOptoPipelining)
+  if (trace_opto_pipelining())
     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
 #endif
 
   Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
-  Node *self;
+  Node *n;
 
   // Walk over all the nodes from last to first
-  while (self = iter.next()) {
+  while (n = iter.next()) {
     // Set the latency for the definitions of this instruction
-    self->partial_latency_of_defs(_bbs, node_latency);
+    partial_latency_of_defs(n);
   }
 } // end ComputeLatenciesBackwards
 
 //------------------------------partial_latency_of_defs------------------------
-// Compute the latency impact of this instruction on all defs.  This computes
+// Compute the latency impact of this node on all defs.  This computes
 // a number that increases as we approach the beginning of the routine.
-void Node::partial_latency_of_defs(Block_Array &bbs, GrowableArray<uint> &node_latency) {
+void PhaseCFG::partial_latency_of_defs(Node *n) {
   // Set the latency for this instruction
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("# Backward: latency %3d for ", node_latency.at_grow(_idx));
+  if (trace_opto_pipelining()) {
+    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
+               n->_idx, _node_latency.at_grow(n->_idx));
     dump();
-    tty->print("# now scanning defs");
   }
 #endif
 
-  Node *use = is_Proj() ? in(0) : this;
+  if (n->is_Proj())
+    n = n->in(0);
 
-  if (use->is_Root())
+  if (n->is_Root())
     return;
 
-  uint nlen = use->len();
-  uint use_latency = node_latency.at_grow(use->_idx);
-  uint use_pre_order = bbs[use->_idx]->_pre_order;
+  uint nlen = n->len();
+  uint use_latency = _node_latency.at_grow(n->_idx);
+  uint use_pre_order = _bbs[n->_idx]->_pre_order;
 
   for ( uint j=0; j<nlen; j++ ) {
-    Node *def = use->in(j);
+    Node *def = n->in(j);
 
-    if (!def || def == use)
+    if (!def || def == n)
       continue;
       
     // Walk backwards thru projections
-    Node *real_def = def->is_Proj() ? def->in(0) : def;
+    if (def->is_Proj())
+      def = def->in(0);
 
 #ifndef PRODUCT
-    if (TraceOptoPipelining) {
-      tty->print("#    thru in(%2d): ", j);
-      real_def->dump(); 
+    if (trace_opto_pipelining()) {
+      tty->print("#    in(%2d): ", j);
+      def->dump(); 
     }
 #endif
 
     // If the defining block is not known, assume it is ok
-    Block *def_block = bbs[real_def->_idx];
+    Block *def_block = _bbs[def->_idx];
     uint def_pre_order = def_block ? def_block->_pre_order : 0;
 
     if ( (use_pre_order <  def_pre_order) ||
-         (use_pre_order == def_pre_order && use->is_Phi()) )
+         (use_pre_order == def_pre_order && n->is_Phi()) )
       continue;
 
-    uint delta_latency = use->latency(j);
+    uint delta_latency = n->latency(j);
     uint current_latency = delta_latency + use_latency;
 
-    if (node_latency.at_grow(real_def->_idx) < current_latency)
-      node_latency.at_put_grow(real_def->_idx, current_latency);
+    if (_node_latency.at_grow(def->_idx) < current_latency) {
+      _node_latency.at_put_grow(def->_idx, current_latency);
+    }
 
 #ifndef PRODUCT
-    if (TraceOptoPipelining) {
-      tty->print("#      [%4d]->latency(%d) == %d (%d), node_latency.at_grow(%4d) == %d\n",
-        use->_idx, j, delta_latency, current_latency, real_def->_idx, node_latency.at_grow(real_def->_idx));
+    if (trace_opto_pipelining()) {
+      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
+                    use_latency, j, delta_latency, current_latency, def->_idx, 
+                    _node_latency.at_grow(def->_idx));
     }
 #endif
   }
@@ -870,24 +874,24 @@
 
 //------------------------------latency_from_use-------------------------------
 // Compute the latency of a specific use
-int Node::latency_from_use(Block_Array &bbs, GrowableArray<uint> &node_latency, const Node *def, Node *use) const {
+int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
+  // If self-reference, return no latency
+  if (use == n || use->is_Root())
+    return 0;
+    
+  uint def_pre_order = _bbs[def->_idx]->_pre_order;
+  uint latency = 0;
+
+  // If the use is not a projection, then it is simple...
+  if (!use->is_Proj()) {
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("#      ");
-    use->dump();
-  }
+    if (trace_opto_pipelining()) {
+      tty->print("#    out(): ");
+      use->dump();
+    }
 #endif
 
-  // If self-reference, return no latency
-  if (use == this || use->is_Root())
-    return 0;
-    
-  uint def_pre_order = bbs[def->_idx]->_pre_order;
-  uint latency = 0;
-
-  // If this is not a projection, then it is simple...
-  if (!use->is_Proj()) {
-    uint use_pre_order = bbs[use->_idx]->_pre_order;
+    uint use_pre_order = _bbs[use->_idx]->_pre_order;
 
     if (use_pre_order < def_pre_order)
       return 0;
@@ -896,70 +900,70 @@
       return 0;
 
     uint nlen = use->len();
-    uint nl = node_latency.at_grow(use->_idx);
+    uint nl = _node_latency.at_grow(use->_idx);
 
     for ( uint j=0; j<nlen; j++ ) {
-      if (use->in(j) == this) {
+      if (use->in(j) == n) {
         // Change this if we want local latencies
         uint ul = use->latency(j);
         uint  l = ul + nl;
+        if (latency < l) latency = l;
 #ifndef PRODUCT
-        if (TraceOptoPipelining) {
-          tty->print("#      [%4d]->latency(%d) == %d, node_latency.at_grow(%4d) == %d\n",
-            use->_idx, j, ul, use->_idx, nl);
+        if (trace_opto_pipelining()) {
+          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
+                        nl, j, ul, l, latency);
         }
 #endif
-        if (latency < l) latency = l;
       }
     }
-  }
-
-  // This is a projection, just grab the latency of the use(s)
-  else {
+  } else {
+    // This is a projection, just grab the latency of the use(s)
     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
-      uint l = use->latency_from_use(bbs, node_latency, def, use->fast_out(j));
+      uint l = latency_from_use(use, def, use->fast_out(j));
       if (latency < l) latency = l;
     }
   }
 
-  return (latency);
+  return latency;
 }
 
 //------------------------------latency_from_uses------------------------------
 // Compute the latency of this instruction relative to all of it's uses.
 // This computes a number that increases as we approach the beginning of the
 // routine.
-int Node::latency_from_uses(Block_Array &bbs, GrowableArray<uint> &node_latency) const {
+void PhaseCFG::latency_from_uses(Node *n) {
   // Set the latency for this instruction
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("# scanning uses for ");
+  if (trace_opto_pipelining()) {
+    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", 
+               n->_idx, _node_latency.at_grow(n->_idx));
     dump();
   }
 #endif
   uint latency=0;
-  const Node *def = this->is_Proj() ? in(0): this;
+  const Node *def = n->is_Proj() ? n->in(0): n;
 
-  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
-    uint l = latency_from_use(bbs, node_latency, def, fast_out(i));
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    uint l = latency_from_use(n, def, n->fast_out(i));
 
     if (latency < l) latency = l;
   }
 
-  return (latency);
+  _node_latency.at_put_grow(n->_idx, latency);
 }
 
-static Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self,
-                                     Compile* C, const Block* root_block,
-                                     GrowableArray<uint> &node_latency,
-                                     Block_Array &bbs) {
-  const double delta = 1.0001;
+//------------------------------hoist_to_cheaper_block-------------------------
+// Pick a block for node self, between early and LCA, that is a cheaper 
+// alternative to LCA.
+Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
+  const double delta = 1+PROB_UNLIKELY_MAG(4);
   Block* least       = LCA;
   double least_freq  = least->_freq;
-  uint target        = node_latency.at_grow(self->_idx);
-  uint start_latency = node_latency.at_grow(LCA->_nodes[0]->_idx);
-  uint end_latency   = node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
+  uint target        = _node_latency.at_grow(self->_idx);
+  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
+  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   bool in_latency    = (target <= start_latency);
+  const Block* root_block = _bbs[_root->_idx];
 
   // Turn off latency scheduling if scheduling is just plain off
   if (!C->do_scheduling())
@@ -973,11 +977,11 @@
     in_latency = true;
 
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("# Choose block for latency %3d: ",
-      node_latency.at_grow(self->_idx));
+  if (trace_opto_pipelining()) {
+    tty->print("# Find cheaper block for latency %d: ",
+      _node_latency.at_grow(self->_idx));
     self->dump();
-    tty->print("#  BB#%03d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g\n",
+    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
       LCA->_nodes[0]->_idx,
       start_latency,
@@ -1002,13 +1006,13 @@
     if (mach && LCA == root_block)
       break;
 
-    uint start_lat = node_latency.at_grow(LCA->_nodes[0]->_idx);
+    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
+    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
-    if (TraceOptoPipelining) {
-      tty->print("#  BB#%03d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g\n",
+    if (trace_opto_pipelining()) {
+      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
         LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
     }
 #endif
@@ -1030,8 +1034,8 @@
   }
 
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("# Choose block BB#%03d with start latency=%3d and freq=%g\n",
+  if (trace_opto_pipelining()) {
+    tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
       least->_pre_order, start_latency, least_freq);
   }
 #endif
@@ -1039,12 +1043,12 @@
   // See if the latency needs to be updated
   if (target < end_latency) {
 #ifndef PRODUCT
-    if (TraceOptoPipelining) {
-      tty->print("# Change latency for [%4d] from %3d to %3d\n", self->_idx, target, end_latency);
+    if (trace_opto_pipelining()) {
+      tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
     }
 #endif
-    node_latency.at_put_grow(self->_idx, end_latency);
-    self->partial_latency_of_defs(bbs, node_latency);
+    _node_latency.at_put_grow(self->_idx, end_latency);
+    partial_latency_of_defs(self);
   }
 
   return least;
@@ -1056,10 +1060,14 @@
 // dominator tree of all USES of a value.  Pick the block with the least
 // loop nesting depth that is lowest in the dominator tree.
 extern const char must_clone[];
-void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack, GrowableArray<uint> &node_latency) {
+void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
+#ifndef PRODUCT
+  if (trace_opto_pipelining())
+    tty->print("\n#---- schedule_late ----\n");
+#endif
+
   Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
   Node *self;
-  const Block* root_block = _bbs[_root->_idx];
 
   // Walk over all the nodes from last to first
   while (self = iter.next()) {
@@ -1159,8 +1167,7 @@
       // Now find the block with the least execution frequency.
       // Start at the latest schedule and work up to the earliest schedule
       // in the dominator tree.  Thus the Node will dominate all its uses.
-      late = hoist_to_cheaper_block(LCA, early, self,
-                                    C, root_block, node_latency, _bbs);
+      late = hoist_to_cheaper_block(LCA, early, self);
     } else {
       // Just use the LCA of the uses.
       late = LCA;
@@ -1185,6 +1192,12 @@
 void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
   ResourceMark rm;
 
+#ifndef PRODUCT
+  if (trace_opto_pipelining()) {
+    tty->print("\n---- Start GlobalCodeMotion ----\n");
+  }
+#endif
+
   // Initialize the bbs.map for things on the proj_list
   uint i;
   for( i=0; i < proj_list.size(); i++ )
@@ -1201,7 +1214,7 @@
   visited.Clear();
   Node_List stack(a);
   stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
-  if (!schedule_early(visited,stack,_bbs)) {
+  if (!schedule_early(visited, stack)) {
     // Bailout without retry
     C->record_method_not_compilable("early schedule failed");
     return;
@@ -1214,16 +1227,16 @@
   // Compute the latency information (via backwards walk) for all the
   // instructions in the graph
   GrowableArray<uint> node_latency;
-
+  _node_latency = node_latency;
 
   if( C->do_scheduling() )
-    ComputeLatenciesBackwards(visited, stack, node_latency); 
+    ComputeLatenciesBackwards(visited, stack); 
 
   // Now schedule all codes as LATE as possible.  This is the LCA in the 
   // dominator tree of all USES of a value.  Pick the block with the least
   // loop nesting depth that is lowest in the dominator tree.  
   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
-  schedule_late(visited, stack, node_latency);
+  schedule_late(visited, stack);
   if( C->failing() ) {
     // schedule_late fails only when graph is incorrect.
     assert(!VerifyGraphEdges, "verification should have failed");
@@ -1232,6 +1245,12 @@
 
   unique = C->unique();
 
+#ifndef PRODUCT
+  if (trace_opto_pipelining()) {
+    tty->print("\n---- Detect implicit null checks ----\n");
+  }
+#endif
+
   // Detect implicit-null-check opportunities.  Basically, find NULL checks 
   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   // I can generate a memory op if there is not one nearby.
@@ -1247,12 +1266,11 @@
     }
     // By reversing the loop direction we get a very minor gain on mpegaudio.
     // Feel free to revert to a forward loop for clarity.
-//    for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
+    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
     for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
       Node *proj = matcher._null_check_tests[i  ];
       Node *val  = matcher._null_check_tests[i+1];
-      _bbs[proj->_idx]->implicit_null_check(_bbs, node_latency,
-                                            proj, val, allowed_reasons);
+      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1261,13 +1279,19 @@
     }
   }
 
+#ifndef PRODUCT
+  if (trace_opto_pipelining()) {
+    tty->print("\n---- Start Local Scheduling ----\n");
+  }
+#endif
+
   // Schedule locally.  Right now a simple topological sort.
   // Later, do a real latency aware scheduler.
   int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
   memset( ready_cnt, -1, C->unique() * sizeof(int) );
   visited.Clear();
   for (i = 0; i < _num_blocks; i++) {
-    if (!_blocks[i]->schedule_local(matcher, _bbs, ready_cnt, visited, node_latency)) {
+    if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
       }
@@ -1281,13 +1305,10 @@
     _blocks[i]->call_catch_cleanup(_bbs);
 
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
+  if (trace_opto_pipelining()) {
     tty->print("\n---- After GlobalCodeMotion ----\n");
     for (uint i = 0; i < _num_blocks; i++) {
-      tty->print("\nBB#%03d:\n", i);
-      Block *bb = _blocks[i];
-      for (uint j = 0; j < bb->_nodes.size(); j++)
-        bb->_nodes[j]->dump();
+      _blocks[i]->dump();
     }
   }
 #endif
@@ -1308,7 +1329,7 @@
 void PhaseCFG::Estimate_Block_Frequency() {
   assert( _blocks[0] == _broot, "" );
   int cnts = C->method() ? C->method()->interpreter_invocation_count() : 1;
-  // Most of our algoritms will die horribly if frequency can become
+  // Most of our algorithms will die horribly if frequency can become
   // negative so make sure cnts is a sane value.
   if( cnts <= 0 ) cnts = 1;
   float f = (float)cnts/(float)FreqCountInvocations;
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)graphKit.cpp	1.126 07/05/05 17:06:15 JVM"
+#pragma ident "@(#)graphKit.cpp	1.128 07/05/17 17:43:32 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -460,10 +460,13 @@
 
 //------------------------------builtin_throw----------------------------------
 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
+  bool must_throw = true;
+
   if (JvmtiExport::can_post_exceptions()) {
     // Do not try anything fancy if we're notifying the VM on every throw.
     // Cf. case Bytecodes::_athrow in parse2.cpp.
-    uncommon_trap(reason, Deoptimization::Action_none);
+    uncommon_trap(reason, Deoptimization::Action_none,
+                  (ciKlass*)NULL, (char*)NULL, must_throw);
     return;
   }
 
@@ -569,7 +572,6 @@
     action = Deoptimization::Action_none;
   }
 
-  bool must_throw = true;
   // "must_throw" prunes the JVM state to include only the stack, if there
   // are no local exception handlers.  This should cut down on register
   // allocation time and code size, by drastically reducing the number
@@ -762,31 +764,10 @@
   // input to the current bytecode.
   bool can_prune_locals = false;
   uint stack_slots_not_pruned = 0;
+  int inputs = 0, depth = 0;
   if (must_throw) {
     assert(method() == youngest_jvms->method(), "sanity");
-    Bytecodes::Code code = java_bc();
-    if (code != Bytecodes::_illegal) {
-      BasicType rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
-      uint      depth = Bytecodes::depth(code);       // checkcast=0, athrow=-1
-      uint      inputs = - ((int)depth);
-      if (rtype != T_ILLEGAL) {
-        inputs += type2size[rtype];  // output reduces apparent negative depth
-      } else {
-        switch (code) {
-        case Bytecodes::_putfield:
-        case Bytecodes::_getfield:
-        case Bytecodes::_putstatic:
-        case Bytecodes::_getstatic:
-          // conservative estimate; don't bother to refine
-          inputs += type2size[T_LONG];
-          break;
-        default:
-          // Must be an invoke.  Keep the whole stack.
-          inputs = max_uint;
-          break;
-        }
-      }
-      // inputs:  checkcast=1, athrow=1
+    if (compute_stack_effects(inputs, depth)) {
       can_prune_locals = true;
       stack_slots_not_pruned = inputs;
     }
@@ -902,6 +883,131 @@
   assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
 }
 
+bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
+  Bytecodes::Code code = java_bc();
+  if (code == Bytecodes::_wide) {
+    code = method()->java_code_at_bci(bci() + 1);
+  }
+
+  BasicType rtype = T_ILLEGAL;
+  int       rsize = 0;
+
+  if (code != Bytecodes::_illegal) {
+    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
+    rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
+    if (rtype < T_CONFLICT)
+      rsize = type2size[rtype];
+  }
+
+  switch (code) {
+  case Bytecodes::_illegal:
+    return false;
+
+  case Bytecodes::_ldc:
+  case Bytecodes::_ldc_w:
+  case Bytecodes::_ldc2_w:
+    inputs = 0;
+    break;
+
+  case Bytecodes::_dup:         inputs = 1;  break;
+  case Bytecodes::_dup_x1:      inputs = 2;  break;
+  case Bytecodes::_dup_x2:      inputs = 3;  break;
+  case Bytecodes::_dup2:        inputs = 2;  break;
+  case Bytecodes::_dup2_x1:     inputs = 3;  break;
+  case Bytecodes::_dup2_x2:     inputs = 4;  break;
+  case Bytecodes::_swap:        inputs = 2;  break;
+  case Bytecodes::_arraylength: inputs = 1;  break;
+
+  case Bytecodes::_getstatic:
+  case Bytecodes::_putstatic:
+  case Bytecodes::_getfield:
+  case Bytecodes::_putfield:
+    {
+      bool is_get = (depth >= 0), is_static = (depth & 1);
+      bool ignore;
+      ciBytecodeStream iter(method());
+      iter.reset_to_bci(bci());
+      iter.next();
+      ciField* field = iter.get_field(ignore);
+      int      size  = field->type()->size();
+      inputs  = (is_static ? 0 : 1);
+      if (is_get) {
+        depth = size - inputs;
+      } else {
+        inputs += size;        // putxxx pops the value from the stack
+        depth = - inputs;
+      }
+    }
+    break;
+
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokeinterface:
+    {
+      bool is_static = (depth == 0);
+      bool ignore;
+      ciBytecodeStream iter(method());
+      iter.reset_to_bci(bci());
+      iter.next();
+      ciMethod* method = iter.get_method(ignore);
+      inputs = method->arg_size_no_receiver();
+      if (!is_static)  inputs += 1;
+      int size = method->return_type()->size();
+      depth = size - inputs;
+    }
+    break;
+
+  case Bytecodes::_multianewarray:
+    {
+      ciBytecodeStream iter(method());
+      iter.reset_to_bci(bci());
+      iter.next();
+      inputs = iter.get_dimensions();
+      assert(rsize == 1, "");
+      depth = rsize - inputs;
+    }
+    break;
+
+  case Bytecodes::_ireturn:
+  case Bytecodes::_lreturn:
+  case Bytecodes::_freturn:
+  case Bytecodes::_dreturn:
+  case Bytecodes::_areturn:
+    assert(rsize = -depth, "");
+    inputs = rsize;
+    break;
+
+  case Bytecodes::_jsr:
+  case Bytecodes::_jsr_w:
+    inputs = 0;
+    depth  = 1;                  // S.B. depth=1, not zero
+    break;
+
+  default:
+    // bytecode produces a typed result
+    inputs = rsize - depth;
+    assert(inputs >= 0, "");
+    break;
+  }
+
+#ifdef ASSERT
+  // spot check
+  int outputs = depth + inputs;
+  assert(outputs >= 0, "sanity");
+  switch (code) {
+  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
+  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
+  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
+  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
+  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
+  }
+#endif //ASSERT
+
+  return true;
+}
+
+
 
 //------------------------------basic_plus_adr---------------------------------
 Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
@@ -1439,6 +1545,19 @@
   // occurs here, the runtime will make sure an MDO exists.  There is
   // no need to call method()->build_method_data() at this point.
 
+#ifdef ASSERT
+  if (!must_throw) {
+    // Make sure the stack has at least enough depth to execute
+    // the current bytecode.
+    int inputs, ignore;
+    if (compute_stack_effects(inputs, ignore)) {
+      assert(sp() >= inputs, "must have enough JVMS stack to execute");
+      // It is a frequent error in library_call.cpp to issue an
+      // uncommon trap with the _sp value already popped.
+    }
+  }
+#endif
+
   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 
@@ -2043,6 +2162,10 @@
 // (2) subklass does not overlap with superklass => always fail
 // (3) superklass has NO subtypes and we can check with a simple compare.
 int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) {
+  if (StressReflectiveCode) {
+    return SSC_full_test;       // Let caller generate the general case.
+  }
+
   if (superk == env()->Object_klass()) {
     return SSC_always_true;     // (0) this test cannot fail
   }
@@ -2391,6 +2514,10 @@
   Node* mem = reset_memory();
 
   FastLockNode * flock = _gvn.transform(new (C, 3) FastLockNode(0, obj, box) )->as_FastLock();
+  if (PrintPreciseBiasedLockingStatistics) {
+    // Create the counters for this fast lock.
+    flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
+  }
   // Add monitor to debug info for the slow path.  If we block inside the
   // slow path and de-opt, we need the monitor hanging around
   map()->push_monitor( flock );
@@ -2490,9 +2617,6 @@
   }
 }
 
-// Temporary definition, until we promote it to a global switch:
-static const bool StressReflectiveCode = false;
-
 //-------------------------------get_layout_helper-----------------------------
 // If the given klass is a constant or known to be an array,
 // fetch the constant layout helper value into constant_value
@@ -2663,6 +2787,22 @@
   Node* layout_val = get_layout_helper(klass_node, layout_con);
   int   layout_is_con = (layout_val == NULL);
 
+  if (!layout_is_con && !StressReflectiveCode &&
+      !too_many_traps(Deoptimization::Reason_class_check)) {
+    // This is a reflective array creation site.
+    // Optimistically assume that it is a subtype of Object[],
+    // so that we can fold up all the address arithmetic.
+    layout_con = Klass::array_layout_helper(T_OBJECT);
+    Node* cmp_lh = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(layout_con)) );
+    Node* bol_lh = _gvn.transform( new(C, 2) BoolNode(cmp_lh, BoolTest::eq) );
+    { BuildCutout unless(this, bol_lh, PROB_MAX);
+      uncommon_trap(Deoptimization::Reason_class_check,
+                    Deoptimization::Action_maybe_recompile);
+    }
+    layout_val = NULL;
+    layout_is_con = true;
+  }
+
   // Generate the initial go-slow test.  Make sure we do not overflow
   // if length is huge (near 2Gig) or negative!  We do not need
   // exact double-words here, just a close approximation of needed
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)graphKit.hpp	1.55 07/05/05 17:06:17 JVM"
+#pragma ident "@(#)graphKit.hpp	1.56 07/05/17 15:58:52 JVM"
 #endif
 /*
  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -251,6 +251,11 @@
   void add_safepoint_edges(SafePointNode* call,
                            bool must_throw = false);
 
+  // How many stack inputs does the current BC consume?
+  // And, how does the stack change after the bytecode?
+  // Returns false if unknown.
+  bool compute_stack_effects(int& inputs, int& depth);
+
   // Add a fixed offset to a pointer
   Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
     return basic_plus_adr(base, ptr, MakeConX(offset));
@@ -432,12 +437,6 @@
   // CMS card-marks have an input from the corresponding oop_store
   void  cms_card_mark(Node* ctl, Node* adr, Node* val, Node* oop_store);
 
-#ifdef JVMPI_SUPPORT
-  //---------------- JVMPI support --------------------
-  void make_jvmpi_method_entry();
-  void make_jvmpi_method_exit(ciMethod* method);
-#endif // JVMPI_SUPPORT
-
   //---------------- Dtrace support --------------------
   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
   void make_dtrace_method_entry(ciMethod* method) {
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)ifnode.cpp	1.59 07/05/05 17:06:12 JVM"
+#pragma ident "@(#)ifnode.cpp	1.60 07/05/17 17:43:44 JVM"
 #endif
 /*
  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -418,17 +418,24 @@
   // along the OOB path.  Otherwise, it's possible that the user wrote
   // something which optimized to look like a range check but behaves
   // in some other way.
-  ProjNode* iftrap = proj_out(flip_test == 2 ? true : false);
+  Node* iftrap = proj_out(flip_test == 2 ? true : false);
   bool found_trap = false;
   if (iftrap != NULL) {
-    for (DUIterator_Fast jmax, j = iftrap->fast_outs(jmax); j < jmax; j++) {
-      Node* u = iftrap->fast_out(j);
+    Node* u = iftrap->unique_ctrl_out();
+    if (u != NULL) {
+      // It could be a merge point (Region) for uncommon trap.
+      if (u->is_Region()) {
+        Node* c = u->unique_ctrl_out();
+        if (c != NULL) {
+          iftrap = u;
+          u = c;
+        }
+      }
       if (u->in(0) == iftrap && u->is_CallStaticJava()) {
         int req = u->as_CallStaticJava()->uncommon_trap_request();
         if (Deoptimization::trap_request_reason(req) ==
             Deoptimization::Reason_range_check) {
           found_trap = true;
-          break;
         }
       }
     }
--- a/hotspot/src/share/vm/opto/lcm.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)lcm.cpp	1.101 07/05/05 17:06:16 JVM"
+#pragma ident "@(#)lcm.cpp	1.102 07/05/17 15:58:55 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -36,7 +36,7 @@
 // I can generate a memory op if there is not one nearby.
 // The proj is the control projection for the not-null case.
 // The val is the pointer being checked for nullness.
-void Block::implicit_null_check(Block_Array &bbs, GrowableArray<uint> &latency, Node *proj, Node *val, int allowed_reasons) {
+void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
   // Assume if null check need for 0 offset then always needed
   // Intel solaris doesn't support any null checks yet and no
   // mechanism exists (yet) to set the switches at an os_cpu level
@@ -161,7 +161,7 @@
     }
 
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = bbs[mach->_idx];
+    Block *cb = cfg->_bbs[mach->_idx];
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
       while( cb->_dom_depth > (_dom_depth + 1))
@@ -182,7 +182,7 @@
     for( j = mach->req()-1; j > 0; j-- ) {
       if( mach->in(j) == val ) vidx = j;
       // Block of memory-op input
-      Block *inb = bbs[mach->in(j)->_idx];
+      Block *inb = cfg->_bbs[mach->in(j)->_idx];
       Block *b = this;          // Start from nul check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
@@ -192,7 +192,7 @@
     }
     if( j > 0 ) 
       continue;
-    Block *mb = bbs[mach->_idx]; 
+    Block *mb = cfg->_bbs[mach->_idx]; 
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
@@ -211,7 +211,7 @@
           break;                // Found anti-dependent load
         // Make sure control does not do a merge (would have to check allpaths)
         if( b->num_preds() != 2 ) break;
-        b = bbs[b->pred(1)->_idx]; // Move up to predecessor block
+        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
       }
       if( b != this ) continue;
     }
@@ -224,7 +224,7 @@
     // Found a candidate!  Pick one with least dom depth - the highest 
     // in the dom tree should be closest to the null check.
     if( !best || 
-        bbs[mach->_idx]->_dom_depth < bbs[best->_idx]->_dom_depth ) {
+        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
       best = mach;
       bidx = vidx;
 
@@ -238,10 +238,10 @@
   implicit_null_checks++;
 
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = bbs[best->_idx];
+  Block *old_block = cfg->_bbs[best->_idx];
   old_block->find_remove(best);
   add_inst(best);
-  bbs.map(best->_idx,this);
+  cfg->_bbs.map(best->_idx,this);
 
   // Move the control dependence
   if (best->in(0) && best->in(0) == old_block->_nodes[0])
@@ -252,13 +252,13 @@
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->Opcode() == Op_MachProj ) {
-      bbs[n->_idx]->find_remove(n);
+      cfg->_bbs[n->_idx]->find_remove(n);
       add_inst(n);
-      bbs.map(n->_idx,this);
+      cfg->_bbs.map(n->_idx,this);
     }
   }
 
-  Compile *C = Compile::current();
+  Compile *C = cfg->C;
   // proj==Op_True --> ne test; proj==Op_False --> eq test.
   // One of two graph shapes got matched:
   //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
@@ -285,24 +285,16 @@
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
   _nodes.map(end_idx(),nul_chk);
-  bbs.map(nul_chk->_idx,this);
+  cfg->_bbs.map(nul_chk->_idx,this);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
   // Clean-up any dead code
   for (uint i3 = 0; i3 < old_tst->req(); i3++)
     old_tst->set_req(i3, NULL);
-  latency.at_put_grow(nul_chk->_idx, nul_chk->latency_from_uses(bbs, latency));
-  latency.at_put_grow(best   ->_idx, best   ->latency_from_uses(bbs, latency));
 
-#ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("# implicit_null_check: latency %4d for ", latency.at_grow(best->_idx));
-    best->fast_dump();
-    tty->print("# implicit_null_check: latency %4d for ", latency.at_grow(nul_chk->_idx));
-    nul_chk->fast_dump();
-  }
-#endif
+  cfg->latency_from_uses(nul_chk);
+  cfg->latency_from_uses(best);
 }
 
 
@@ -316,7 +308,7 @@
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the most number of pseudo-cycles required to the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(Node_List &worklist, Block_Array &bbs, int *ready_cnt, VectorSet &next_call, uint sched_slot, GrowableArray<uint> &node_latency) {
+Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {
 
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -370,7 +362,7 @@
         Node* use = n->fast_out(j);
 
         // The use is a conditional branch, make them adjacent
-        if (use->is_MachIf() && bbs[use->_idx]==this ) {
+        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
           found_machif = true;
           break;
         }
@@ -403,7 +395,7 @@
       n_choice = 1;
     }
 
-    uint n_latency = node_latency.at_grow(n->_idx);
+    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
@@ -460,7 +452,7 @@
 }
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &m, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -493,12 +485,12 @@
 
   // Act as if the call defines the Frame Pointer.
   // Certainly the FP is alive and well after the call.
-  regs.Insert(m.c_frame_pointer());
+  regs.Insert(matcher.c_frame_pointer());
 
   // Set all registers killed and not already defined by the call.
   uint r_cnt = mcall->tf()->range()->cnt();
   int op = mcall->ideal_Opcode();
-  MachProjNode *proj = new (m.C, 1) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+  MachProjNode *proj = new (matcher.C, 1) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
   bbs.map(proj->_idx,this);
   _nodes.insert(node_cnt++, proj);
 
@@ -509,13 +501,13 @@
     case Op_CallLeaf:
     case Op_CallLeafNoFP:
       // Calling C code so use C calling convention
-      save_policy = m._c_reg_save_policy;
+      save_policy = matcher._c_reg_save_policy;
       break;
 
     case Op_CallStaticJava:
     case Op_CallDynamicJava:
       // Calling Java code so use Java calling convention
-      save_policy =  m._register_save_policy;
+      save_policy = matcher._register_save_policy;
       break;
 
     default:
@@ -550,20 +542,20 @@
 
 //------------------------------schedule_local---------------------------------
 // Topological sort within a block.  Someday become a real scheduler.
-bool Block::schedule_local(Matcher &matcher, Block_Array &bbs,int *ready_cnt, VectorSet &next_call, GrowableArray<uint> &node_latency) {
+bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
   // Already "sorted" are the block start Node (as the first entry), and
   // the block-ending Node and any trailing control projections.  We leave
   // these alone.  PhiNodes and ParmNodes are made to follow the block start
   // Node.  Everything else gets topo-sorted.
 
 #ifndef PRODUCT
-    if (TraceOptoPipelining) {
-      tty->print("# before schedule_local\n");
+    if (cfg->trace_opto_pipelining()) {
+      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
       for (uint i = 0;i < _nodes.size();i++) {
         tty->print("# ");
         _nodes[i]->fast_dump();
       }
-      tty->print("\n");
+      tty->print_cr("#");
     }
 #endif
 
@@ -587,7 +579,7 @@
       uint local = 0;
       for( uint j=0; j<cnt; j++ ) {
         Node *m = n->in(j);
-        if( m && bbs[m->_idx] == this && !m->is_top() )
+        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
           local++;              // One more block-local input
       }
       ready_cnt[n->_idx] = local; // Count em up
@@ -603,7 +595,7 @@
           Node *oop_store = n->in(n->req() - 1);
           n->del_req(n->req() - 1);
           n->add_prec(oop_store);
-          assert(bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+          assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
         }
       }
       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ) {
@@ -622,7 +614,7 @@
     Node *n = _nodes[i3];       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if( bbs[m->_idx] ==this ) // Local-block user
+      if( cfg->_bbs[m->_idx] ==this ) // Local-block user
         ready_cnt[m->_idx]--;   // Fix ready count
     }
   }
@@ -649,15 +641,15 @@
   }
 
   // Warm up the 'next_call' heuristic bits
-  needed_for_next_call(_nodes[0], next_call, bbs);
+  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);
 
 #ifndef PRODUCT
-    if (TraceOptoPipelining) {
+    if (cfg->trace_opto_pipelining()) {
       for (uint j=0; j<_nodes.size(); j++) {
         Node     *n = _nodes[j];
         int     idx = n->_idx;
         tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
-        tty->print("latency:%3d  ", node_latency.at_grow(idx));
+        tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
@@ -667,45 +659,45 @@
   while( worklist.size() ) {    // Worklist is not ready
 
 #ifndef PRODUCT
-    uint before_size = worklist.size();
-
-    if (TraceOptoPipelining && before_size > 1) {
-      tty->print("#    before select:");
+    if (cfg->trace_opto_pipelining()) {
+      tty->print("#   ready list:");
       for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
         Node *n = worklist[i];      // Get Node on worklist
-        tty->print(" %4d", n->_idx);
+        tty->print(" %d", n->_idx);
       }
-      tty->print("\n");
+      tty->cr();
     }
 #endif
 
     // Select and pop a ready guy from worklist
-    Node* n = select(worklist, bbs, ready_cnt, next_call, phi_cnt, node_latency);
+    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
     _nodes.map(phi_cnt++,n);    // Schedule him next
 
 #ifndef PRODUCT
-    if (TraceOptoPipelining && before_size > 1) {
-      tty->print("#  select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", node_latency.at_grow(n->_idx));
+    if (cfg->trace_opto_pipelining()) {
+      tty->print("#    select %d: %s", n->_idx, n->Name());
+      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
       n->dump();
-      tty->print("#    after select:");
-      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
-        Node *n = worklist[i];      // Get Node on worklist
-        tty->print(" %4d", n->_idx);
+      if (Verbose) {
+        tty->print("#   ready list:");
+        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
+          Node *n = worklist[i];      // Get Node on worklist
+          tty->print(" %d", n->_idx);
+        }
+        tty->cr();
       }
-      tty->print("\n");
     }
 
 #endif
     if( n->is_MachCall() ) {
       MachCallNode *mcall = n->as_MachCall();
-      phi_cnt = sched_call(matcher, bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
+      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
-      if( bbs[m->_idx] != this ) continue;
+      if( cfg->_bbs[m->_idx] != this ) continue;
       if( m->is_Phi() ) continue;
       if( !--ready_cnt[m->_idx] ) 
         worklist.push(m);
@@ -726,13 +718,14 @@
   }
 
 #ifndef PRODUCT
-  if (TraceOptoPipelining) {
-    tty->print("# after schedule_local\n");
+  if (cfg->trace_opto_pipelining()) {
+    tty->print_cr("#");
+    tty->print_cr("# after schedule_local");
     for (uint i = 0;i < _nodes.size();i++) {
       tty->print("# ");
       _nodes[i]->fast_dump();
     }
-    tty->print("\n");
+    tty->cr();
   }
 #endif
 
--- a/hotspot/src/share/vm/opto/library_call.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)library_call.cpp	1.163 07/05/05 17:06:21 JVM"
+#pragma ident "@(#)library_call.cpp	1.164 07/05/17 15:59:02 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -31,22 +31,12 @@
 class LibraryIntrinsic : public InlineCallGenerator {
   // Extend the set of intrinsics known to the runtime:
  public:
-  enum C2_IntrinsicId {
-    _none = 0,
-    _lower_limit = vmIntrinsics::_vm_intrinsics_terminating_enum,
-
-    _Array_newInstance,                // java.lang.reflect.Array.newInstance
-    _getCallerClass,                   // sun.reflect.Reflection.getCallerClass
-    _AtomicLong_get,                   // sun.util.AtomicLongCSImpl.get
-    _AtomicLong_attemptUpdate,         // sun.util.AtomicLongCSImpl.attemptUpdate
-    _upper_limit
-  };
  private:
-  bool           _is_virtual;
-  C2_IntrinsicId _intrinsic_id;
+  bool             _is_virtual;
+  vmIntrinsics::ID _intrinsic_id;
 
  public:
-  LibraryIntrinsic(ciMethod* m, bool is_virtual, C2_IntrinsicId id)
+  LibraryIntrinsic(ciMethod* m, bool is_virtual, vmIntrinsics::ID id)
     : InlineCallGenerator(m),
       _is_virtual(is_virtual),
       _intrinsic_id(id)
@@ -55,9 +45,10 @@
   virtual bool is_intrinsic() const { return true; }
   virtual bool is_virtual()   const { return _is_virtual; }
   virtual JVMState* generate(JVMState* jvms);
-  C2_IntrinsicId intrinsic_id() const { return _intrinsic_id; }
+  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
 };
 
+
 // Local helper class for LibraryIntrinsic:
 class LibraryCallKit : public GraphKit {
  private:
@@ -70,12 +61,10 @@
   {
   }
 
-  typedef LibraryIntrinsic::C2_IntrinsicId C2_IntrinsicId;
-
-  ciMethod*         caller()    const    { return jvms()->caller()->method(); }
+  ciMethod*         caller()    const    { return jvms()->method(); }
   int               bci()       const    { return jvms()->bci(); }
   LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
-  C2_IntrinsicId    intrinsic_id() const { return _intrinsic->intrinsic_id(); }
+  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
   ciMethod*         callee()    const    { return _intrinsic->method(); }
   ciSignature*      signature() const    { return callee()->signature(); }
   int               arg_size()  const    { return callee()->arg_size(); }
@@ -88,11 +77,13 @@
   Node* generate_slow_guard(Node* test, RegionNode* region);
   Node* generate_fair_guard(Node* test, RegionNode* region);
   Node* generate_negative_guard(Node* index, RegionNode* region);
+  Node* generate_nonpositive_guard(Node* index, bool never_negative);
   Node* generate_limit_guard(Node* offset, Node* subseq_length,
                              Node* array_length,
                              RegionNode* region);
   Node* generate_current_thread(Node* &tls_output);
-  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset, const char* &name);
+  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
+                              bool disjoint_bases, const char* &name);
   Node* load_mirror_from_klass(Node* klass);
   Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                       int nargs,
@@ -105,8 +96,41 @@
                                          region, null_path,
                                          offset);
   }
+  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
+                                     int nargs,
+                                     RegionNode* region, int null_path) {
+    int offset = java_lang_Class::array_klass_offset_in_bytes();
+    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
+                                         region, null_path,
+                                         offset);
+  }
+  Node* generate_access_flags_guard(Node* kls,
+                                    int modifier_mask, int modifier_bits,
+                                    RegionNode* region);
+  Node* generate_interface_guard(Node* kls, RegionNode* region);
+  Node* generate_array_guard(Node* kls, RegionNode* region) {
+    return generate_array_guard_common(kls, region, false, false);
+  }
+  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
+    return generate_array_guard_common(kls, region, false, true);
+  }
+  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
+    return generate_array_guard_common(kls, region, true, false);
+  }
+  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
+    return generate_array_guard_common(kls, region, true, true);
+  }
+  Node* generate_array_guard_common(Node* kls, RegionNode* region,
+                                    bool obj_array, bool not_array);
   Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
-  CallJavaNode* generate_method_call(bool is_virtual, bool is_static);
+  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
+                                     bool is_virtual = false, bool is_static = false);
+  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
+    return generate_method_call(method_id, false, true);
+  }
+  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
+    return generate_method_call(method_id, true, false);
+  }
 
   bool inline_string_compareTo();
   bool inline_string_indexOf();
@@ -114,25 +138,31 @@
   Node* pop_math_arg();
   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_math_native(vmIntrinsics::ID id);
-  bool inline_trig( vmIntrinsics::ID id );
-  bool inline_trans( vmIntrinsics::ID id );
-  bool inline_abs( vmIntrinsics::ID id );
-  bool inline_sqrt( vmIntrinsics::ID id );
-  bool inline_pow( vmIntrinsics::ID id );
-  bool inline_exp( vmIntrinsics::ID id );
+  bool inline_trig(vmIntrinsics::ID id);
+  bool inline_trans(vmIntrinsics::ID id);
+  bool inline_abs(vmIntrinsics::ID id);
+  bool inline_sqrt(vmIntrinsics::ID id);
+  bool inline_pow(vmIntrinsics::ID id);
+  bool inline_exp(vmIntrinsics::ID id);
+  bool inline_min_max(vmIntrinsics::ID id);
+  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
   // This returns Type::AnyPtr, RawPtr, or OopPtr.
   int classify_unsafe_addr(Node* &base, Node* &offset);
   Node* make_unsafe_address(Node* base, Node* offset);
-  bool inline_unsafe_access(bool is_native_ptr, bool offset_is_long, bool is_store, BasicType type, bool is_volatile);
-  bool inline_unsafe_prefetch(bool is_native_ptr, bool offset_is_long, bool is_store, bool is_static);
+  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
+  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
   bool inline_unsafe_allocate();
+  bool inline_unsafe_copyMemory();
   bool inline_native_currentThread();
   bool inline_native_time_funcs(bool isNano);
   bool inline_native_isInterrupted();
   bool inline_native_Class_query(vmIntrinsics::ID id);
   bool inline_native_subtype_check();
 
-  bool inline_native_Array_newInstance();
+  bool inline_native_newArray();
+  bool inline_native_getLength();
+  bool inline_array_copyOf(bool is_copyOfRange);
+  bool inline_native_clone(bool is_virtual);
   bool inline_native_Reflection_getCallerClass();
   bool inline_native_AtomicLong_get();
   bool inline_native_AtomicLong_attemptUpdate();
@@ -143,23 +173,32 @@
 
   // Helper functions for inlining arraycopy
   bool inline_arraycopy();
-  void generate_arraycopy(BasicType basic_elem_type,
+  void generate_arraycopy(const TypePtr* adr_type,
+                          BasicType basic_elem_type,
                           Node* src,  Node* src_offset,
                           Node* dest, Node* dest_offset,
                           Node* copy_length,
                           int nargs,  // arguments on stack for debug info
+                          bool disjoint_bases = false,
+			  bool length_never_negative = false,
                           Node* slow_region = NULL);
-  Node* generate_slow_arraycopy(const TypePtr* adr_type,
-                                Node* src,  Node* src_offset,
-                                Node* dest, Node* dest_offset,
-                                Node* copy_length,
-                                int nargs);
-  void generate_generic_arraycopy(const TypePtr* adr_type,
-                                 Node* src,  Node* src_offset,
-                                 Node* dest, Node* dest_offset,
-                                 Node* copy_length);
+  void generate_slow_arraycopy(const TypePtr* adr_type,
+                               Node* src,  Node* src_offset,
+                               Node* dest, Node* dest_offset,
+                               Node* copy_length,
+                               int nargs);
+  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
+                                     Node* dest_elem_klass,
+                                     Node* src,  Node* src_offset,
+                                     Node* dest, Node* dest_offset,
+                                     Node* copy_length, int nargs);
+  Node* generate_generic_arraycopy(const TypePtr* adr_type,
+                                   Node* src,  Node* src_offset,
+                                   Node* dest, Node* dest_offset,
+                                   Node* copy_length, int nargs);
   void generate_unchecked_arraycopy(BasicType basic_elem_type,
                                     const TypePtr* adr_type,
+                                    bool disjoint_bases,
                                     Node* src,  Node* src_offset,
                                     Node* dest, Node* dest_offset,
                                     Node* copy_length);
@@ -175,16 +214,26 @@
   vmIntrinsics::ID id = m->intrinsic_id();
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 
+  if (DisableIntrinsic[0] != '\0'
+      && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
+    // disabled by a user request on the command line:
+    // example: -XX:DisableIntrinsic=_hashCode,_getClass
+    return NULL;
+  }
+
   if (!m->is_loaded()) {
     // do not attempt to inline unloaded methods
     return NULL;
   }
 
-  // Only a few intrinsics implement a virtual dispatch:
+  // Only a few intrinsics implement a virtual dispatch.
+  // They are expensive calls which are also frequently overridden.
   if (is_virtual) {
     switch (id) {
-    case vmIntrinsics::_hash:
-      break;  // OK, Object.hashCode intrinsic comes in both flavors
+    case vmIntrinsics::_hashCode:
+    case vmIntrinsics::_clone:
+      // OK, Object.hashCode and Object.clone intrinsics come in both flavors
+      break;
     default:
       return NULL;
     }
@@ -211,23 +260,49 @@
   case vmIntrinsics::_arraycopy:
     if (!InlineArrayCopy)  return NULL;
     break;
-  case vmIntrinsics::_hash:
+  case vmIntrinsics::_copyMemory:
+    if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
+    if (!InlineArrayCopy)  return NULL;
+    break;
+  case vmIntrinsics::_hashCode:
     if (!InlineObjectHash)  return NULL;
     break;
+  case vmIntrinsics::_clone:
+  case vmIntrinsics::_copyOf:
+  case vmIntrinsics::_copyOfRange:
+    if (!InlineObjectCopy)  return NULL;
+    // These also use the arraycopy intrinsic mechanism:
+    if (!InlineArrayCopy)  return NULL;
+    break;
   case vmIntrinsics::_checkIndex:
     // We do not intrinsify this.  The optimizer does fine with it.
     return NULL;
 
+  case vmIntrinsics::_get_AtomicLong:
+  case vmIntrinsics::_attemptUpdate:
+    if (!InlineAtomicLong)  return NULL;
+    break;
+
   case vmIntrinsics::_Object_init:
-    // We do not intrinsify this.
+  case vmIntrinsics::_invoke:
+    // We do not intrinsify these; they are marked for other purposes.
     return NULL;
 
+  case vmIntrinsics::_getCallerClass:
+    if (!UseNewReflection)  return NULL;
+    if (!InlineReflectionGetCallerClass)  return NULL;
+    if (!JDK_Version::is_gte_jdk14x_version())  return NULL;
+    break;
+
  default:
     break;
   }
 
   // -XX:-InlineClassNatives disables natives from the Class class.
-  if (m->holder()->name() == ciSymbol::java_lang_Class()) {
+  // The flag applies to all reflective calls, notably Array.newArray
+  // (visible to Java programmers as Array.newInstance).
+  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
+      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
     if (!InlineClassNatives)  return NULL;
   }
 
@@ -248,107 +323,59 @@
     if (!InlineUnsafeOps)  return NULL;
   }
 
-  return new LibraryIntrinsic(m, is_virtual, (LibraryIntrinsic::C2_IntrinsicId)id);
+  return new LibraryIntrinsic(m, is_virtual, (vmIntrinsics::ID) id);
 }
 
 //----------------------register_library_intrinsics-----------------------
-// Register here any intrinsics which are not presently known to the runtime.
+// Initialize this file's data structures, for each Compile instance.
 void Compile::register_library_intrinsics() {
-  ciMethod*                    methods[LibraryIntrinsic::_upper_limit];
-  LibraryIntrinsic::C2_IntrinsicId ids[LibraryIntrinsic::_upper_limit];
-  int ni = 0;
-
-  ciInstanceKlass* object = env()->Object_klass();
-  ciInstanceKlass* clazz = env()->Class_klass();
-
-  ciInstanceKlass* system =
-    env()->find_system_klass(ciSymbol::java_lang_System())->as_instance_klass();
-  assert(system->is_loaded(), "must be able to find");
-
-  ciInstanceKlass* array =
-    env()->find_system_klass(ciSymbol::make("java/lang/reflect/Array"))->as_instance_klass();
-
-  ciInstanceKlass* atomicLong =
-    env()->find_system_klass(ciSymbol::make("sun/misc/AtomicLongCSImpl"))->as_instance_klass();
-
-#define OBJ "Ljava/lang/Object;"
-#define CLS "Ljava/lang/Class;"
-
-  // Collect intrinsics here, as enabled by command line options:
-
-  if(    JDK_Version::is_gte_jdk14x_version()
-      && UseNewReflection
-      && InlineReflectionGetCallerClass ) {
-    ciInstanceKlass* reflection =
-      env()->find_system_klass(ciSymbol::make("sun/reflect/Reflection"))->as_instance_klass();
-    if (reflection->is_loaded()) {
-      methods[ni] =
-        reflection->find_method(ciSymbol::make("getCallerClass"),
-                                ciSymbol::make("(I)Ljava/lang/Class;"));
-      ids[ni++] = LibraryIntrinsic::_getCallerClass;
-    }
-  }
-
-  if (InlineClassNatives && array->is_loaded()) { // %%% InlineNewInstance ?
-    methods[ni] =
-      array->find_method(ciSymbol::make("newArray"),
-                         ciSymbol::make("("CLS"I)"OBJ""));
-    ids[ni++] = LibraryIntrinsic::_Array_newInstance;
-  }
-
-  if (InlineAtomicLong && atomicLong->is_loaded()) {
-    methods[ni] =
-      atomicLong->find_method(ciSymbol::make("get"),
-                         ciSymbol::make("()J"));
-    ids[ni++] = LibraryIntrinsic::_AtomicLong_get;
-    methods[ni] =
-      atomicLong->find_method(ciSymbol::make("attemptUpdate"),
-                         ciSymbol::make("(JJ)Z"));
-    ids[ni++] = LibraryIntrinsic::_AtomicLong_attemptUpdate;
-  }
-
-  // Now register them all.
-  assert(ni <= LibraryIntrinsic::_upper_limit, "oob");
-  while (--ni >= 0) {
-    ciMethod* m = methods[ni];
-#ifndef PRODUCT
-    if (m == NULL)
-      tty->print_cr("*** intrinsic %d[%d] not found", ids[ni], ni);
-#endif
-    register_intrinsic(new LibraryIntrinsic(m, /*!virtual*/ false, ids[ni]));
-  }
-
-#undef OBJ
-#undef CLS
+  // Nothing to do here.
 }
 
 JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
-#ifdef JVMPI_SUPPORT
-  assert(!C->need_jvmpi_method_event(), "don't attempt this if jvmpi is on");
-#endif // JVMPI_SUPPORT
   int nodes = C->unique();
+#ifndef PRODUCT
+  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+    char buf[1000];
+    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
+    tty->print_cr("Intrinsic %s", str);
+  }
+#endif
   if (kit.try_to_inline()) {
-    if (PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-      tty->print("  Inlining intrinsic ");
-      kit.callee()->print_name();
-      tty->cr();
+    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+      tty->print("Inlining intrinsic %s%s at bci:%d in",
+                 vmIntrinsics::name_at(intrinsic_id()),
+                 (is_virtual() ? " (virtual)" : ""), kit.bci());
+      kit.caller()->print_short_name(tty);
+      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
     }
+    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
-      const char* iname = vmIntrinsics::name_at((int) intrinsic_id());
-      char buf[20];
-      if (iname == NULL) {
-        sprintf(buf, "#%d", intrinsic_id());
-        iname = buf;
-      }
       C->log()->elem("intrinsic id='%s'%s nodes='%d'",
-                     iname, (is_virtual() ? " virtual='1'" : ""),
+                     vmIntrinsics::name_at(intrinsic_id()),
+                     (is_virtual() ? " virtual='1'" : ""),
                      C->unique() - nodes);
     }
     return kit.transfer_exceptions_into_jvms();
   }
 
+  if (PrintIntrinsics) {
+    switch (intrinsic_id()) {
+    case vmIntrinsics::_invoke:
+    case vmIntrinsics::_Object_init:
+      // We do not expect to inline these, so do not produce any noise about them.
+      break;
+    default:
+      tty->print("Did not inline intrinsic %s%s at bci:%d in",
+                 vmIntrinsics::name_at(intrinsic_id()),
+                 (is_virtual() ? " (virtual)" : ""), kit.bci());
+      kit.caller()->print_short_name(tty);
+      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
+    }
+  }
+  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
   return NULL;
 }
 
@@ -357,275 +384,239 @@
   const bool is_store       = true;
   const bool is_native_ptr  = true;
   const bool is_static      = true;
-  const bool offset_is_long = true;
-
-  if (intrinsic_id() < LibraryIntrinsic::_lower_limit) {
-    vmIntrinsics::ID iid = (vmIntrinsics::ID)intrinsic_id();
-    switch (iid) {
-    case vmIntrinsics::_hash:
-      return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
-    case vmIntrinsics::_identityHash:
-      return inline_native_hashcode(/*!virtual*/ false, is_static);
-    case vmIntrinsics::_getClass:
-      return inline_native_getClass();
-
-    case vmIntrinsics::_dsin:
-    case vmIntrinsics::_dcos:
-    case vmIntrinsics::_dtan:
-    case vmIntrinsics::_dabs:
-    case vmIntrinsics::_datan2:
-    case vmIntrinsics::_dsqrt:
-    case vmIntrinsics::_dexp:
-    case vmIntrinsics::_dlog:
-    case vmIntrinsics::_dlog10:
-    case vmIntrinsics::_dpow:
-      return inline_math_native(iid);
-
-    case vmIntrinsics::_arraycopy:
-      return inline_arraycopy();
-
-    case vmIntrinsics::_compareTo:
-      return inline_string_compareTo();
-    case vmIntrinsics::_indexOf:
-      return inline_string_indexOf();
-
-    case vmIntrinsics::_getObject_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_OBJECT, false);
-    case vmIntrinsics::_getBoolean_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_BOOLEAN, false);
-    case vmIntrinsics::_getByte_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_BYTE, false);
-    case vmIntrinsics::_getShort_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_SHORT, false);
-    case vmIntrinsics::_getChar_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_CHAR, false);
-    case vmIntrinsics::_getInt_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_INT, false);
-    case vmIntrinsics::_getLong_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_LONG, false);
-    case vmIntrinsics::_getFloat_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_FLOAT, false);
-    case vmIntrinsics::_getDouble_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, !is_store, T_DOUBLE, false);
-
-    case vmIntrinsics::_putObject_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_OBJECT, false);
-    case vmIntrinsics::_putBoolean_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_BOOLEAN, false);
-    case vmIntrinsics::_putByte_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_BYTE, false);
-    case vmIntrinsics::_putShort_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_SHORT, false);
-    case vmIntrinsics::_putChar_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_CHAR, false);
-    case vmIntrinsics::_putInt_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_INT, false);
-    case vmIntrinsics::_putLong_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_LONG, false);
-    case vmIntrinsics::_putFloat_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_FLOAT, false);
-    case vmIntrinsics::_putDouble_obj32:
-      return inline_unsafe_access(!is_native_ptr, !offset_is_long, is_store, T_DOUBLE, false);
-
-    case vmIntrinsics::_getObject_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_OBJECT, false);
-    case vmIntrinsics::_getBoolean_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_BOOLEAN, false);
-    case vmIntrinsics::_getByte_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_BYTE, false);
-    case vmIntrinsics::_getShort_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_SHORT, false);
-    case vmIntrinsics::_getChar_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_CHAR, false);
-    case vmIntrinsics::_getInt_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_INT, false);
-    case vmIntrinsics::_getLong_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_LONG, false);
-    case vmIntrinsics::_getFloat_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_FLOAT, false);
-    case vmIntrinsics::_getDouble_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_DOUBLE, false);
-
-    case vmIntrinsics::_putObject_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_OBJECT, false);
-    case vmIntrinsics::_putBoolean_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_BOOLEAN, false);
-    case vmIntrinsics::_putByte_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_BYTE, false);
-    case vmIntrinsics::_putShort_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_SHORT, false);
-    case vmIntrinsics::_putChar_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_CHAR, false);
-    case vmIntrinsics::_putInt_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_INT, false);
-    case vmIntrinsics::_putLong_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_LONG, false);
-    case vmIntrinsics::_putFloat_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_FLOAT, false);
-    case vmIntrinsics::_putDouble_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_DOUBLE, false);
-
-    case vmIntrinsics::_getByte_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_BYTE, false);
-    case vmIntrinsics::_getShort_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_SHORT, false);
-    case vmIntrinsics::_getChar_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_CHAR, false);
-    case vmIntrinsics::_getInt_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_INT, false);
-    case vmIntrinsics::_getLong_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_LONG, false);
-    case vmIntrinsics::_getFloat_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_FLOAT, false);
-    case vmIntrinsics::_getDouble_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_DOUBLE, false);
-    case vmIntrinsics::_getAddress_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, !is_store, T_ADDRESS, false);
-
-    case vmIntrinsics::_putByte_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_BYTE, false);
-    case vmIntrinsics::_putShort_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_SHORT, false);
-    case vmIntrinsics::_putChar_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_CHAR, false);
-    case vmIntrinsics::_putInt_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_INT, false);
-    case vmIntrinsics::_putLong_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_LONG, false);
-    case vmIntrinsics::_putFloat_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_FLOAT, false);
-    case vmIntrinsics::_putDouble_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_DOUBLE, false);
-    case vmIntrinsics::_putAddress_raw:
-      return inline_unsafe_access(is_native_ptr, !offset_is_long, is_store, T_ADDRESS, false);
-
-    case vmIntrinsics::_getObjectVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_OBJECT, true);
-    case vmIntrinsics::_getBooleanVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_BOOLEAN, true);
-    case vmIntrinsics::_getByteVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_BYTE, true);
-    case vmIntrinsics::_getShortVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_SHORT, true);
-    case vmIntrinsics::_getCharVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_CHAR, true);
-    case vmIntrinsics::_getIntVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_INT, true);
-    case vmIntrinsics::_getLongVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_LONG, true);
-    case vmIntrinsics::_getFloatVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_FLOAT, true);
-    case vmIntrinsics::_getDoubleVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, !is_store, T_DOUBLE, true);
-
-    case vmIntrinsics::_putObjectVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_OBJECT, true);
-    case vmIntrinsics::_putBooleanVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_BOOLEAN, true);
-    case vmIntrinsics::_putByteVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_BYTE, true);
-    case vmIntrinsics::_putShortVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_SHORT, true);
-    case vmIntrinsics::_putCharVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_CHAR, true);
-    case vmIntrinsics::_putIntVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_INT, true);
-    case vmIntrinsics::_putLongVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_LONG, true);
-    case vmIntrinsics::_putFloatVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_FLOAT, true);
-    case vmIntrinsics::_putDoubleVolatile_obj:
-      return inline_unsafe_access(!is_native_ptr, offset_is_long, is_store, T_DOUBLE, true);
-
-    case vmIntrinsics::_prefetchRead:
-      return inline_unsafe_prefetch(!is_native_ptr, offset_is_long, !is_store, !is_static);
-    case vmIntrinsics::_prefetchWrite:
-      return inline_unsafe_prefetch(!is_native_ptr, offset_is_long, is_store, !is_static);
-    case vmIntrinsics::_prefetchReadStatic:
-      return inline_unsafe_prefetch(!is_native_ptr, offset_is_long, !is_store, is_static);
-    case vmIntrinsics::_prefetchWriteStatic:
-      return inline_unsafe_prefetch(!is_native_ptr, offset_is_long, is_store, is_static);
-
-    case vmIntrinsics::_compareAndSwapObject_obj:
-      return inline_unsafe_CAS(T_OBJECT);
-    case vmIntrinsics::_compareAndSwapInt_obj:
-      return inline_unsafe_CAS(T_INT);
-    case vmIntrinsics::_compareAndSwapLong_obj:
-      return inline_unsafe_CAS(T_LONG);
-
-    case vmIntrinsics::_putOrderedObject_obj:
-      return inline_unsafe_ordered_store(T_OBJECT);
-    case vmIntrinsics::_putOrderedInt_obj:
-      return inline_unsafe_ordered_store(T_INT);
-    case vmIntrinsics::_putOrderedLong_obj:
-      return inline_unsafe_ordered_store(T_LONG);
-
-    case vmIntrinsics::_currentThread:
-      return inline_native_currentThread();
-    case vmIntrinsics::_isInterrupted:
-      return inline_native_isInterrupted();
-
-    case vmIntrinsics::_currentTimeMillis:
-      return inline_native_time_funcs(false);
-    case vmIntrinsics::_nanoTime:
-      return inline_native_time_funcs(true);
-    case vmIntrinsics::_allocateInstance:
-      return inline_unsafe_allocate();
-
-    case vmIntrinsics::_isAssignableFrom:
-      return inline_native_subtype_check();
-
-    case vmIntrinsics::_isInstance:
-    case vmIntrinsics::_getModifiers:
-    case vmIntrinsics::_getClassAccessFlags:
-      return inline_native_Class_query(iid);
-
-    case vmIntrinsics::_floatToRawIntBits:
-    case vmIntrinsics::_floatToIntBits:
-    case vmIntrinsics::_intBitsToFloat:
-    case vmIntrinsics::_doubleToRawLongBits:
-    case vmIntrinsics::_doubleToLongBits:
-    case vmIntrinsics::_longBitsToDouble:
-      return inline_fp_conversions(iid);
-
-    case vmIntrinsics::_reverseBytes_i:
-    case vmIntrinsics::_reverseBytes_l:
-      return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
-
-    default:
-      // If you get here, it may be that someone has added a new intrinsic
-      // to the list in vmSymbols.hpp without implementing it here.
+
+  switch (intrinsic_id()) {
+  case vmIntrinsics::_hashCode:
+    return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
+  case vmIntrinsics::_identityHashCode:
+    return inline_native_hashcode(/*!virtual*/ false, is_static);
+  case vmIntrinsics::_getClass:
+    return inline_native_getClass();
+
+  case vmIntrinsics::_dsin:
+  case vmIntrinsics::_dcos:
+  case vmIntrinsics::_dtan:
+  case vmIntrinsics::_dabs:
+  case vmIntrinsics::_datan2:
+  case vmIntrinsics::_dsqrt:
+  case vmIntrinsics::_dexp:
+  case vmIntrinsics::_dlog:
+  case vmIntrinsics::_dlog10:
+  case vmIntrinsics::_dpow:
+    return inline_math_native(intrinsic_id());
+
+  case vmIntrinsics::_min:
+  case vmIntrinsics::_max:
+    return inline_min_max(intrinsic_id());
+
+  case vmIntrinsics::_arraycopy:
+    return inline_arraycopy();
+
+  case vmIntrinsics::_compareTo:
+    return inline_string_compareTo();
+  case vmIntrinsics::_indexOf:
+    return inline_string_indexOf();
+
+  case vmIntrinsics::_getObject:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false);
+  case vmIntrinsics::_getBoolean:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false);
+  case vmIntrinsics::_getByte:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false);
+  case vmIntrinsics::_getShort:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false);
+  case vmIntrinsics::_getChar:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false);
+  case vmIntrinsics::_getInt:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false);
+  case vmIntrinsics::_getLong:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false);
+  case vmIntrinsics::_getFloat:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false);
+  case vmIntrinsics::_getDouble:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false);
+
+  case vmIntrinsics::_putObject:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false);
+  case vmIntrinsics::_putBoolean:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false);
+  case vmIntrinsics::_putByte:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false);
+  case vmIntrinsics::_putShort:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false);
+  case vmIntrinsics::_putChar:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false);
+  case vmIntrinsics::_putInt:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false);
+  case vmIntrinsics::_putLong:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false);
+  case vmIntrinsics::_putFloat:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false);
+  case vmIntrinsics::_putDouble:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false);
+
+  case vmIntrinsics::_getByte_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false);
+  case vmIntrinsics::_getShort_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false);
+  case vmIntrinsics::_getChar_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false);
+  case vmIntrinsics::_getInt_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false);
+  case vmIntrinsics::_getLong_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false);
+  case vmIntrinsics::_getFloat_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false);
+  case vmIntrinsics::_getDouble_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false);
+  case vmIntrinsics::_getAddress_raw:
+    return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false);
+
+  case vmIntrinsics::_putByte_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false);
+  case vmIntrinsics::_putShort_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false);
+  case vmIntrinsics::_putChar_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false);
+  case vmIntrinsics::_putInt_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_INT, false);
+  case vmIntrinsics::_putLong_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false);
+  case vmIntrinsics::_putFloat_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false);
+  case vmIntrinsics::_putDouble_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false);
+  case vmIntrinsics::_putAddress_raw:
+    return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false);
+
+  case vmIntrinsics::_getObjectVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true);
+  case vmIntrinsics::_getBooleanVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true);
+  case vmIntrinsics::_getByteVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true);
+  case vmIntrinsics::_getShortVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true);
+  case vmIntrinsics::_getCharVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true);
+  case vmIntrinsics::_getIntVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true);
+  case vmIntrinsics::_getLongVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true);
+  case vmIntrinsics::_getFloatVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true);
+  case vmIntrinsics::_getDoubleVolatile:
+    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true);
+
+  case vmIntrinsics::_putObjectVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true);
+  case vmIntrinsics::_putBooleanVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true);
+  case vmIntrinsics::_putByteVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true);
+  case vmIntrinsics::_putShortVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true);
+  case vmIntrinsics::_putCharVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
+  case vmIntrinsics::_putIntVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
+  case vmIntrinsics::_putLongVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
+  case vmIntrinsics::_putFloatVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
+  case vmIntrinsics::_putDoubleVolatile:
+    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);
+
+  case vmIntrinsics::_prefetchRead:
+    return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
+  case vmIntrinsics::_prefetchWrite:
+    return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
+  case vmIntrinsics::_prefetchReadStatic:
+    return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
+  case vmIntrinsics::_prefetchWriteStatic:
+    return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
+
+  case vmIntrinsics::_compareAndSwapObject:
+    return inline_unsafe_CAS(T_OBJECT);
+  case vmIntrinsics::_compareAndSwapInt:
+    return inline_unsafe_CAS(T_INT);
+  case vmIntrinsics::_compareAndSwapLong:
+    return inline_unsafe_CAS(T_LONG);
+
+  case vmIntrinsics::_putOrderedObject:
+    return inline_unsafe_ordered_store(T_OBJECT);
+  case vmIntrinsics::_putOrderedInt:
+    return inline_unsafe_ordered_store(T_INT);
+  case vmIntrinsics::_putOrderedLong:
+    return inline_unsafe_ordered_store(T_LONG);
+
+  case vmIntrinsics::_currentThread:
+    return inline_native_currentThread();
+  case vmIntrinsics::_isInterrupted:
+    return inline_native_isInterrupted();
+
+  case vmIntrinsics::_currentTimeMillis:
+    return inline_native_time_funcs(false);
+  case vmIntrinsics::_nanoTime:
+    return inline_native_time_funcs(true);
+  case vmIntrinsics::_allocateInstance:
+    return inline_unsafe_allocate();
+  case vmIntrinsics::_copyMemory:
+    return inline_unsafe_copyMemory();
+  case vmIntrinsics::_newArray:
+    return inline_native_newArray();
+  case vmIntrinsics::_getLength:
+    return inline_native_getLength();
+  case vmIntrinsics::_copyOf:
+    return inline_array_copyOf(false);
+  case vmIntrinsics::_copyOfRange:
+    return inline_array_copyOf(true);
+  case vmIntrinsics::_clone:
+    return inline_native_clone(intrinsic()->is_virtual());
+
+  case vmIntrinsics::_isAssignableFrom:
+    return inline_native_subtype_check();
+
+  case vmIntrinsics::_isInstance:
+  case vmIntrinsics::_getModifiers:
+  case vmIntrinsics::_isInterface:
+  case vmIntrinsics::_isArray:
+  case vmIntrinsics::_isPrimitive:
+  case vmIntrinsics::_getSuperclass:
+  case vmIntrinsics::_getComponentType:
+  case vmIntrinsics::_getClassAccessFlags:
+    return inline_native_Class_query(intrinsic_id());
+
+  case vmIntrinsics::_floatToRawIntBits:
+  case vmIntrinsics::_floatToIntBits:
+  case vmIntrinsics::_intBitsToFloat:
+  case vmIntrinsics::_doubleToRawLongBits:
+  case vmIntrinsics::_doubleToLongBits:
+  case vmIntrinsics::_longBitsToDouble:
+    return inline_fp_conversions(intrinsic_id());
+
+  case vmIntrinsics::_reverseBytes_i:
+  case vmIntrinsics::_reverseBytes_l:
+    return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
+
+  case vmIntrinsics::_get_AtomicLong:
+    return inline_native_AtomicLong_get();
+  case vmIntrinsics::_attemptUpdate:
+    return inline_native_AtomicLong_attemptUpdate();
+
+  case vmIntrinsics::_getCallerClass:
+    return inline_native_Reflection_getCallerClass();
+
+  default:
+    // If you get here, it may be that someone has added a new intrinsic
+    // to the list in vmSymbols.hpp without implementing it here.
 #ifndef PRODUCT
-      if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
-        tty->print_cr("*** Warning: Unimplemented intrinsic %d", iid);
-      }
-#endif
-      return false;
+    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
+      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
+                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
     }
-  } else {
-    // C2_IntrinsicId
-    switch (intrinsic_id()) {
-    case LibraryIntrinsic::_Array_newInstance:
-      return inline_native_Array_newInstance();
-
-    case LibraryIntrinsic::_getCallerClass:
-      return inline_native_Reflection_getCallerClass();
-
-    case LibraryIntrinsic::_AtomicLong_get:
-      return inline_native_AtomicLong_get();
-    case LibraryIntrinsic::_AtomicLong_attemptUpdate:
-      return inline_native_AtomicLong_attemptUpdate();
-
-    default:
-      // If you get here, it may be that someone has added a new intrinsic
-      // to the list in vmSymbols.hpp without implementing it here.
-#ifndef PRODUCT
-      if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
-        tty->print_cr("*** Warning: Unimplemented intrinsic %d", intrinsic_id());
-      }
 #endif
-      return false;
-    }
+    return false;
   }
 }
 
@@ -653,8 +644,7 @@
     return NULL;
   }
 
-  IfNode* iff = new (C, 2) IfNode( control(), test, true_prob, COUNT_UNKNOWN );
-  _gvn.set_type(iff, iff->Value(&_gvn));
+  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
 
   Node* if_slow = _gvn.transform( new (C, 1) IfTrueNode(iff) );
   if (if_slow == top()) {
@@ -681,7 +671,7 @@
 inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region) {
   if (stopped())
     return NULL;                // already stopped
-  if (_gvn.type(index)->higher_equal(TypeInt::POS))
+  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
     return NULL;                // index is already adequately typed
   Node* cmp_lt = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
   Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
@@ -689,6 +679,18 @@
   return is_neg;
 }
 
+inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative) {
+  if (stopped())
+    return NULL;                // already stopped
+  if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
+    return NULL;                // index is already adequately typed
+  Node* cmp_le = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
+  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
+  Node* bol_le = _gvn.transform( new (C, 2) BoolNode(cmp_le, le_or_eq) );
+  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
+  return is_notp;
+}
+
 // Make sure that 'position' is a valid limit index, in [0..length].
 // There are two equivalent plans for checking this:
 //   A. (offset + copyLength)  unsigned<=  arrayLength
@@ -1009,7 +1011,7 @@
 //------------------------------inline_trig----------------------------------
 // Inline sin/cos/tan instructions, if possible.  If rounding is required, do
 // argument reduction which will turn into a fast/slow diamond.
-bool LibraryCallKit::inline_trig( vmIntrinsics::ID id ) {
+bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
   _sp += arg_size();            // restore stack pointer
   Node* arg = pop_math_arg();
   Node* trig = NULL;
@@ -1122,7 +1124,7 @@
 
 //------------------------------inline_sqrt-------------------------------------
 // Inline square root instruction, if possible.
-bool LibraryCallKit::inline_sqrt( vmIntrinsics::ID id ) {
+bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) {
   assert(id == vmIntrinsics::_dsqrt, "Not square root");
   _sp += arg_size();        // restore stack pointer
   push_pair(_gvn.transform(new (C, 2) SqrtDNode(0, pop_math_arg())));
@@ -1131,7 +1133,7 @@
 
 //------------------------------inline_abs-------------------------------------
 // Inline absolute value instruction, if possible.
-bool LibraryCallKit::inline_abs( vmIntrinsics::ID id ) {
+bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) {
   assert(id == vmIntrinsics::_dabs, "Not absolute value");
   _sp += arg_size();        // restore stack pointer
   push_pair(_gvn.transform(new (C, 2) AbsDNode(pop_math_arg())));
@@ -1141,7 +1143,7 @@
 //------------------------------inline_exp-------------------------------------
 // Inline exp instructions, if possible.  The Intel hardware only misses
 // really odd corner cases (+/- Infinity).  Just uncommon-trap them.
-bool LibraryCallKit::inline_exp( vmIntrinsics::ID id ) {
+bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) {
   assert(id == vmIntrinsics::_dexp, "Not exp");
 
   // If this inlining ever returned NaN in the past, we do not intrinsify it
@@ -1180,7 +1182,7 @@
 
 //------------------------------inline_pow-------------------------------------
 // Inline power instructions, if possible.  
-bool LibraryCallKit::inline_pow( vmIntrinsics::ID id ) {
+bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
   assert(id == vmIntrinsics::_dpow, "Not pow");
 
   // If this inlining ever returned NaN in the past, we do not intrinsify it
@@ -1322,7 +1324,7 @@
 //------------------------------inline_trans-------------------------------------
 // Inline transcendental instructions, if possible.  The Intel hardware gets 
 // these right, no funny corner cases missed.
-bool LibraryCallKit::inline_trans( vmIntrinsics::ID id ) {
+bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) {
   _sp += arg_size();        // restore stack pointer
   Node* arg = pop_math_arg();
   Node* trans = NULL;
@@ -1414,6 +1416,193 @@
   }
 }
 
+static bool is_simple_name(Node* n) {
+  return (n->req() == 1         // constant
+          || (n->is_Type() && n->as_Type()->type()->singleton())
+          || n->is_Proj()       // parameter or return value
+          || n->is_Phi()        // local of some sort
+          );
+}
+
+//----------------------------inline_min_max-----------------------------------
+bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
+  push(generate_min_max(id, argument(0), argument(1)));
+
+  return true;
+}
+
+Node*
+LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
+  // These are the candidate return value:
+  Node* xvalue = x0;
+  Node* yvalue = y0;
+
+  if (xvalue == yvalue) {
+    return xvalue;
+  }
+
+  bool want_max = (id == vmIntrinsics::_max);
+
+  const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
+  const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
+  if (txvalue == NULL || tyvalue == NULL)  return top();
+  // This is not really necessary, but it is consistent with a
+  // hypothetical MaxINode::Value method:
+  int widen = MAX2(txvalue->_widen, tyvalue->_widen);
+
+  // %%% This folding logic should (ideally) be in a different place.
+  // Some should be inside IfNode, and there to be a more reliable
+  // transformation of ?: style patterns into cmoves.  We also want
+  // more powerful optimizations around cmove and min/max.
+
+  // Try to find a dominating comparison of these guys.
+  // It can simplify the index computation for Arrays.copyOf
+  // and similar uses of System.arraycopy.
+  // First, compute the normalized version of CmpI(x, y).
+  int   cmp_op = Op_CmpI;
+  Node* xkey = xvalue;
+  Node* ykey = yvalue;
+  Node* ideal_cmpxy = _gvn.transform( new(C, 3) CmpINode(xkey, ykey) );
+  if (ideal_cmpxy->is_Cmp()) {
+    // E.g., if we have CmpI(length - offset, count),
+    // it might idealize to CmpI(length, count + offset)
+    cmp_op = ideal_cmpxy->Opcode();
+    xkey = ideal_cmpxy->in(1);
+    ykey = ideal_cmpxy->in(2);
+  }
+
+  // Start by locating any relevant comparisons.
+  Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
+  Node* cmpxy = NULL;
+  Node* cmpyx = NULL;
+  for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
+    Node* cmp = start_from->fast_out(k);
+    if (cmp->outcnt() > 0 &&            // must have prior uses
+        cmp->in(0) == NULL &&           // must be context-independent
+        cmp->Opcode() == cmp_op) {      // right kind of compare
+      if (cmp->in(1) == xkey && cmp->in(2) == ykey)  cmpxy = cmp;
+      if (cmp->in(1) == ykey && cmp->in(2) == xkey)  cmpyx = cmp;
+    }
+  }
+
+  const int NCMPS = 2;
+  Node* cmps[NCMPS] = { cmpxy, cmpyx };
+  int cmpn;
+  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
+    if (cmps[cmpn] != NULL)  break;     // find a result
+  }
+  if (cmpn < NCMPS) {
+    // Look for a dominating test that tells us the min and max.
+    int depth = 0;                // Limit search depth for speed
+    Node* dom = control();
+    for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
+      if (++depth >= 100)  break;
+      Node* ifproj = dom;
+      if (!ifproj->is_Proj())  continue;
+      Node* iff = ifproj->in(0);
+      if (!iff->is_If())  continue;
+      Node* bol = iff->in(1);
+      if (!bol->is_Bool())  continue;
+      Node* cmp = bol->in(1);
+      if (cmp == NULL)  continue;
+      for (cmpn = 0; cmpn < NCMPS; cmpn++)
+        if (cmps[cmpn] == cmp)  break;
+      if (cmpn == NCMPS)  continue;
+      BoolTest::mask btest = bol->as_Bool()->_test._test;
+      if (ifproj->is_IfFalse())  btest = BoolTest(btest).negate();
+      if (cmp->in(1) == ykey)    btest = BoolTest(btest).commute();
+      // At this point, we know that 'x btest y' is true.
+      switch (btest) {
+      case BoolTest::eq:
+        // They are proven equal, so we can collapse the min/max.
+        // Either value is the answer.  Choose the simpler.
+        if (is_simple_name(yvalue) && !is_simple_name(xvalue))
+          return yvalue;
+        return xvalue;
+      case BoolTest::lt:          // x < y
+      case BoolTest::le:          // x <= y
+        return (want_max ? yvalue : xvalue);
+      case BoolTest::gt:          // x > y
+      case BoolTest::ge:          // x >= y
+        return (want_max ? xvalue : yvalue);
+      }
+    }
+  }
+
+  // We failed to find a dominating test.
+  // Let's pick a test that might GVN with prior tests.
+  Node*          best_bol   = NULL;
+  BoolTest::mask best_btest = BoolTest::illegal;
+  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
+    Node* cmp = cmps[cmpn];
+    if (cmp == NULL)  continue;
+    for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
+      Node* bol = cmp->fast_out(j);
+      if (!bol->is_Bool())  continue;
+      BoolTest::mask btest = bol->as_Bool()->_test._test;
+      if (btest == BoolTest::eq || btest == BoolTest::ne)  continue;
+      if (cmp->in(1) == ykey)   btest = BoolTest(btest).commute();
+      if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
+        best_bol   = bol->as_Bool();
+        best_btest = btest;
+      }
+    }
+  }
+
+  Node* answer_if_true  = NULL;
+  Node* answer_if_false = NULL;
+  switch (best_btest) {
+  default:
+    if (cmpxy == NULL)
+      cmpxy = ideal_cmpxy;
+    best_bol = _gvn.transform( new(C, 2) BoolNode(cmpxy, BoolTest::lt) );
+    // and fall through:
+  case BoolTest::lt:          // x < y
+  case BoolTest::le:          // x <= y
+    answer_if_true  = (want_max ? yvalue : xvalue);
+    answer_if_false = (want_max ? xvalue : yvalue);
+    break;
+  case BoolTest::gt:          // x > y
+  case BoolTest::ge:          // x >= y
+    answer_if_true  = (want_max ? xvalue : yvalue);
+    answer_if_false = (want_max ? yvalue : xvalue);
+    break;
+  }
+
+  jint hi, lo;
+  if (want_max) {
+    // We can sharpen the minimum.
+    hi = MAX2(txvalue->_hi, tyvalue->_hi);
+    lo = MAX2(txvalue->_lo, tyvalue->_lo);
+  } else {
+    // We can sharpen the maximum.
+    hi = MIN2(txvalue->_hi, tyvalue->_hi);
+    lo = MIN2(txvalue->_lo, tyvalue->_lo);
+  }
+
+  // Use a flow-free graph structure, to avoid creating excess control edges
+  // which could hinder other optimizations.
+  Node* cmov = CMoveNode::make(C, NULL, best_bol,
+                               answer_if_false, answer_if_true,
+                               TypeInt::make(lo, hi, widen));
+
+  return _gvn.transform(cmov);
+
+  /*
+  // This is not as desirable as it may seem, since Min and Max
+  // nodes do not have a full set of optimizations.
+  // And they would interfere, anyway, with 'if' optimizations
+  // and with CMoveI canonical forms.
+  switch (id) {
+  case vmIntrinsics::_min:
+    result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
+  case vmIntrinsics::_max:
+    result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
+  default:
+    ShouldNotReachHere();
+  }
+  */
+}
 
 inline int
 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
@@ -1487,30 +1676,25 @@
 // Interpret Unsafe.fieldOffset cookies correctly:
 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
 
-bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool offset_is_long, bool is_store, BasicType type, bool is_volatile) {
+bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
   if (callee()->is_static())  return false;  // caller must have the capability!
 
 #ifndef PRODUCT
   {
     ResourceMark rm;
-    // Check the signatures, and/or print a cheerful message.
+    // Check the signatures.
     ciSignature* sig = signature();
-    const char* name = callee()->name()->as_utf8();
-
-    if (PrintInlining || PrintOptoInlining)
-      tty->print_cr("Inlining Unsafe.%s%s", name, sig->as_symbol()->as_utf8());
-
 #ifdef ASSERT
     if (!is_store) {
       // Object getObject(Object base, int/long offset), etc.
       BasicType rtype = sig->return_type()->basic_type();
-      if (rtype == T_ADDRESS_HOLDER && 0 == strcmp(name, "getAddress"))
+      if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
           rtype = T_ADDRESS;  // it is really a C void*
       assert(rtype == type, "getter must return the expected value");
       if (!is_native_ptr) {
         assert(sig->count() == 2, "oop getter has 2 arguments");
         assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
-        assert(sig->type_at(1)->basic_type() == (offset_is_long ? T_LONG : T_INT), "getter offset is correct");
+        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
       } else {
         assert(sig->count() == 1, "native getter has 1 argument");
         assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
@@ -1521,13 +1705,13 @@
       if (!is_native_ptr) {
         assert(sig->count() == 3, "oop putter has 3 arguments");
         assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
-        assert(sig->type_at(1)->basic_type() == (offset_is_long ? T_LONG : T_INT), "putter offset is correct");
+        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
       } else {
         assert(sig->count() == 2, "native putter has 2 arguments");
         assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
       }
       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
-      if (vtype == T_ADDRESS_HOLDER && 0 == strcmp(name, "putAddress"))
+      if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
         vtype = T_ADDRESS;  // it is really a C void*
       assert(vtype == type, "putter must accept the expected value");
     }
@@ -1540,7 +1724,7 @@
   int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ];
 
   // Argument words:  "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words
-  int nargs = 1 + (is_native_ptr ? 2 : offset_is_long ? 3 : 2) + (is_store ? type_words : 0);
+  int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0);
 
   debug_only(int saved_sp = _sp);
   _sp += nargs;
@@ -1566,21 +1750,16 @@
   Node *heap_base_oop = top();
   if (!is_native_ptr) {
     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
-    Node *offset   = offset_is_long ? pop_pair() : pop();
+    Node* offset = pop_pair();
     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
-    Node *base     = pop();
+    Node* base   = pop();
     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
     // to be plain byte offsets, which are also the same as those accepted
     // by oopDesc::field_base.
     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
            "fieldOffset must be byte-scaled");
-    if (offset_is_long) {
-      // 32-bit machines ignore the high half!
-      offset = ConvL2X(offset);
-    } else {
-      // 64-bit machines require ConXNode to be long.
-      offset = ConvI2X(offset);
-    }
+    // 32-bit machines ignore the high half!
+    offset = ConvL2X(offset);
     adr = make_unsafe_address(base, offset);
     heap_base_oop = base;
   } else {
@@ -1641,7 +1820,7 @@
       value_type = tjp;
 
 #ifndef PRODUCT
-      if (PrintInlining || PrintOptoInlining) {
+      if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
         tty->print("  from base type:  ");   adr_type->dump();
         tty->print("  sharpened value: "); value_type->dump();
       }
@@ -1774,24 +1953,19 @@
 
 //----------------------------inline_unsafe_prefetch----------------------------
 
-bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool offset_is_long, bool is_store, bool is_static) {
+bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
 #ifndef PRODUCT
   {
     ResourceMark rm;
-    // Check the signatures, and/or print a cheerful message.
+    // Check the signatures.
     ciSignature* sig = signature();
-    const char* name = callee()->name()->as_utf8();
-
-    if (PrintInlining || PrintOptoInlining) {
-      tty->print_cr("Inlining Unsafe.%s%s", name, sig->as_symbol()->as_utf8());
-    }
 #ifdef ASSERT
     // Object getObject(Object base, int/long offset), etc.
     BasicType rtype = sig->return_type()->basic_type();
     if (!is_native_ptr) {
       assert(sig->count() == 2, "oop prefetch has 2 arguments");
       assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object");
-      assert(sig->type_at(1)->basic_type() == (offset_is_long ? T_LONG : T_INT), "prefetcha offset is correct");
+      assert(sig->type_at(1)->basic_type() == T_LONG, "prefetcha offset is correct");
     } else {
       assert(sig->count() == 1, "native prefetch has 1 argument");
       assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long");
@@ -1803,7 +1977,7 @@
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
 
   // Argument words:  "this" if not static, plus (oop/offset) or (lo/hi) args
-  int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : offset_is_long ? 3 : 2);
+  int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3);
 
   debug_only(int saved_sp = _sp);
   _sp += nargs;
@@ -1812,21 +1986,16 @@
   Node *adr;
   if (!is_native_ptr) {
     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
-    Node *offset   = offset_is_long ? pop_pair() : pop();
+    Node* offset = pop_pair();
     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
-    Node *base     = pop();
+    Node* base   = pop();
     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
     // to be plain byte offsets, which are also the same as those accepted
     // by oopDesc::field_base.
     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
            "fieldOffset must be byte-scaled");
-    if (offset_is_long) {
-      // 32-bit machines ignore the high half!
-      offset = ConvL2X(offset);
-    } else {
-      // 64-bit machines require ConXNode to be long.
-      offset = ConvI2X(offset);
-    }
+    // 32-bit machines ignore the high half!
+    offset = ConvL2X(offset);
     adr = make_unsafe_address(base, offset);
   } else {
     Node* ptr = pop_pair();
@@ -1881,13 +2050,8 @@
 #ifndef PRODUCT
   {
     ResourceMark rm;
-    // Check the signatures, and/or print a cheerful message.
+    // Check the signatures.
     ciSignature* sig = signature();
-    const char* name = callee()->name()->as_utf8();
-
-    if (PrintInlining || PrintOptoInlining)
-      tty->print_cr("Inlining Unsafe.%s%s", name, sig->as_symbol()->as_utf8());
-
 #ifdef ASSERT
     BasicType rtype = sig->return_type()->basic_type();
     assert(rtype == T_BOOLEAN, "CAS must return boolean");
@@ -2003,13 +2167,8 @@
 #ifndef PRODUCT
   {
     ResourceMark rm;
-    // Check the signatures, and/or print a cheerful message.
+    // Check the signatures.
     ciSignature* sig = signature();
-    const char* name = callee()->name()->as_utf8();
-
-    if (PrintInlining || PrintOptoInlining)
-      tty->print_cr("Inlining Unsafe.%s%s", name, sig->as_symbol()->as_utf8());
-
 #ifdef ASSERT
     BasicType rtype = sig->return_type()->basic_type();
     assert(rtype == T_VOID, "must return void");
@@ -2068,52 +2227,30 @@
 
 bool LibraryCallKit::inline_unsafe_allocate() {
   if (callee()->is_static())  return false;  // caller must have the capability!
-
-  // Object allocateInstance(Class cls).
-  // Argument words:  "this" plus 1 class argument
   int nargs = 1 + 1;
-  assert(signature()->count() == nargs-1, "alloc has 1 argument");
-
-  debug_only(int saved_sp = _sp);
-  _sp += nargs;
-
-  Node *cls = pop();
-
-  // Pop receiver last:  it was pushed first.
-  Node *receiver = pop();
-
-  assert(saved_sp == _sp, "must have correct argument count");
-
-  const TypeInstPtr *cls_mirror = _gvn.type(cls)->isa_instptr();
-
-  ciType* k = !cls_mirror ? NULL : cls_mirror->java_mirror_type();
-  if( k == NULL || !k->is_instance_klass() )  return false;
-
-  ciInstanceKlass* ik = k->as_instance_klass();
-  if( !ik->is_initialized() )  return false;
-  if( ik->is_abstract() || ik->is_interface() ||
-      ik->name() == ciSymbol::java_lang_Class() )
-    return false;
-
-  if (PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-    tty->print("Inlining Unsafe.allocateInstance on constant operand ");
-    ik->print_name();
-    tty->cr();
-  }
-
-  // Null check on self without removing any arguments.  The argument
-  // null check technically happens in the wrong place, which can lead to
-  // invalid stack traces when the primitive is inlined into a method
-  // which handles NullPointerExceptions.
-  _sp += nargs;
-  do_null_check(receiver, T_OBJECT);
+  assert(signature()->size() == nargs-1, "alloc has 1 argument");
+  null_check_receiver(callee());  // check then ignore argument(0)
+  _sp += nargs;  // set original stack for use by uncommon_trap
+  Node* cls = do_null_check(argument(1), T_OBJECT);
+  _sp -= nargs;
+  if (stopped())  return true;
+
+  Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
+  _sp += nargs;  // set original stack for use by uncommon_trap
+  kls = do_null_check(kls, T_OBJECT);
   _sp -= nargs;
-  if (stopped()) {
-    return true;
-  }
-
-  Node* kls = makecon(TypeKlassPtr::make(ik));
-  Node* obj = new_instance(kls);
+  if (stopped())  return true;  // argument was like int.class
+
+  // Note:  The argument might still be an illegal value like
+  // Serializable.class or Object[].class.   The runtime will handle it.
+  // But we must make an explicit check for initialization.
+  Node* insp = basic_plus_adr(kls, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc));
+  Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT);
+  Node* bits = intcon(instanceKlass::fully_initialized);
+  Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) );
+  // The 'test' is non-zero if we need to take a slow path.
+
+  Node* obj = new_instance(kls, test);
   push(obj);
 
   return true;
@@ -2222,7 +2359,7 @@
     result_val->init_req(slow_result_path, top());
   } else {
     // non-virtual because it is a private non-static
-    CallJavaNode* slow_call = generate_method_call(false, false);
+    CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
 
     Node* slow_val = set_results_for_java_call(slow_call);
     // this->control() comes from set_results_for_java_call
@@ -2288,13 +2425,28 @@
   return kls;
 }
 
+//--------------------(inline_native_Class_query helpers)---------------------
+// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
+// Fall through if (mods & mask) == bits, take the guard otherwise.
+Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
+  // Branch around if the given klass has the given modifier bit set.
+  // Like generate_guard, adds a new path onto the region.
+  Node* modp = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
+  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
+  Node* mask = intcon(modifier_mask);
+  Node* bits = intcon(modifier_bits);
+  Node* mbit = _gvn.transform( new (C, 3) AndINode(mods, mask) );
+  Node* cmp  = _gvn.transform( new (C, 3) CmpINode(mbit, bits) );
+  Node* bol  = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
+  return generate_fair_guard(bol, region);
+}
+Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
+  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
+}
+
 //-------------------------inline_native_Class_query-------------------
 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
-  // other candidates for Class query intrinsics: isInterface,
-  //     isArray, isPrimitive, getName, getSuperclass, getComponentType
-  NOT_PRODUCT(const char* iname = NULL);
   int nargs = 1+0;  // just the Class mirror, in most cases
-  bool null_check_obj = false;
   const Type* return_type = TypeInt::BOOL;
   Node* prim_return_value = top();  // what happens if it's a primitive class?
   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
@@ -2307,17 +2459,33 @@
     nargs = 1+1;  // the Class mirror, plus the object getting queried about
     // nothing is an instance of a primitive type
     prim_return_value = intcon(0);
-    NOT_PRODUCT(iname = "Class.isInstance");
     break;
   case vmIntrinsics::_getModifiers:
     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
     assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
     return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
-    NOT_PRODUCT(iname = "Class.getModifiers");
+    break;
+  case vmIntrinsics::_isInterface:
+    prim_return_value = intcon(0);
+    break;
+  case vmIntrinsics::_isArray:
+    prim_return_value = intcon(0);
+    expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
+    break;
+  case vmIntrinsics::_isPrimitive:
+    prim_return_value = intcon(1);
+    expect_prim = true;  // obviously
+    break;
+  case vmIntrinsics::_getSuperclass:
+    prim_return_value = null();
+    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
+    break;
+  case vmIntrinsics::_getComponentType:
+    prim_return_value = null();
+    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
     break;
   case vmIntrinsics::_getClassAccessFlags:
     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
-    NOT_PRODUCT(iname = "Reflection.getClassAccessFlags");
     return_type = TypeInt::INT;  // not bool!  6297094
     break;
   default:
@@ -2331,14 +2499,12 @@
   if (mirror_con == NULL)  return false;  // cannot happen?
 
 #ifndef PRODUCT
-  if (PrintInlining || PrintOptoInlining) {
+  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
     ciType* k = mirror_con->java_mirror_type();
     if (k) {
-      tty->print("Inlining %s on constant Class ", iname);
+      tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
       k->print_name();
       tty->cr();
-    } else {
-      tty->print_cr("Inlining %s on non-constant Class", iname);
     }
   }
 #endif
@@ -2357,8 +2523,6 @@
   // situation.
   _sp += nargs;  // set original stack for use by uncommon_trap
   mirror = do_null_check(mirror, T_OBJECT);
-  if (null_check_obj)
-    obj = do_null_check(obj, T_OBJECT);
   _sp -= nargs;
   // If mirror or obj is dead, only null-path is taken.
   if (stopped())  return true;
@@ -2374,6 +2538,7 @@
   if (stopped()) { push_result(region, phi); return true; }
 
   Node* p;  // handy temp
+  Node* null_ctl;
 
   // Now that we have the non-null klass, we can perform the real query.
   // For constant classes, the query will constant-fold in LoadNode::Value.
@@ -2388,6 +2553,68 @@
     p = basic_plus_adr(kls, Klass::modifier_flags_offset_in_bytes() + sizeof(oopDesc));
     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
     break;
+
+  case vmIntrinsics::_isInterface:
+    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
+    if (generate_interface_guard(kls, region) != NULL)
+      // A guard was added.  If the guard is taken, it was an interface.
+      phi->add_req(intcon(1));
+    // If we fall through, it's a plain class.
+    query_value = intcon(0);
+    break;
+
+  case vmIntrinsics::_isArray:
+    // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
+    if (generate_array_guard(kls, region) != NULL)
+      // A guard was added.  If the guard is taken, it was an array.
+      phi->add_req(intcon(1));
+    // If we fall through, it's a plain class.
+    query_value = intcon(0);
+    break;
+
+  case vmIntrinsics::_isPrimitive:
+    query_value = intcon(0); // "normal" path produces false
+    break;
+
+  case vmIntrinsics::_getSuperclass:
+    // The rules here are somewhat unfortunate, but we can still do better
+    // with random logic than with a JNI call.
+    // Interfaces store null or Object as _super, but must report null.
+    // Arrays store an intermediate super as _super, but must report Object.
+    // Other types can report the actual _super.
+    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
+    if (generate_interface_guard(kls, region) != NULL)
+      // A guard was added.  If the guard is taken, it was an interface.
+      phi->add_req(null());
+    if (generate_array_guard(kls, region) != NULL)
+      // A guard was added.  If the guard is taken, it was an array.
+      phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
+    // If we fall through, it's a plain class.  Get its _super.
+    p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc));
+    kls = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+    null_ctl = top();
+    kls = null_check_oop(kls, &null_ctl);
+    if (null_ctl != top()) {
+      // If the guard is taken, Object.superClass is null (both klass and mirror).
+      region->add_req(null_ctl);
+      phi   ->add_req(null());
+    }
+    if (!stopped()) {
+      query_value = load_mirror_from_klass(kls);
+    }
+    break;
+
+  case vmIntrinsics::_getComponentType:
+    if (generate_array_guard(kls, region) != NULL) {
+      // Be sure to pin the oop load to the guard edge just created:
+      Node* is_array_ctrl = region->in(region->req()-1);
+      Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()) + sizeof(oopDesc));
+      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
+      phi->add_req(cmo);
+    }
+    query_value = null();  // non-array case is null
+    break;
+
   case vmIntrinsics::_getClassAccessFlags:
     p = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
@@ -2420,12 +2647,6 @@
   Node* klasses[2];             // corresponding Klasses: superk, subk
   klasses[0] = klasses[1] = top();
 
-#ifndef PRODUCT
-  if (PrintInlining || PrintOptoInlining) {
-    tty->print_cr("Inlining Class.isAssignableFrom");
-  }
-#endif
-
   enum {
     // A full decision tree on {superc is prim, subc is prim}:
     _prim_0_path = 1,           // {P,N} => false
@@ -2522,47 +2743,243 @@
   return true;
 }
 
-//------------------inline_native_Array_newInstance--------------------
-bool LibraryCallKit::inline_native_Array_newInstance() {
-
-  // Restore the stack and pop off the arguments.
-  _sp += 2;
-
-  Node *count_val = pop();
-
-  // Pop component class last:  it was pushed first.
-  Node *mirror = pop();
-
-  const TypeInstPtr *mirror_con = _gvn.type(mirror)->isa_instptr();
-
-  ciType* k = !mirror_con ? NULL : mirror_con->java_mirror_type();
-  if( k == NULL )  return false;
-  if( !k->is_loaded() )  return false;
-  if (k->basic_type() == T_VOID)   return false;  // No such thing as a "void[]".
-
-  if (PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-    tty->print("Inlining Array.newInstance on constant Class ");
-    k->print_name();
-    tty->cr();
+//---------------------generate_array_guard_common------------------------
+Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
+                                                  bool obj_array, bool not_array) {
+  // If obj_array/not_array==false/false:
+  // Branch around if the given klass is in fact an array (either obj or prim).
+  // If obj_array/not_array==false/true:
+  // Branch around if the given klass is not an array klass of any kind.
+  // If obj_array/not_array==true/true:
+  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
+  // If obj_array/not_array==true/false:
+  // Branch around if the kls is an oop array (Object[] or subtype)
+  //
+  // Like generate_guard, adds a new path onto the region.
+  jint  layout_con = 0;
+  Node* layout_val = get_layout_helper(kls, layout_con);
+  if (layout_val == NULL) {
+    bool query = (obj_array
+                  ? Klass::layout_helper_is_objArray(layout_con)
+                  : Klass::layout_helper_is_javaArray(layout_con));
+    if (query == not_array) {
+      return NULL;                       // never a branch
+    } else {                             // always a branch
+      Node* always_branch = control();
+      if (region != NULL)
+        region->add_req(always_branch);
+      set_control(top());
+      return always_branch;
+    }
   }
-
-  BasicType elem_type = k->basic_type();
-  if (elem_type == T_ARRAY)  elem_type = T_OBJECT;
-  const Type* etype;
-  if (elem_type != T_OBJECT) {
-    etype = Type::get_const_basic_type(elem_type);
-  } else {
-    etype = TypeOopPtr::make_from_klass_raw(k->as_klass());
+  // Now test the correct condition.
+  jint  nval = (obj_array
+                ? ((jint)Klass::_lh_array_tag_type_value
+                   <<    Klass::_lh_array_tag_shift)
+                : Klass::_lh_neutral_value);
+  Node* cmp = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(nval)) );
+  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
+  // invert the test if we are looking for a non-array
+  if (not_array)  btest = BoolTest(btest).negate();
+  Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, btest) );
+  return generate_fair_guard(bol, region);
+}
+
+
+//-----------------------inline_native_newArray--------------------------
+bool LibraryCallKit::inline_native_newArray() {
+  int nargs = 2;
+  Node* mirror    = argument(0);
+  Node* count_val = argument(1);
+
+  _sp += nargs;  // set original stack for use by uncommon_trap
+  mirror = do_null_check(mirror, T_OBJECT);
+  _sp -= nargs;
+
+  enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
+  RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
+  PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
+                                                      TypeInstPtr::NOTNULL);
+  PhiNode*    result_io  = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
+  PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
+                                                      TypePtr::BOTTOM);
+
+  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
+  Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
+                                                  nargs,
+                                                  result_reg, _slow_path);
+  Node* normal_ctl   = control();
+  Node* no_array_ctl = result_reg->in(_slow_path);
+
+  // Generate code for the slow case.  We make a call to newArray().
+  set_control(no_array_ctl);
+  if (!stopped()) {
+    // Either the input type is void.class, or else the
+    // array klass has not yet been cached.  Either the
+    // ensuing call will throw an exception, or else it
+    // will cache the array klass for next time.
+    PreserveJVMState pjvms(this);
+    CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
+    Node* slow_result = set_results_for_java_call(slow_call);
+    // this->control() comes from set_results_for_java_call
+    result_reg->set_req(_slow_path, control());
+    result_val->set_req(_slow_path, slow_result);
+    result_io ->set_req(_slow_path, i_o());
+    result_mem->set_req(_slow_path, reset_memory());
   }
-  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciArrayKlass::make(k));
-
-  Node* obj = new_array(makecon(array_klass), count_val);
-
-  push(obj);
+
+  set_control(normal_ctl);
+  if (!stopped()) {
+    // Normal case:  The array type has been cached in the java.lang.Class.
+    // The following call works fine even if the array type is polymorphic.
+    // It could be a dynamic mix of int[], boolean[], Object[], etc.
+    _sp += nargs;  // set original stack for use by uncommon_trap
+    Node* obj = new_array(klass_node, count_val);
+    _sp -= nargs;
+    result_reg->init_req(_normal_path, control());
+    result_val->init_req(_normal_path, obj);
+    result_io ->init_req(_normal_path, i_o());
+    result_mem->init_req(_normal_path, reset_memory());
+  }
+
+  // Return the combined state.
+  set_i_o(        _gvn.transform(result_io)  );
+  set_all_memory( _gvn.transform(result_mem) );
+  push_result(result_reg, result_val);
+  C->set_has_split_ifs(true); // Has chance for split-if optimization
 
   return true;
 }
 
+//----------------------inline_native_getLength--------------------------
+bool LibraryCallKit::inline_native_getLength() {
+  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
+
+  int nargs = 1;
+  Node* array = argument(0);
+
+  _sp += nargs;  // set original stack for use by uncommon_trap
+  array = do_null_check(array, T_OBJECT);
+  _sp -= nargs;
+
+  // If array is dead, only null-path is taken.
+  if (stopped())  return true;
+
+  // Deoptimize if it is a non-array.
+  Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
+
+  if (non_array != NULL) {
+    PreserveJVMState pjvms(this);
+    set_control(non_array);
+    _sp += nargs;  // push the arguments back on the stack
+    uncommon_trap(Deoptimization::Reason_intrinsic,
+                  Deoptimization::Action_maybe_recompile);
+  }
+
+  // If control is dead, only non-array-path is taken.
+  if (stopped())  return true;
+
+  // This works fine even if the array type is polymorphic.
+  // It could be a dynamic mix of int[], boolean[], Object[], etc.
+  push( load_array_length(array) );
+
+  C->set_has_split_ifs(true); // Has chance for split-if optimization
+
+  return true;
+}
+
+//------------------------inline_array_copyOf----------------------------
+bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
+  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
+
+  // Restore the stack and pop off the arguments.
+  int nargs = 3 + (is_copyOfRange? 1: 0);
+  Node* original          = argument(0);
+  Node* start             = is_copyOfRange? argument(1): intcon(0);
+  Node* end               = is_copyOfRange? argument(2): argument(1);
+  Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
+
+  _sp += nargs;  // set original stack for use by uncommon_trap
+  array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
+  original          = do_null_check(original, T_OBJECT);
+  _sp -= nargs;
+
+  // Check if a null path was taken unconditionally.
+  if (stopped())  return true;
+
+  Node* orig_length = load_array_length(original);
+
+  Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nargs,
+                                            NULL, 0);
+  _sp += nargs;  // set original stack for use by uncommon_trap
+  klass_node = do_null_check(klass_node, T_OBJECT);
+  _sp -= nargs;
+
+  RegionNode* bailout = new (C, 1) RegionNode(1);
+  record_for_igvn(bailout);
+
+  // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
+  // Bail out if that is so.
+  Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
+  if (not_objArray != NULL) {
+    // Improve the klass node's type from the new optimistic assumption:
+    ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
+    const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
+    Node* cast = new (C, 2) CastPPNode(klass_node, akls);
+    cast->init_req(0, control());
+    klass_node = _gvn.transform(cast);
+  }
+
+  // Bail out if either start or end is negative.
+  generate_negative_guard(start, bailout);
+  generate_negative_guard(end,   bailout);
+ 
+  Node* length = end;
+  if (_gvn.type(start) != TypeInt::ZERO) {
+    length = _gvn.transform( new (C, 3) SubINode(end, start) );
+  }
+
+  // Bail out if length is negative.
+  generate_negative_guard(length, bailout);
+ 
+  if (bailout->req() > 1) {
+    PreserveJVMState pjvms(this);
+    set_control( _gvn.transform(bailout) );
+    _sp += nargs;  // push the arguments back on the stack
+    uncommon_trap(Deoptimization::Reason_intrinsic,
+                  Deoptimization::Action_maybe_recompile);
+  }
+
+  if (!stopped()) {
+    // How many elements will we copy from the original?
+    // The answer is MinI(orig_length - start, length).
+    Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
+    Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
+
+    _sp += nargs;  // set original stack for use by uncommon_trap
+    Node* newcopy = new_array(klass_node, length);
+    _sp -= nargs;
+
+    // Generate a direct call to the right arraycopy function(s).
+    // We know the copy is disjoint but we might not know if the
+    // oop stores need checking.
+    // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
+    // This will fail a store-check if x contains any non-nulls.
+    bool disjoint_bases = true;
+    bool length_never_negative = true;
+    generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
+                       original, start, newcopy, intcon(0), moved,
+                       nargs, disjoint_bases, length_never_negative);
+
+    push(newcopy);
+  }
+
+  C->set_has_split_ifs(true); // Has chance for split-if optimization
+
+  return true;
+}
+
+
 //----------------------generate_virtual_guard---------------------------
 // Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
@@ -2590,13 +3007,19 @@
 // Use generate_method_call to make a slow-call to the real
 // method if the fast path fails.  An alternative would be to
 // use a stub like OptoRuntime::slow_arraycopy_Java.
+// This only works for expanding the current library call,
+// not another intrinsic.  (E.g., don't use this for making an
+// arraycopy call inside of the copyOf intrinsic.)
 CallJavaNode*
-LibraryCallKit::generate_method_call(bool is_virtual, bool is_static) {
+LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
   // When compiling the intrinsic method itself, do not use this technique.
   guarantee(callee() != C->method(), "cannot make slow-call to self");
 
-  ciMethod*       method = callee();
-  const TypeFunc* tf     = TypeFunc::make(method);
+  ciMethod* method = callee();
+  // ensure the JVMS we have will be correct for this call
+  guarantee(method_id == method->intrinsic_id(), "must match");
+
+  const TypeFunc* tf = TypeFunc::make(method);
   int tfdc = tf->domain()->cnt();
   CallJavaNode* slow_call;
   if (is_static) {
@@ -2740,7 +3163,9 @@
   if (!stopped()) {
     // No need for PreserveJVMState, because we're using up the present state.
     set_all_memory(init_mem);
-    CallJavaNode* slow_call = generate_method_call(is_virtual, is_static);
+    vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode;
+    if (is_static)   hashCode_id = vmIntrinsics::_identityHashCode;
+    CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
     Node* slow_result = set_results_for_java_call(slow_call);
     // this->control() comes from set_results_for_java_call
     result_reg->init_req(_slow_path, control());
@@ -2780,7 +3205,7 @@
   ciMethod*       method = callee();
 
 #ifndef PRODUCT
-  if ((PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
@@ -2800,7 +3225,7 @@
   const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
   if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
 #ifndef PRODUCT
-    if ((PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
       tty->print_cr("  Bailing out because caller depth was not a constant");
     }
 #endif
@@ -2815,7 +3240,7 @@
   int caller_depth = caller_depth_type->get_con() - 1;
   if (caller_depth < 0) {
 #ifndef PRODUCT
-    if ((PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
       tty->print_cr("  Bailing out because caller depth was %d", caller_depth);
     }
 #endif
@@ -2824,7 +3249,7 @@
 
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
-    if ((PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
     }
 #endif
@@ -2861,7 +3286,7 @@
 
   if (inlining_depth == 0) {
 #ifndef PRODUCT
-    if ((PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
       tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
       tty->print_cr("  JVM state at this point:");
       for (int i = _depth; i >= 1; i--) {
@@ -2878,7 +3303,7 @@
   // Push this as a constant
   push(makecon(TypeInstPtr::make(caller_mirror)));
 #ifndef PRODUCT
-  if ((PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
     tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
     tty->print_cr("  JVM state at this point:");
     for (int i = _depth; i >= 1; i--) {
@@ -2891,9 +3316,23 @@
 
 // Helper routine for above
 bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
-  // %%% These methods should be given permanent IntrinsicIDs.
-  return jvms->method()->equals(C->get_Method_invoke()) ||
-    jvms->method()->holder()->is_subclass_of(C->get_MethodAccessorImpl());
+  // Is this the Method.invoke method itself?
+  if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke)
+    return true;
+
+  // Is this a helper, defined somewhere underneath MethodAccessorImpl.
+  ciKlass* k = jvms->method()->holder();
+  if (k->is_instance_klass()) {
+    ciInstanceKlass* ik = k->as_instance_klass();
+    for (; ik != NULL; ik = ik->super()) {
+      if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() &&
+          ik == env()->find_system_klass(ik->name())) {
+        return true;
+      }
+    }
+  }
+
+  return false;
 }
 
 static int value_field_offset = -1;  // offset of the "value" field of AtomicLongCSImpl.  This is needed by
@@ -3102,6 +3541,286 @@
   return true;
 }
 
+#ifdef _LP64
+#define XTOP ,top() /*additional argument*/
+#else  //_LP64
+#define XTOP        /*no additional argument*/
+#endif //_LP64
+
+//----------------------inline_unsafe_copyMemory-------------------------
+bool LibraryCallKit::inline_unsafe_copyMemory() {
+  if (callee()->is_static())  return false;  // caller must have the capability!
+  int nargs = 1 + 5 + 3;  // 5 args:  (src: ptr,off, dst: ptr,off, size)
+  assert(signature()->size() == nargs-1, "copy has 5 arguments");
+  null_check_receiver(callee());  // check then ignore argument(0)
+  if (stopped())  return true;
+
+  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
+
+  Node* src_ptr = argument(1);
+  Node* src_off = ConvL2X(argument(2));
+  assert(argument(3)->is_top(), "2nd half of long");
+  Node* dst_ptr = argument(4);
+  Node* dst_off = ConvL2X(argument(5));
+  assert(argument(6)->is_top(), "2nd half of long");
+  Node* size    = ConvL2X(argument(7));
+  assert(argument(8)->is_top(), "2nd half of long");
+
+  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
+         "fieldOffset must be byte-scaled");
+
+  Node* src = make_unsafe_address(src_ptr, src_off);
+  Node* dst = make_unsafe_address(dst_ptr, dst_off);
+
+  // Conservatively insert a memory barrier on all memory slices.
+  // Do not let writes of the copy source or destination float below the copy.
+  insert_mem_bar(Op_MemBarCPUOrder);
+
+  // Call it.  Note that the length argument is not scaled.
+  make_runtime_call(RC_LEAF|RC_NO_FP,
+                    OptoRuntime::fast_arraycopy_Type(),
+                    StubRoutines::unsafe_arraycopy(),
+                    "unsafe_arraycopy",
+                    TypeRawPtr::BOTTOM,
+                    src, dst, size XTOP);
+
+  // Do not let reads of the copy destination float above the copy.
+  insert_mem_bar(Op_MemBarCPUOrder);
+
+  return true;
+}
+
+
+//------------------------inline_native_clone----------------------------
+// Here are the simple edge cases:
+//  null receiver => normal trap
+//  virtual and clone was overridden => slow path to out-of-line clone
+//  not cloneable or finalizer => slow path to out-of-line Object.clone
+//
+// The general case has two steps, allocation and copying.
+// Allocation has two cases, and uses GraphKit::new_instance or new_array.
+//
+// Copying also has two cases, oop arrays and everything else.
+// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
+// Everything else uses the tight inline loop supplied by CopyArrayNode.
+//
+// These steps fold up nicely if and when the cloned object's klass
+// can be sharply typed as an object array, a type array, or an instance.
+//
+bool LibraryCallKit::inline_native_clone(bool is_virtual) {
+  int nargs = 1;
+  Node* obj = null_check_receiver(callee());
+  if (stopped())  return true;
+  Node* obj_klass = load_object_klass(obj);
+  const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
+  const TypeOopPtr*   toop   = ((tklass != NULL)
+                                ? tklass->as_instance_type()
+                                : TypeInstPtr::NOTNULL);
+
+  // Conservatively insert a memory barrier on all memory slices.
+  // Do not let writes into the original float below the clone.
+  insert_mem_bar(Op_MemBarCPUOrder);
+
+  // paths into result_reg:
+  enum {
+    _slow_path = 1,     // out-of-line call to clone method (virtual or not)
+    _objArray_path,     // plain allocation, plus arrayof_oop_arraycopy
+    _fast_path,         // plain allocation, plus a CopyArray operation
+    PATH_LIMIT
+  };
+  RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
+  PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
+                                                      TypeInstPtr::NOTNULL);
+  PhiNode*    result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
+  PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
+                                                      TypePtr::BOTTOM);
+  record_for_igvn(result_reg);
+
+  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
+  int raw_adr_idx = Compile::AliasIdxRaw;
+  const bool raw_mem_only = true;
+
+  // paths into alloc_reg (on the fast path, just before the CopyArray):
+  enum { _typeArray_alloc = 1, _instance_alloc, ALLOC_LIMIT };
+  RegionNode* alloc_reg = new(C, ALLOC_LIMIT) RegionNode(ALLOC_LIMIT);
+  PhiNode*    alloc_val = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, raw_adr_type);
+  PhiNode*    alloc_siz = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, TypeX_X);
+  PhiNode*    alloc_i_o = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::ABIO);
+  PhiNode*    alloc_mem = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::MEMORY,
+                                                      raw_adr_type);
+  record_for_igvn(alloc_reg);
+
+  bool card_mark = false;  // (see below)
+
+  Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
+  if (array_ctl != NULL) {
+    // It's an array.
+    PreserveJVMState pjvms(this);
+    set_control(array_ctl);
+    Node* obj_length = load_array_length(obj);
+    Node* obj_size = NULL;
+    _sp += nargs;  // set original stack for use by uncommon_trap
+    Node* alloc_obj = new_array(obj_klass, obj_length,
+                                raw_mem_only, &obj_size);
+    _sp -= nargs;
+    assert(obj_size != NULL, "");
+    Node* raw_obj = alloc_obj->in(1);
+    assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
+
+    if (true) { // TO DO: check ReduceInitialCardMarks
+      // If it is an oop array, it requires very special treatment,
+      // because card marking is required on each card of the array.
+      Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
+      if (is_obja != NULL) {
+        PreserveJVMState pjvms2(this);
+        set_control(is_obja);
+        // Generate a direct call to the right arraycopy function(s).
+        bool disjoint_bases = true;
+        bool length_never_negative = true;
+        generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
+                           obj, intcon(0), alloc_obj, intcon(0),
+                           obj_length, nargs,
+                           disjoint_bases, length_never_negative);
+        result_reg->init_req(_objArray_path, control());
+        result_val->init_req(_objArray_path, alloc_obj);
+        result_i_o ->set_req(_objArray_path, i_o());
+        result_mem ->set_req(_objArray_path, reset_memory());
+      }
+    }
+
+    // Otherwise, there are no card marks to worry about.
+    alloc_val->init_req(_typeArray_alloc, raw_obj);
+    alloc_siz->init_req(_typeArray_alloc, obj_size);
+    alloc_reg->init_req(_typeArray_alloc, control());
+    alloc_i_o->init_req(_typeArray_alloc, i_o());
+    alloc_mem->init_req(_typeArray_alloc, memory(raw_adr_type));
+  }
+
+  // We only go to the fast case code if we pass a number of guards.
+  // The paths which do not pass are accumulated in the slow_region.
+  RegionNode* slow_region = new (C, 1) RegionNode(1);
+  record_for_igvn(slow_region);
+  if (!stopped()) {
+    // It's an instance.  Make the slow-path tests.
+    // If this is a virtual call, we generate a funny guard.  We grab
+    // the vtable entry corresponding to clone() from the target object.
+    // If the target method which we are calling happens to be the
+    // Object clone() method, we pass the guard.  We do not need this
+    // guard for non-virtual calls; the caller is known to be the native
+    // Object clone().
+    if (is_virtual) {
+      generate_virtual_guard(obj_klass, slow_region);
+    }
+
+    // The object must be cloneable and must not have a finalizer.
+    // Both of these conditions may be checked in a single test.
+    // We could optimize the cloneable test further, but we don't care.
+    generate_access_flags_guard(obj_klass,
+                                // Test both conditions:
+                                JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
+                                // Must be cloneable but not finalizer:
+                                JVM_ACC_IS_CLONEABLE,
+                                slow_region);
+  }
+
+  if (!stopped()) {
+    // It's an instance, and it passed the slow-path tests.
+    PreserveJVMState pjvms(this);
+    Node* obj_size = NULL;
+    Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
+    assert(obj_size != NULL, "");
+    Node* raw_obj = alloc_obj->in(1);
+    assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
+    if (true) { // TO DO: check ReduceInitialCardMarks
+      // Put in store barrier for any and all oops we are sticking
+      // into this object.  (We could avoid this if we could prove
+      // that the object type contains no oop fields at all.)
+      card_mark = true;
+    }
+    alloc_val->init_req(_instance_alloc, raw_obj);
+    alloc_siz->init_req(_instance_alloc, obj_size);
+    alloc_reg->init_req(_instance_alloc, control());
+    alloc_i_o->init_req(_instance_alloc, i_o());
+    alloc_mem->init_req(_instance_alloc, memory(raw_adr_type));
+  }
+
+  // Generate code for the slow case.  We make a call to clone().
+  set_control(_gvn.transform(slow_region));
+  if (!stopped()) {
+    PreserveJVMState pjvms(this);
+    CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
+    Node* slow_result = set_results_for_java_call(slow_call);
+    // this->control() comes from set_results_for_java_call
+    result_reg->init_req(_slow_path, control());
+    result_val->init_req(_slow_path, slow_result);
+    result_i_o ->set_req(_slow_path, i_o());
+    result_mem ->set_req(_slow_path, reset_memory());
+  }
+
+  // The object is allocated, as an array and/or an instance.  Now copy it.
+  set_control( _gvn.transform(alloc_reg) );
+  set_i_o(     _gvn.transform(alloc_i_o) );
+  set_memory(  _gvn.transform(alloc_mem), raw_adr_type );
+  Node* raw_obj  = _gvn.transform(alloc_val);
+
+  if (!stopped()) {
+    // Copy the fastest available way.
+    // (No need for PreserveJVMState, since we're using it all up now.)
+    Node* src  = obj;
+    Node* dest = raw_obj;
+    Node* end  = dest;
+    Node* size = _gvn.transform(alloc_siz);
+
+    // Exclude the header.
+    int base_off = sizeof(oopDesc);
+    src  = basic_plus_adr(src,  base_off);
+    dest = basic_plus_adr(dest, base_off);
+    end  = basic_plus_adr(end,  size);
+
+    // Compute the length also, if needed:
+    Node* countx = size;
+    countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
+    countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
+
+    // Select an appropriate instruction to initialize the range.
+    // The CopyArray instruction (if supported) can be optimized
+    // into a discrete set of scalar loads and stores.
+    bool disjoint_bases = true;
+    generate_unchecked_arraycopy(T_LONG, raw_adr_type, disjoint_bases,
+                                 src, NULL, dest, NULL, countx);
+    
+    // Now that the object is properly initialized, type it as an oop.
+    Node* new_obj = new(C, 2) CheckCastPPNode(control(), raw_obj,
+                                              TypeInstPtr::NOTNULL);
+    new_obj = _gvn.transform(new_obj);
+
+    // If necessary, emit some card marks afterwards.  (Non-arrays only.)
+    if (card_mark) {
+      Node* no_particular_value = NULL;
+      Node* no_particular_field = NULL;
+      store_barrier(memory(raw_adr_type), T_OBJECT, new_obj,
+                    no_particular_field, no_particular_value);
+    }
+    // Present the results of the slow call.
+    result_reg->init_req(_fast_path, control());
+    result_val->init_req(_fast_path, new_obj);
+    result_i_o ->set_req(_fast_path, i_o());
+    result_mem ->set_req(_fast_path, reset_memory());
+  }
+
+  // Return the combined state.
+  set_control(    _gvn.transform(result_reg) );
+  set_i_o(        _gvn.transform(result_i_o) );
+  set_all_memory( _gvn.transform(result_mem) );
+
+  // Cast the result to a sharper type, since we know what clone does.
+  Node* new_obj = _gvn.transform(result_val);
+  Node* cast    = new (C, 2) CheckCastPPNode(control(), new_obj, toop);
+  push(_gvn.transform(cast));
+
+  return true;
+}
+
 
 // constants for computing the copy function
 enum {
@@ -3173,20 +3892,16 @@
 }
 
 //------------------------------basictype2arraycopy----------------------------
-address LibraryCallKit::basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset, const char* &name) {
-  // Call type_or_null(n) to determine node's type since 
-  // we are in parse phase and call n->Value() may return wrong type
-  // (for example, a phi node at the beginning of loop parsing).
-  const Type    *src_offset_type    = _gvn.type_or_null(src_offset);
-  const TypeInt *src_offset_inttype = (src_offset_type != NULL) ? 
-                                       src_offset_type->isa_int() : NULL;
-
-  const Type    *dest_offset_type    = _gvn.type_or_null(dest_offset);
-  const TypeInt *dest_offset_inttype = (dest_offset_type != NULL) ? 
-                                        dest_offset_type->isa_int() : NULL;
+address LibraryCallKit::basictype2arraycopy(BasicType t,
+                                            Node* src_offset,
+                                            Node* dest_offset,
+                                            bool disjoint_bases,
+                                            const char* &name) {
+  const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);
+  const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);
 
   bool aligned = false;
-  bool disjoint = false;
+  bool disjoint = disjoint_bases;
 
   // if the offsets are the same, we can treat the memory regions as
   // disjoint, because either the memory regions are in different arrays,
@@ -3201,18 +3916,22 @@
     int element_size = type2aelembytes[t];
     aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
               ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
-    disjoint =  s_offs >= d_offs;
-  } else {
-    disjoint = src_offset == dest_offset;
+    if (s_offs >= d_offs)  disjoint = true;
+  } else if (src_offset == dest_offset && src_offset != NULL) {
+    // This can occur if the offsets are identical non-constants.
+    disjoint = true;
   }
 
   return select_arraycopy_function(t, aligned, disjoint, name);
 }
 
+
 //------------------------------inline_arraycopy-----------------------
 bool LibraryCallKit::inline_arraycopy() {
   // Restore the stack and pop off the arguments.
-  int nargs = 5;
+  int nargs = 5;  // 2 oops, 3 ints, no size_t or long
+  assert(callee()->signature()->size() == nargs, "copy has 5 arguments");
+
   Node *src         = argument(0);
   Node *src_offset  = argument(1);
   Node *dest        = argument(2);
@@ -3230,8 +3949,22 @@
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
   if (top_src  == NULL || top_src->klass()  == NULL ||
       top_dest == NULL || top_dest->klass() == NULL) {
+    // Conservatively insert a memory barrier on all memory slices.
+    // Do not let writes into the source float below the arraycopy.
+    insert_mem_bar(Op_MemBarCPUOrder);
+
     // Call StubRoutines::generic_arraycopy stub.
-    generate_generic_arraycopy(TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length);
+    generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
+                       src, src_offset, dest, dest_offset, length,
+                       nargs);
+
+    // Do not let reads from the destination float above the arraycopy.
+    // Since we cannot type the arrays, we don't know which slices
+    // might be affected.  We could restrict this barrier only to those
+    // memory slices which pertain to array elements--but don't bother.
+    if (!InsertMemBarAfterArraycopy)
+      // (If InsertMemBarAfterArraycopy, there is already one in place.)
+      insert_mem_bar(Op_MemBarCPUOrder);
     return true;
   }
 
@@ -3284,8 +4017,8 @@
   // (5) dest_offset must not be negative.
   generate_negative_guard(dest_offset, slow_region);
 
-  // (6) length must not be negative.
-  generate_negative_guard(length, slow_region);
+  // (6) length must not be negative (moved to generate_arraycopy()).
+  // generate_negative_guard(length, slow_region);
 
   // (7) src_offset + length must not exceed length of src.
   generate_limit_guard(src_offset, length,
@@ -3299,9 +4032,12 @@
 
   // (9) each element of an oop array must be assignable
   // The generate_arraycopy subroutine checks this.
-  generate_arraycopy(dest_elem,
+
+  // This is where the memory effects are placed:
+  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
+  generate_arraycopy(adr_type, dest_elem,
                      src, src_offset, dest, dest_offset, length,
-                     nargs, slow_region);
+                     nargs, false, false, slow_region);
 
   return true;
 }
@@ -3315,40 +4051,47 @@
 // (such as out of bounds length or non-conformable array types).
 // The generated code has this shape, in general:
 //
-//     if (indexes in bounds) {
-//       if ((is object array) && !(array type check [*])) {
-//         goto slow_region
+//     if (length == 0)  return   // via zero_path
+//     slowval = -1
+//     if (types unknown) {
+//       slowval = call generic copy loop
+//       if (slowval == 0)  return  // via checked_path
+//     } else if (indexes in bounds) {
+//       if ((is object array) && !(array type check)) {
+//         slowval = call checked copy loop
+//         if (slowval == 0)  return  // via checked_path
 //       } else {
 //         call bulk copy loop
 //         return  // via fast_path
 //       }
 //     }
+//     // adjust params for remaining work:
+//     if (slowval != -1) {
+//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
+//     }
 //   slow_region:
 //     call slow arraycopy(src, src_offset, dest, dest_offset, length)
 //     return  // via slow_call_path
 //
-// ([*] Of course, primitive array types do not need type checks.)
-//
-// This code shape assumes that the compiler can statically
-// determine at least an approximate array type for source
-// and destination.  It does not support fully generic arraycopy.
+// This routine is used from several intrinsics:  System.arraycopy,
+// Object.clone (the array subcase), and Arrays.copyOf[Range].
 //
 void
-LibraryCallKit::generate_arraycopy(BasicType basic_elem_type,
+LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
+                                   BasicType basic_elem_type,
                                    Node* src,  Node* src_offset,
                                    Node* dest, Node* dest_offset,
                                    Node* copy_length,
-                                   int nargs, Node* slow_region) {
-  if (slow_region == NULL)  slow_region = top();
-
-  // This is where the memory effects are placed:
-  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(basic_elem_type);
-
+                                   int nargs,
+                                   bool disjoint_bases,
+                                   bool length_never_negative,
+                                   Node* slow_control) {
   // Results are placed here:
-  enum { checks_done_path = 1,
-         fast_path        = 2,
+  enum { fast_path        = 1,
+         checked_path     = 2,
          slow_call_path   = 3,
-         PATH_LIMIT       = 4
+         zero_path        = 4,
+         PATH_LIMIT       = 5
   };
   RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
   PhiNode*    result_i_o    = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO);
@@ -3357,38 +4100,106 @@
   _gvn.set_type_bottom(result_i_o);
   _gvn.set_type_bottom(result_memory);
 
-  // Here are all the slow paths up to this point, in one bundle:
-  Node* slow_control = _gvn.transform(slow_region);
+  // Other parts of the slow_control edge:
+  Node* slow_i_o = i_o();
+  Node* slow_mem = memory(adr_type);
+
+  // Checked control path:
+  Node* checked_control = top();
+  Node* checked_mem     = NULL;
+  Node* checked_i_o     = NULL;
+  Node* checked_value   = NULL;
+
+  if (basic_elem_type == T_CONFLICT) {
+    { PreserveJVMState pjvms(this);
+      Node* cv = generate_generic_arraycopy(adr_type,
+                                            src, src_offset, dest, dest_offset,
+                                            copy_length, nargs);
+      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
+      checked_control = control();
+      checked_i_o     = i_o();
+      checked_mem     = reset_memory();
+      checked_value   = cv;
+    }
+    set_control(top());         // no fast path
+  }
+
+  Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
+  if (not_pos != NULL) {
+    Node* fast_ctrl = control();
+
+    set_control(not_pos);
+    // (6) length must not be negative.
+    if (!length_never_negative) {
+      if (slow_control == NULL) {
+        slow_control = new(C,1) RegionNode(1);
+        record_for_igvn(slow_control);
+      }
+      generate_negative_guard(copy_length, slow_control->as_Region());
+    }
+
+    // Present the results of the fast call.
+    result_region->init_req(zero_path, control());
+    result_i_o   ->init_req(zero_path, i_o());
+    result_memory->init_req(zero_path, memory(adr_type));
+
+    set_control(fast_ctrl);
+  }
+
+  assert(basic_elem_type != T_ARRAY, "caller must fix this");
+  if (basic_elem_type == T_OBJECT) {
+    // If src and dest have compatible element types, we can copy bits.
+    // Types S[] and D[] are compatible if D is a supertype of S.
+    //
+    // If they are not, we will use checked_oop_disjoint_arraycopy,
+    // which performs a fast optimistic per-oop check, and backs off
+    // further to JVM_ArrayCopy on the first per-oop check that fails.
+    // (Actually, we don't move raw bits only; the GC requires card marks.)
+
+    // Get the klassOop for both src and dest
+    Node* src_klass  = load_object_klass(src);
+    Node* dest_klass = load_object_klass(dest);
+
+    // Generate the subtype check.
+    // This might fold up statically, or then again it might not.
+    //
+    // Non-static example:  Copying List<String>.elements to a new String[].
+    // The backing store for a List<String> is always an Object[],
+    // but its elements are always type String, if the generic types
+    // are correct at the source level.
+    //
+    // Test S[] against D[], not S against D, because (probably)
+    // the secondary supertype cache is less busy for S[] than S.
+    // This usually only matters when D is an interface.
+    Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
+    // Plug failing path into checked_oop_disjoint_arraycopy
+    if (not_subtype_ctrl != top()) {
+      PreserveJVMState pjvms(this);
+      set_control(not_subtype_ctrl);
+      // (At this point we can assume disjoint_bases, since types differ.)
+      int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
+      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
+      Node* n1 = new (C, 3) LoadKlassNode(0, immutable_memory(), p1, TypeRawPtr::BOTTOM);
+      Node* dest_elem_klass = _gvn.transform(n1);
+      Node* cv = generate_checkcast_arraycopy(adr_type,
+                                              dest_elem_klass,
+                                              src, src_offset, dest, dest_offset,
+                                              copy_length,
+                                              nargs);
+      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
+      checked_control = control();
+      checked_i_o     = i_o();
+      checked_mem     = reset_memory();
+      checked_value   = cv;
+    }
+    // At this point we know we do not need type checks on oop stores.
+  }
 
   if (!stopped()) {
     // Generate the fast path, if possible.
     PreserveJVMState pjvms(this);
 
-    // If src and dest have compatible element types, we can copy bits.
-    // Types S[] and D[] are compatible if D is a supertype of S.
-    // (Actually, we don't move raw bits only; the GC requires card marks.)
-    assert(basic_elem_type != T_ARRAY, "caller must fix this");
-    if (basic_elem_type == T_OBJECT) {
-      // Get the klassOop for both src and dest
-      Node* src_klass  = load_object_klass(src);
-      Node* dest_klass = load_object_klass(dest);
-
-      // Generate the subtype check.
-      // This might fold up statically, or then again it might not.
-      Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
-      if (not_subtype_ctrl != top()) {
-        // What remains is the slow path.
-        RegionNode* slow_reg2 = new(C, 3) RegionNode(3);
-        record_for_igvn(slow_reg2);
-        slow_reg2  ->init_req(1, slow_control);
-        slow_reg2  ->init_req(2, not_subtype_ctrl);
-        slow_control = _gvn.transform(slow_reg2);
-      }
-      // Fall through with a successful type check...
-    }
-    // At this point we know we do not need type checks on oop stores.
-
-    generate_unchecked_arraycopy(basic_elem_type, adr_type,
+    generate_unchecked_arraycopy(basic_elem_type, adr_type, disjoint_bases,
                                  src, src_offset, dest, dest_offset,
                                  ConvI2X(copy_length));
 
@@ -3398,13 +4209,66 @@
     result_memory->init_req(fast_path, memory(adr_type));
   }
 
+  // Here are all the slow paths up to this point, in one bundle:
+  if (slow_control == NULL)  slow_control = top();
+  slow_control = _gvn.transform(slow_control);
+
+  set_control(checked_control);
+  if (!stopped()) {
+    // Clean up after the checked call.
+    // The returned value is either 0 or -1^K,
+    // where K = number of partially transferred array elements.
+    Node* cmp = _gvn.transform( new(C, 3) CmpINode(checked_value, intcon(0)) );
+    Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) );
+    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
+
+    // If it is 0, we are done, so transfer to the end.
+    Node* checks_done = _gvn.transform( new(C, 1) IfTrueNode(iff) );
+    result_region->init_req(checked_path, checks_done);
+    result_i_o   ->init_req(checked_path, checked_i_o);
+    result_memory->init_req(checked_path, checked_mem);
+
+    // If it is not zero, merge into the slow call.
+    set_control( _gvn.transform( new(C, 1) IfFalseNode(iff) ));
+    RegionNode* slow_reg2 = new(C, 3) RegionNode(3);
+    PhiNode*    slow_i_o2 = new(C, 3) PhiNode(slow_reg2, Type::ABIO);
+    PhiNode*    slow_mem2 = new(C, 3) PhiNode(slow_reg2, Type::MEMORY, adr_type);
+    record_for_igvn(slow_reg2);
+    slow_reg2  ->init_req(1, slow_control);
+    slow_i_o2  ->init_req(1, slow_i_o);
+    slow_mem2  ->init_req(1, slow_mem);
+    slow_reg2  ->init_req(2, control());
+    slow_i_o2  ->init_req(2, i_o());
+    slow_mem2  ->init_req(2, memory(adr_type));
+
+    // We must continue the copy exactly where it failed, or else
+    // another thread might see the wrong number of writes to dest.
+    Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) );
+    Node* slow_offset    = new(C, 3) PhiNode(slow_reg2, TypeInt::INT);
+    slow_offset->init_req(1, intcon(0));
+    slow_offset->init_req(2, checked_offset);
+
+    slow_control = _gvn.transform(slow_reg2);
+    slow_i_o     = _gvn.transform(slow_i_o2);
+    slow_mem     = _gvn.transform(slow_mem2);
+    slow_offset  = _gvn.transform(slow_offset);
+
+    // Adjust the arguments by the conditionally incoming offset.
+    Node* src_off_plus  = _gvn.transform( new(C, 3) AddINode(src_offset,  slow_offset) );
+    Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) );
+    Node* length_minus  = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) );
+
+    // Tweak the node variables to adjust the code produced below:
+    src_offset  = src_off_plus;
+    dest_offset = dest_off_plus;
+    copy_length = length_minus;
+  }
+
   set_control(slow_control);
   if (!stopped()) {
     // Generate the slow path, if needed.
     PreserveJVMState pjvms(this);   // (better safe than sorry)
 
-
-    // Simple case.
     generate_slow_arraycopy(adr_type,
                             src, src_offset, dest, dest_offset,
                             copy_length, nargs);
@@ -3414,88 +4278,117 @@
     result_memory->init_req(slow_call_path, memory(adr_type));
   }
 
+  // Remove unused edges.
+  for (uint i = 1; i < result_region->req(); i++) {
+    if (result_region->in(i) == NULL)
+      result_region->init_req(i, top());
+  }
+
   // Finished; return the combined state.
   set_control( _gvn.transform(result_region) );
   set_i_o(     _gvn.transform(result_i_o)    );
   set_memory(  _gvn.transform(result_memory), adr_type );
+
+  // The memory edges above are precise in order to model effects around 
+  // array copyies accurately to allow value numbering of field loads around
+  // arraycopy.  Such field loads, both before and after, are common in Java 
+  // collections and similar classes involving header/array data structures.
+  //
+  // But with low number of register or when some registers are used or killed 
+  // by arraycopy calls it causes registers spilling on stack. See 6544710.
+  // The next memory barrier is added to avoid it. If the arraycopy can be 
+  // optimized away (which it can, sometimes) then we can manually remove 
+  // the membar also.
+  if (InsertMemBarAfterArraycopy)
+    insert_mem_bar(Op_MemBarCPUOrder);
 }
 
 
 // Helper function; generates code for the slow case.
 // We make a call to a runtime method which emulates the native method,
 // but without the native wrapper overhead.
-Node*
+void
 LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
                                         Node* src,  Node* src_offset,
                                         Node* dest, Node* dest_offset,
                                         Node* copy_length,
                                         int nargs) {
-  CallJavaNode* call = generate_method_call(false, true);
-  set_results_for_java_call(call);
-
-  return call;
+  _sp += nargs; // any deopt will start just before call to enclosing method
+  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
+                                 OptoRuntime::slow_arraycopy_Type(),
+                                 OptoRuntime::slow_arraycopy_Java(),
+                                 "slow_arraycopy", adr_type,
+                                 src, src_offset, dest, dest_offset,
+                                 copy_length);
+  _sp -= nargs;
+
+  // Handle exceptions thrown by this fellow:
+  make_slow_call_ex(call, env()->Throwable_klass(), false);
 }
 
-
-// Helper function; generates code for objects case.
-void
+// Helper function; generates code for cases requiring runtime checks.
+Node*
+LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
+                                             Node* dest_elem_klass,
+                                             Node* src,  Node* src_offset,
+                                             Node* dest, Node* dest_offset,
+                                             Node* copy_length,
+                                             int nargs) { 
+  if (stopped())  return NULL;
+
+  address copyfunc_addr = StubRoutines::checkcast_arraycopy();
+  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
+    return NULL;
+  }
+
+  // Pick out the parameters required to perform a store-check
+  // for the target array.  This is an optimistic check.  It will
+  // look in each non-null element's class, at the desired klass's
+  // super_check_offset, for the desired klass.
+  int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
+  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
+  Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM);
+  Node* check_offset = _gvn.transform(n3);
+  Node* check_value  = dest_elem_klass;
+
+  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
+  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
+
+  // (We know the arrays are never conjoint, because their types differ.)
+  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
+                                 OptoRuntime::checkcast_arraycopy_Type(),
+                                 copyfunc_addr, "checkcast_arraycopy", adr_type,
+                                 // five arguments, of which two are
+                                 // intptr_t (jlong in LP64)
+                                 src_start, dest_start,
+                                 copy_length XTOP,
+                                 check_offset XTOP,
+                                 check_value);
+
+  return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
+}
+
+
+// Helper function; generates code for cases requiring runtime checks.
+Node*
 LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type, 
                                            Node* src,  Node* src_offset,
                                            Node* dest, Node* dest_offset,
-                                           Node* copy_length) {
+                                           Node* copy_length,
+                                           int nargs) {
+  if (stopped())  return NULL;
 
   address copyfunc_addr = StubRoutines::generic_arraycopy();
   if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
-    CallJavaNode* call = generate_method_call(false, true);
-    set_results_for_java_call(call);
-    return;
+    return NULL;
   }
-  RegionNode* result_region = new(C, 3) RegionNode(3);
-  PhiNode*    result_i_o    = new(C, 3) PhiNode(result_region, Type::ABIO);
-  PhiNode*    result_memory = new(C, 3) PhiNode(result_region, Type::MEMORY, adr_type);
-  record_for_igvn(result_region);
-  _gvn.set_type_bottom(result_i_o);
-  _gvn.set_type_bottom(result_memory);
 
   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                     OptoRuntime::generic_arraycopy_Type(),
                     copyfunc_addr, "generic_arraycopy", adr_type,
                     src, src_offset, dest, dest_offset, copy_length);
 
-  Node* init_mem = reset_memory();
-
-  Node* result = _gvn.transform(new (C, 1) ProjNode(call,TypeFunc::Parms));
-  Node* cmp_0 = _gvn.transform( new (C, 3) CmpINode(result, intcon(0)) );
-  Node* bol_0 = _gvn.transform( new (C, 2) BoolNode(cmp_0,  BoolTest::eq) );
-
-  IfNode* iff = new (C, 2) IfNode( control(), bol_0, PROB_MAX, COUNT_UNKNOWN );
-  _gvn.set_type(iff, iff->Value(&_gvn));
-
-  Node* if_fast = _gvn.transform( new (C, 1) IfTrueNode(iff) );
-
-  result_region->init_req(1, if_fast);
-  result_i_o   ->init_req(1, i_o());
-  result_memory->init_req(1, init_mem);
-  
-  Node* if_slow = _gvn.transform( new (C, 1) IfFalseNode(iff) );
-  set_control(if_slow);
-  {
-    set_all_memory(init_mem);
-    // The top level type of either src or dest is not known to be an
-    // array.  Punt to the slow routine.
-    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
-    CallJavaNode* call = generate_method_call(false, true);
-    set_results_for_java_call(call);
-
-    result_region->init_req(2, control());
-    result_i_o   ->init_req(2, i_o());
-    result_memory->init_req(2, reset_memory());
-  }
-
-  // Finished; return the combined state.
-  set_control( _gvn.transform(result_region) );
-  set_i_o(     _gvn.transform(result_i_o)    );
-  set_all_memory( _gvn.transform(result_memory) );
+  return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
 }
 
 
@@ -3503,25 +4396,30 @@
 void
 LibraryCallKit::generate_unchecked_arraycopy(BasicType basic_elem_type,
                                              const TypePtr* adr_type,
+                                             bool disjoint_bases,
                                              Node* src,  Node* src_offset,
                                              Node* dest, Node* dest_offset,
                                              Node* copy_length) {
   if (stopped())  return;               // nothing to do
 
-  Node* src_start  = array_element_address(src,  src_offset,  basic_elem_type);
-  Node* dest_start = array_element_address(dest, dest_offset, basic_elem_type);;
+  Node* src_start  = src;
+  Node* dest_start = dest;
+  if (src_offset != NULL || dest_offset != NULL) {
+    assert(src_offset != NULL && dest_offset != NULL, "");
+    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
+    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
+  }
 
   // Figure out which arraycopy runtime method to call.
   const char* copyfunc_name = "arraycopy";
   address     copyfunc_addr =
       basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
-                          copyfunc_name);
+                          disjoint_bases, copyfunc_name);
 
   // Call it.  Note that the count_ix value is not scaled to a byte-size.
   make_runtime_call(RC_LEAF|RC_NO_FP,
-                    OptoRuntime::arraycopy_Type(),
+                    OptoRuntime::fast_arraycopy_Type(),
                     copyfunc_addr, copyfunc_name, adr_type,
-                    src_start, dest_start, copy_length,
-                    LP64_ONLY(top()) NOT_LP64(NULL));
+                    src_start, dest_start, copy_length XTOP);
 }
 
--- a/hotspot/src/share/vm/opto/live.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/live.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)live.cpp	1.69 07/05/05 17:06:18 JVM"
+#pragma ident "@(#)live.cpp	1.70 07/05/17 17:44:00 JVM"
 #endif
 /*
  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -86,22 +86,31 @@
     // Compute the local live-in set.  Start with any new live-out bits.
     IndexSet *use = getset( b );
     IndexSet *def = &_defs[b->_pre_order-1];
+    DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
     uint i;
     for( i=b->_nodes.size(); i>1; i-- ) {
       Node *n = b->_nodes[i-1];
       if( n->is_Phi() ) break;
 
       uint r = _names[n->_idx];
+      assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
       def->insert( r );
       use->remove( r );
       uint cnt = n->req();
       for( uint k=1; k<cnt; k++ ) {
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
-        if( _cfg._bbs[nkidx] != b )
-          use->insert( _names[nkidx] );
+        if( _cfg._bbs[nkidx] != b ) {
+          uint u = _names[nkidx];
+          use->insert( u );
+          DEBUG_ONLY(def_outside->insert( u );)
+        }
       }
     }
+#ifdef ASSERT
+    def_outside->set_next(_free_IndexSet);
+    _free_IndexSet = def_outside;     // Drop onto free list
+#endif
     // Remove anything defined by Phis and the block start instruction
     for( uint k=i; k>0; k-- ) {
       uint r = _names[b->_nodes[k-1]->_idx];
--- a/hotspot/src/share/vm/opto/locknode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/locknode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)locknode.cpp	1.48 07/05/05 17:06:19 JVM"
+#pragma ident "@(#)locknode.cpp	1.49 07/05/17 15:59:05 JVM"
 #endif
 /*
  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -86,6 +86,15 @@
   return (&n == this);                // Always fail except on self
 }
 
+//
+// Create a counter which counts the number of times this lock is acquired
+//
+void FastLockNode::create_lock_counter(JVMState* state) {
+  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
+           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
+  _counters = blnc->counters();
+}
+
 //=============================================================================
 //------------------------------do_monitor_enter-------------------------------
 void Parse::do_monitor_enter() {
--- a/hotspot/src/share/vm/opto/locknode.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/locknode.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)locknode.hpp	1.38 07/05/05 17:06:19 JVM"
+#pragma ident "@(#)locknode.hpp	1.39 07/05/17 15:59:09 JVM"
 #endif
 /*
  * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -53,10 +53,14 @@
 
 //------------------------------FastLockNode-----------------------------------
 class FastLockNode: public CmpNode {
+private:
+  BiasedLockingCounters* _counters;
+
 public:
   FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
     init_req(0,ctrl);
     init_class_id(Class_FastLock);
+    _counters = NULL;
   }
   Node* obj_node() const { return in(1); }
   Node* box_node() const { return in(2); }
@@ -69,6 +73,8 @@
   virtual const Type *Value( PhaseTransform *phase ) const { return TypeInt::CC; }
   const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
 
+  void create_lock_counter(JVMState* s);
+  BiasedLockingCounters* counters() const { return _counters; }
 };
 
 
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)loopnode.cpp	1.257 07/05/05 17:06:21 JVM"
+#pragma ident "@(#)loopnode.cpp	1.258 07/05/17 17:44:08 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -2585,10 +2585,12 @@
 //------------------------------dump-------------------------------------------
 void PhaseIdealLoop::dump( ) const {
   ResourceMark rm;
+  Arena* arena = Thread::current()->resource_area();
+  Node_Stack stack(arena, C->unique() >> 2);
   Node_List rpo_list;
-  VectorSet visited(Thread::current()->resource_area());
+  VectorSet visited(arena);
   visited.set(C->top()->_idx);
-  rpo( C->root(), visited, rpo_list );
+  rpo( C->root(), stack, visited, rpo_list );
   // Dump root loop indexed by last element in PO order
   dump( _ltree_root, rpo_list.size(), rpo_list );
 }
@@ -2656,14 +2658,25 @@
 }
 
 // Collect a R-P-O for the whole CFG.
-void PhaseIdealLoop::rpo( Node *n, VectorSet &visited, Node_List &rpo_list ) const {
-  if( visited.test_set(n->_idx) ) return;
-  if( !n->is_CFG() ) return;
+// Result list is in post-order (scan backwards for RPO)
+void PhaseIdealLoop::rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const {
+  stk.push(start, 0);
+  visited.set(start->_idx);
 
-  // Visit everybody
-  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++)
-    rpo(n->fast_out(i), visited, rpo_list);
-  rpo_list.push(n);
+  while (stk.is_nonempty()) {
+    Node* m   = stk.node();
+    uint  idx = stk.index();
+    if (idx < m->outcnt()) {
+      stk.set_index(idx + 1);
+      Node* n = m->raw_out(idx);
+      if (n->is_CFG() && !visited.test_set(n->_idx)) {
+        stk.push(n, 0);
+      }
+    } else {
+      rpo_list.push(m);
+      stk.pop();
+    }
+  }
 }
 #endif
 
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)loopnode.hpp	1.141 07/05/05 17:06:20 JVM"
+#pragma ident "@(#)loopnode.hpp	1.142 07/05/17 17:44:14 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -852,7 +852,7 @@
 #ifndef PRODUCT
   void dump( ) const;
   void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const;
-  void rpo( Node *n, VectorSet &visited, Node_List &rpo_list ) const;
+  void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
   void verify() const;          // Major slow  :-)
   void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const;
   IdealLoopTree *get_loop_idx(Node* n) const {
--- a/hotspot/src/share/vm/opto/machnode.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)machnode.hpp	1.201 07/05/05 17:06:21 JVM"
+#pragma ident "@(#)machnode.hpp	1.202 07/05/17 15:59:11 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -527,6 +527,16 @@
 #endif
 };
 
+//------------------------------MachFastLockNode-------------------------------------
+// Machine-specific versions of FastLockNodes
+class MachFastLockNode : public MachNode {
+  virtual uint size_of() const { return sizeof(*this); } // Size is bigger
+public:
+  BiasedLockingCounters* _counters;
+
+  MachFastLockNode() : MachNode() {}
+};
+
 //------------------------------MachReturnNode--------------------------------
 // Machine-specific versions of subroutine returns
 class MachReturnNode : public MachNode {
--- a/hotspot/src/share/vm/opto/macro.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)macro.cpp	1.29 07/05/05 17:06:20 JVM"
+#pragma ident "@(#)macro.cpp	1.30 07/05/17 15:59:14 JVM"
 #endif
 /*
  * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -518,7 +518,6 @@
   //
   extract_call_projections(call);
 
-  assert (_fallthroughcatchproj != NULL && _catchallcatchproj != NULL, "missing projection from Allocate");
   // An allocate node has separate memory projections for the uses on the control and i_o paths
   // Replace uses of the control memory projection with result_phi_rawmem (unless we are only generating a slow call)
   if (!always_slow && _memproj_fallthrough != NULL) {
@@ -584,10 +583,14 @@
     return;
 
 
-  ctrl = _fallthroughcatchproj->clone();
-  _igvn.register_new_node_with_optimizer(ctrl);
-  _igvn.hash_delete(_fallthroughcatchproj);
-  _igvn.subsume_node(_fallthroughcatchproj, result_region);
+  if (_fallthroughcatchproj != NULL) {
+    ctrl = _fallthroughcatchproj->clone();
+    _igvn.register_new_node_with_optimizer(ctrl);
+    _igvn.hash_delete(_fallthroughcatchproj);
+    _igvn.subsume_node(_fallthroughcatchproj, result_region);
+  } else {
+    ctrl = top();
+  }
   Node *slow_result;
   if (_resproj == NULL) {
     // no uses of the allocation result
--- a/hotspot/src/share/vm/opto/memnode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)memnode.cpp	1.236 07/05/05 17:06:15 JVM"
+#pragma ident "@(#)memnode.cpp	1.237 07/05/17 15:59:18 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -632,7 +632,11 @@
     assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
     return TypeInt::make(klass->access_flags());
   }
-  // (We could fold up the size_helper, if reflective code would use it.)
+  if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)) {
+    // The field is Klass::_layout_helper.  Return its constant value if known.
+    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
+    return TypeInt::make(klass->layout_helper());
+  }
 
   // No match.
   return NULL;
@@ -709,7 +713,7 @@
   }
 
   const TypeKlassPtr *tkls = tp->isa_klassptr();
-  if (tkls != NULL) {
+  if (tkls != NULL && !StressReflectiveCode) {
     ciKlass* klass = tkls->klass();
     if (klass->is_loaded() && tkls->klass_is_exact()) {
       // We are loading a field from a Klass metaobject whose identity
@@ -733,6 +737,13 @@
       }
       const Type* aift = load_array_final_field(tkls, klass);
       if (aift != NULL)  return aift;
+      if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset()) + (int)sizeof(oopDesc)
+          && klass->is_array_klass()) {
+        // The field is arrayKlass::_component_mirror.  Return its (constant) value.
+        // (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.)
+        assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
+        return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
+      }
       if (tkls->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) {
         // The field is Klass::_java_mirror.  Return its (constant) value.
         // (Folds up the 2nd indirection in anObjConstant.getClass().)
@@ -763,7 +774,28 @@
         }
       }
     }
+
+    // If the type is enough to determine that the thing is not an array,
+    // we can give the layout_helper a positive interval type.
+    // This will help short-circuit some reflective code.
+    if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)
+        && !klass->is_array_klass() // not directly typed as an array
+        && !klass->is_interface()  // specifically not Serializable & Cloneable
+        && !klass->is_java_lang_Object()   // not the supertype of all T[]
+        ) {
+      // Note:  When interfaces are reliable, we can narrow the interface
+      // test to (klass != Serializable && klass != Cloneable).
+      assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
+      jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
+      // The key property of this type is that it folds up tests
+      // for array-ness, since it proves that the layout_helper is positive.
+      // Thus, a generic value like the basic object layout helper works fine.
+      return TypeInt::make(min_size, max_jint, Type::WidenMin);
+    }
   }
+
+  // (If loading from a freshly-allocated object, could produce zero here.)
+
   return _type;
 }
 
@@ -844,13 +876,17 @@
     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
     int offset = tinst->offset();
     if (ik == phase->C->env()->Class_klass()
-        && offset == java_lang_Class::klass_offset_in_bytes()) {
+        && (offset == java_lang_Class::klass_offset_in_bytes() ||
+            offset == java_lang_Class::array_klass_offset_in_bytes())) {
       // We are loading a special hidden field from a Class mirror object,
       // the field which points to the VM's Klass metaobject.
       ciType* t = tinst->java_mirror_type();
       // java_mirror_type returns non-null for compile-time Class constants.
       if (t != NULL) {
         // constant oop => constant klass
+        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
+          return TypeKlassPtr::make(ciArrayKlass::make(t));
+        }
         if (!t->is_klass()) {
           // a primitive Class (e.g., int.class) has NULL for a klass field
           return TypePtr::NULL_PTR;
@@ -923,7 +959,7 @@
 
   // Check for loading klass from an array klass
   const TypeKlassPtr *tkls = tp->isa_klassptr();
-  if( tkls != NULL ) {
+  if (tkls != NULL && !StressReflectiveCode) {
     ciKlass* klass = tkls->klass();
     if( !klass->is_loaded() )
       return _type;             // Bail out if not loaded
@@ -970,15 +1006,18 @@
   }
 
   // Simplify k.java_mirror.as_klass to plain k, where k is a klassOop.
+  // Simplify ak.component_mirror.array_klass to plain ak, ak an arrayKlass.
   // See inline_native_Class_query for occurrences of these patterns.
   // Java Example:  x.getClass().isAssignableFrom(y)
+  // Java Example:  Array.newInstance(x.getClass().getComponentType(), n)
   //
   // This improves reflective code, often making the Class
   // mirror go completely dead.  (Current exception:  Class
   // mirrors may appear in debug info, but we could clean them out by
   // introducing a new debug info operator for klassOop.java_mirror).
   if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
-      && offset == java_lang_Class::klass_offset_in_bytes()) {
+      && (offset == java_lang_Class::klass_offset_in_bytes() ||
+          offset == java_lang_Class::array_klass_offset_in_bytes())) {
     // We are loading a special hidden field from a Class mirror,
     // the field which points to its Klass or arrayKlass metaobject.
     if (base->is_Load()) {
@@ -990,6 +1029,9 @@
           && adr2->is_AddP()
           ) {
         int mirror_field = Klass::java_mirror_offset_in_bytes();
+        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
+          mirror_field = in_bytes(arrayKlass::component_mirror_offset());
+        }
         if (tkls->offset() == mirror_field + (int)sizeof(oopDesc)) {
           return adr2->in(AddPNode::Base);
         }
@@ -1902,15 +1944,6 @@
   // Otherwise, it is a narrow slice.
   Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
   Compile *C = Compile::current();
-  // if we have empty memory and the index refers to a instance of a type, see if there
-  // there is a slice for the general type
-  if (is_empty_memory(n) && C->AliasLevel() >= 3 && alias_idx < (uint) C->num_alias_types()) {
-    // if this is a unique instance, get the index corresponding to its general type
-    uint gti = C->get_general_index(alias_idx);
-    if (gti != alias_idx) {
-      n = gti < req() ? in(gti) : empty_memory();
-    }
-  }
   if (is_empty_memory(n)) {
     // the array is sparse; empty slots are the "top" node
     n = base_memory();
--- a/hotspot/src/share/vm/opto/node.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/node.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)node.cpp	1.225 07/05/05 17:06:18 JVM"
+#pragma ident "@(#)node.cpp	1.226 07/05/17 17:44:21 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1464,97 +1464,94 @@
   tty->print("]] ");
 }
 
-//------------------------------dump_recur-------------------------------------
-static void dump_recur(const Node* n, int d, int& only_ctrl,
-                       Compile* C, Node_List &access_path,
-                       VectorSet &old_space, VectorSet &new_space) {
-  if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
-  if (d == 0)       return;  // Done with recursion
-  // Contained in new_space or old_space?
-  VectorSet* v = C->node_arena()->contains(n) ? &new_space : &old_space;
-  if (v->test_set(n->_idx)) { // Dumped already?
-    if (access_path[0] == n) {
-      // We found a loop, maybe
-      bool print_loop = (only_ctrl != 0);
-      if (!print_loop) {
-        // print it if there are no phis or regions in the mix
-        bool found_loop_breaker = false;
-        for (uint i = 0; i < access_path.size(); i++) {
-          Node* m = access_path[i];
-          if (m->is_Phi() || m->is_Region() || m->is_Root()) {
-            found_loop_breaker = true;
-            break;
+//------------------------------dump_nodes-------------------------------------
+static void dump_nodes(const Node* start, int d, bool only_ctrl) {
+  if (NotANode(start)) return;
+  ResourceMark rm;
+  Compile* C = Compile::current();
+  Arena* arena = Thread::current()->resource_area();
+  Node_Stack stack(arena, MIN2((uint)ABS(d), C->unique() >> 1));
+  VectorSet on_stack(arena);
+  // visited sets for old and new spaces
+  VectorSet old_space(arena), new_space(arena);
+
+  VectorSet *v = arena->contains(start) ? &new_space : &old_space;
+  v->set(start->_idx);
+  stack.push((Node*)start, 0);
+  on_stack.set(start->_idx);
+  if (d < 0) start->dump();
+
+  // Do a depth first walk over edges
+  while (stack.is_nonempty()) {
+    Node* m   = stack.node();
+    uint  idx = stack.index();
+    uint  limit = d > 0 ? m->len() : m->outcnt();
+    if (idx < limit) {
+      stack.set_index(idx + 1);
+      Node* n = NULL;
+      if (d > 0) {
+        n = m->in(idx);
+      } else {
+        n = m->raw_out(idx);
+      }
+      if (NotANode(n))  continue;
+      // do not recurse through top or the root (would reach unrelated stuff)
+      if (n->is_Root() || n->is_top())  continue;
+      if (only_ctrl && !n->is_CFG()) continue;
+      VectorSet *v = arena->contains(n) ? &new_space : &old_space;
+      if (!v->test_set(n->_idx)) {  // forward arc
+        // Limit depth
+        if (stack.size() < (uint)ABS(d)) {
+          if (d < 0) n->dump();
+          stack.push(n, 0);
+          on_stack.set(n->_idx);
+        }
+      } else {  // back or cross arc
+        if (on_stack.test(n->_idx)) {  // back arc
+
+          // print it if there are no phis or regions in the mix
+          bool found_loop_breaker = false;
+          int k;
+          for (k = stack.size() - 1; k >= 0; k--) {
+            Node* m = stack.node_at(k);
+            if (m->is_Phi() || m->is_Region() || m->is_Root() || m->is_Start()) {
+              found_loop_breaker = true;
+              break;
+            }
+            if (m == n) // Found loop head
+              break;
+          }
+          assert(k >= 0, "n must be on stack");
+
+          if (!found_loop_breaker) {
+            tty->print("# %s LOOP FOUND:", only_ctrl ? "CONTROL" : "DATA");
+            for (int i = stack.size() - 1; i >= k; i--) {
+              Node* m = stack.node_at(i);
+              bool mnew = C->node_arena()->contains(m);
+              tty->print(" %s%d:%s", (mnew? "": "o"), m->_idx, m->Name());
+              if (i != 0) tty->print(d > 0? " <-": " ->");
+            }
+            tty->cr();
           }
         }
-        print_loop = !found_loop_breaker;
       }
-      if (print_loop) {
-        tty->print("# %s LOOP FOUND:", (only_ctrl != 0 ? "CONTROL" : "DATA"));
-        for (uint i = access_path.size(); i-- > 0; ) {
-          Node* m = access_path[i];
-          bool mnew = C->node_arena()->contains(m);
-          tty->print(" %s%d:%s", (mnew? "": "o"), m->_idx, m->Name());
-          if (i != 0) tty->print(d > 0? " <-": " ->");
-        }
-        tty->cr();
-      }
-    }
-    return;
-  }
-
-  // Only dumping control edges?
-  if (only_ctrl != 0) {
-    if (n->is_CFG())
-      only_ctrl = 2;            // found another control edge
-    else if (only_ctrl == 2)
-      return;                   // found at least one control edge already
-  }
-
-  access_path.push((Node*) n);
-
-  if( d > 0 ) {                 // Forward dump
-    for (uint i=0; i<n->len(); i++) {
-      Node* m =  n->in(i);
-      if (NotANode(m))  continue;
-      // do not recurse through top or the root (would reach unrelated stuff)
-      if (m->is_Root() || m->is_top())  continue;
-      dump_recur(m, d-1, only_ctrl, C, access_path, old_space, new_space);
-    }
-    n->dump();
-  } else {                      // Backwards dump
-    n->dump();
-    for (uint i=0; i<n->outcnt(); i++) {
-      Node* m = n->raw_out(i);
-      // do not recurse through top or the root (would reach unrelated stuff)
-      if (NotANode(m))  continue;
-      if (m->is_Root() || m->is_top())  continue;
-      dump_recur(m, d+1, only_ctrl, C, access_path, old_space, new_space);
+    } else {
+      if (d > 0) m->dump();
+      on_stack >>= m->_idx;
+      stack.pop();
     }
   }
-
-  Node* npop = access_path.pop();
-  assert(npop == n, "");
 }
 
 //------------------------------dump-------------------------------------------
 void Node::dump(int d) const {
-  ResourceArea *area = Thread::current()->resource_area();
-  VectorSet old_space(area), new_space(area);
-  Node_List access_path(area);
-  Compile* C = Compile::current();
-  int only_ctrl = 0;
-  dump_recur(this, d, only_ctrl, C, access_path, old_space, new_space);
+  dump_nodes(this, d, false);
 }
 
 //------------------------------dump_ctrl--------------------------------------
 // Dump a Node's control history to depth
 void Node::dump_ctrl(int d) const {
-  ResourceArea *area = Thread::current()->resource_area();
-  VectorSet old_space(area), new_space(area);
-  Node_List access_path(area);
-  Compile* C = Compile::current();
-  int only_ctrl = 1;
-  dump_recur(this, d, only_ctrl, C, access_path, old_space, new_space);
+  dump_nodes(this, d, true);
 }
 
 // VERIFICATION CODE
--- a/hotspot/src/share/vm/opto/node.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/node.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)node.hpp	1.219 07/05/05 17:06:23 JVM"
+#pragma ident "@(#)node.hpp	1.221 07/05/17 17:44:27 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -825,15 +825,6 @@
   // Compute the latency from the def to this instruction of the ith input node
   uint latency(uint i);
 
-  // Compute the (backwards) latency from the uses of this instructions
-  int latency_from_uses(Block_Array &bbs, GrowableArray<uint> &node_latency) const;
-
-  // Compute the (backwards) latency from a single use
-  int latency_from_use (Block_Array &bbs, GrowableArray<uint> &node_latency, const Node *def, Node *use) const;
-
-  // Compute the (backwards) latency from the uses of this instructions
-  void partial_latency_of_defs(Block_Array &bbs, GrowableArray<uint> &node_latency);
-
   // Hash & compare functions, for pessimistic value numbering
 
   // If the hash function returns the special sentinel value NO_HASH,
@@ -1372,6 +1363,10 @@
   Node *node() const {
     return _inode_top->node;
   }
+  Node* node_at(uint i) const {
+    assert(_inodes + i <= _inode_top, "in range");
+    return _inodes[i].node;
+  }
   uint index() const {
     return _inode_top->indx;
   }
--- a/hotspot/src/share/vm/opto/output.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)output.cpp	1.288 07/05/05 17:06:22 JVM"
+#pragma ident "@(#)output.cpp	1.289 07/05/17 15:59:26 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -221,6 +221,7 @@
               call->entry_point() == OptoRuntime::multianewarray3_Java() ||
               call->entry_point() == OptoRuntime::multianewarray4_Java() ||
               call->entry_point() == OptoRuntime::multianewarray5_Java() ||
+              call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
               call->entry_point() == OptoRuntime::complete_monitor_locking_Java() 
               ) {
             insert = false;
@@ -264,6 +265,62 @@
 
 # endif // ENABLE_ZAP_DEAD_LOCALS
 
+//------------------------------compute_loop_first_inst_sizes------------------
+// Compute the size of first NumberOfLoopInstrToAlign instructions at head 
+// of a loop. When aligning a loop we need to provide enough instructions
+// in cpu's fetch buffer to feed decoders. The loop alignment could be
+// avoided if we have enough instructions in fetch buffer at the head of a loop.
+// By default, the size is set to 999999 by Block's constructor so that
+// a loop will be aligned if the size is not reset here.
+//
+// Note: Mach instructions could contain several HW instructions
+// so the size is estimated only. 
+//
+void Compile::compute_loop_first_inst_sizes() {
+  // The next condition is used to gate the loop alignment optimization.
+  // Don't aligned a loop if there are enough instructions at the head of a loop
+  // or alignment padding is larger then MaxLoopPad. By default, MaxLoopPad
+  // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
+  // equal to 11 bytes which is the largest address NOP instruction.
+  if( MaxLoopPad < OptoLoopAlignment-1 ) {
+    uint last_block = _cfg->_num_blocks-1;
+    for( uint i=1; i <= last_block; i++ ) {
+      Block *b = _cfg->_blocks[i];
+      // Check the first loop's block which requires an alignment.
+      if( b->head()->is_Loop() && 
+          b->code_alignment() > (uint)relocInfo::addr_unit() ) {
+        uint sum_size = 0;
+        uint inst_cnt = NumberOfLoopInstrToAlign;
+        inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt,
+                                              _regalloc);
+        // Check the next fallthrough block if first loop's block does not have
+        // enough instructions.
+        if( inst_cnt > 0 && i < last_block ) {
+          // First, check if the first loop's block contains whole loop.
+          // LoopNode::LoopBackControl == 2.
+          Block *bx = _cfg->_bbs[b->pred(2)->_idx];
+          // Skip connector blocks (with limit in case of irreducible loops).
+          int search_limit = 16;
+          while( bx->is_connector() && search_limit-- > 0) {
+            bx = _cfg->_bbs[bx->pred(1)->_idx];
+          }
+          if( bx != b ) { // loop body is in several blocks.
+            Block *nb = NULL;
+            while( inst_cnt > 0 && i < last_block && nb != bx &&
+                  !_cfg->_blocks[i+1]->head()->is_Loop() ) {
+              i++;
+              nb = _cfg->_blocks[i];
+              inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, 
+                                                      _regalloc);
+            } // while( inst_cnt > 0 && i < last_block  )
+          } // if( bx != b )
+        } // if( inst_cnt > 0 && i < last_block )
+        b->set_first_inst_size(sum_size);
+      } // f( b->head()->is_Loop() )
+    } // for( i <= last_block )
+  } // if( MaxLoopPad < OptoLoopAlignment-1 )
+}
+
 //----------------------Shorten_branches---------------------------------------
 // The architecture description provides short branch variants for some long
 // branch instructions. Replace eligible long branches with short branches.
@@ -409,6 +466,10 @@
     }
   }
 
+  // Compute the size of first NumberOfLoopInstrToAlign instructions at head 
+  // of a loop. It is used to determine the padding for loop alignment.
+  compute_loop_first_inst_sizes();
+
   // Step 3, compute the offsets of all the labels
   uint last_call_adr = max_uint;
   for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
@@ -454,13 +515,10 @@
       // When the next block starts a loop, we may insert pad NOP
       // instructions.
       Block *nb = _cfg->_blocks[i+1];
-      int code_alignment = nb->code_alignment();
-      assert(is_power_of_2(code_alignment), "code alignment is not a power of 2");
-      int mask = code_alignment - relocInfo::addr_unit();
-      blk_size += (code_alignment - (blk_starts[i] + blk_size)) & mask;
-
+      int current_offset = blk_starts[i] + blk_size;
+      current_offset += nb->alignment_padding(current_offset);
       // Save block size; update total method size
-      blk_starts[i+1] = blk_starts[i]+blk_size;
+      blk_starts[i+1] = current_offset;
     }
   }
 
@@ -729,20 +787,10 @@
     }
 
     // Add in mappings of the monitors
-#ifdef JVMPI_SUPPORT
-#define JVMPI_METHOD_BLOB \
-            (mcall && mcall->entry_point() == OptoRuntime::jvmpi_method_entry_Java()) || \
-            (mcall && mcall->entry_point() == OptoRuntime::jvmpi_method_exit_Java()) ||
-#else // !JVMPI_SUPPORT
-#define JVMPI_METHOD_BLOB
-#endif // JVMPI_SUPPORT
     assert( !method ||
             !method->is_synchronized() || 
             method->is_native() || 
             num_mon > 0 || 
-/* #ifdef JVMPI_SUPPORT */
-            JVMPI_METHOD_BLOB
-/* #endif // JVMPI_SUPPORT */
             !GenerateSynchronizationCode, 
             "monitors must always exist for synchronized methods");
 
@@ -1252,20 +1300,13 @@
     int nop_size = (new (this) MachNopNode())->size(_regalloc);
     if( i<_cfg->_num_blocks-1 ) {
       Block *nb = _cfg->_blocks[i+1];
-      int block_alignment = nb->code_alignment();
-      int max_loop_pad = block_alignment-relocInfo::addr_unit();
-      if( max_loop_pad > 0 ) {
-        assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
-        int current_alignment = current_offset & max_loop_pad;
-        if (current_alignment != 0) {
-          int nops_cnt = ((block_alignment-current_alignment) & max_loop_pad) /
-                         nop_size;
-          MachNode *nop = new (this) MachNopNode(nops_cnt);
-          b->_nodes.insert( b->_nodes.size(), nop );
-          _cfg->_bbs.map( nop->_idx, b );
-          nop->emit(*cb, _regalloc);
-          current_offset = cb->code_size();
-        }
+      uint padding = nb->alignment_padding(current_offset);
+      if( padding > 0 ) {
+        MachNode *nop = new (this) MachNopNode(padding / nop_size);
+        b->_nodes.insert( b->_nodes.size(), nop );
+        _cfg->_bbs.map( nop->_idx, b );
+        nop->emit(*cb, _regalloc);
+        current_offset = cb->code_size();
       }
     }
 
--- a/hotspot/src/share/vm/opto/parse1.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)parse1.cpp	1.492 07/05/05 17:06:24 JVM"
+#pragma ident "@(#)parse1.cpp	1.493 07/05/17 15:59:31 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -899,11 +899,7 @@
 
 //---------------------------throw_to_exit-------------------------------------
 // Merge the given map into an exception exit from this method.
-#ifdef JVMPI_SUPPORT
-// The exception exit will handle JVMPI and any unlocking of receiver.
-#else // !JVMPI_SUPPORT
 // The exception exit will handle any unlocking of receiver.
-#endif // JVMPI_SUPPORT
 // The ex_oop must be saved within the ex_map, unlike merge_exception.
 void Parse::throw_to_exit(SafePointNode* ex_map) {
   // Pop the JVMS to (a copy of) the caller.
@@ -976,15 +972,8 @@
   // See GraphKit::add_exception_state, which performs the commoning.
   bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI -- record exit from a method if compiled while JVMPI/Dtrace is turned on.
-  bool do_jvmpi = C->need_jvmpi_method_event();
-
-  if (do_synch || do_jvmpi || DTraceMethodProbes) {
-#else // !JVMPI_SUPPORT
   // record exit from a method if compiled while Dtrace is turned on.
   if (do_synch || DTraceMethodProbes) {
-#endif // JVMPI_SUPPORT
     // First move the exception list out of _exits:
     GraphKit kit(_exits.transfer_exceptions_into_jvms());
     SafePointNode* normal_map = kit.map();  // keep this guy safe 
@@ -1006,12 +995,6 @@
         // Unlock!
         kit.shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
       }
-#ifdef JVMPI_SUPPORT
-      if (do_jvmpi) {
-        // Note:  We must do this __after__ unlocking the receiver.
-        kit.make_jvmpi_method_exit(method());
-      }
-#endif // JVMPI_SUPPORT
       if (DTraceMethodProbes) {
         kit.make_dtrace_method_exit(method());
       }
@@ -1111,12 +1094,6 @@
 
   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
   
-#ifdef JVMPI_SUPPORT
-  // JVMPI -- record entry to a method if compiled while JVMPI is turned on
-  if (C->need_jvmpi_method_event()) {
-    make_jvmpi_method_entry();
-  }
-#endif // JVMPI_SUPPORT
   if (DTraceMethodProbes) {
     make_dtrace_method_entry(method());
   }
@@ -1444,8 +1421,17 @@
 
     NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );
 
+#ifdef ASSERT
+    int pre_bc_sp = sp();
+    int inputs, depth;
+    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
+    assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC");
+#endif //ASSERT
+
     do_one_bytecode();
 
+    assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth, "correct depth prediction");
+
     do_exceptions();
 
     NOT_PRODUCT( parse_histogram()->record_change(); );
@@ -1978,12 +1964,6 @@
   if (method()->is_synchronized() && GenerateSynchronizationCode) {
     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
   }
-#ifdef JVMPI_SUPPORT
-  // JVMPI -- record exit from a method if compiled while JVMPI is turned on
-  if (C->need_jvmpi_method_event()) {
-    make_jvmpi_method_exit(method());
-  }
-#endif // JVMPI_SUPPORT
   if (DTraceMethodProbes) {
     make_dtrace_method_exit(method());
   }
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)parseHelper.cpp	1.194 07/05/05 17:06:27 JVM"
+#pragma ident "@(#)parseHelper.cpp	1.195 07/05/17 16:00:23 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -28,46 +28,6 @@
 #include "incls/_precompiled.incl"
 #include "incls/_parseHelper.cpp.incl"
 
-#ifdef JVMPI_SUPPORT
-//------------------------------make_jvmpi_method_entry------------------------
-// JVMPI -- record entry to a method if compiled while JVMPI is turned on
-void GraphKit::make_jvmpi_method_entry() {
-  const TypeFunc *call_type    = OptoRuntime::jvmpi_method_entry_Type();
-  address         call_address = OptoRuntime::jvmpi_method_entry_Java();
-  const char     *call_name    = OptoRuntime::stub_name( call_address );
-  assert(bci() == InvocationEntryBci, "must be outside all blocks");
-  const TypeInstPtr *method_type = TypeInstPtr::make(TypePtr::Constant, method()->klass(), true, method(), 0);
-  Node *methodOop_node = _gvn.transform( new (C, 1) ConPNode(method_type) );
-  Node *receiver_node  = (method() && !method()->is_static()) // IF  (virtual call)
-    ? map()->in(TypeFunc::Parms)                              // THEN 'this' pointer, receiver,
-    : null();                                                 // ELSE NULL
-
-  kill_dead_locals();
-  make_runtime_call(RC_NO_LEAF | RC_NO_IO,
-                    call_type, call_address,
-                    NULL, TypeRawPtr::BOTTOM,
-                    methodOop_node, receiver_node);
-}
-
-//------------------------------make_jvmpi_method_exit-------------------------
-// JVMPI -- record entry to a method if compiled while JVMPI is turned on
-void GraphKit::make_jvmpi_method_exit(ciMethod* method) {
-  const TypeFunc *call_type    = OptoRuntime::jvmpi_method_exit_Type();
-  address         call_address = OptoRuntime::jvmpi_method_exit_Java(); // CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit); // OptoRuntime::jvmpi_method_exit_Java();
-  const char     *call_name    = "jvmpi_method_exit"; // OptoRuntime::stub_name( call_address );
-  // assert triggers on exception exits with other BCIs
-  // assert(bci() == InvocationEntryBci, "must be outside all blocks");
-  const TypeInstPtr* method_type = TypeInstPtr::make(TypePtr::Constant, method->klass(), true, method, 0);
-  Node *method_node = _gvn.transform( new (C, 1) ConPNode(method_type) );
-
-  kill_dead_locals();
-  make_runtime_call(RC_NO_LEAF | RC_NO_IO,
-                    call_type, call_address,
-                    NULL, TypeRawPtr::BOTTOM,
-                    method_node, null());
-}
-#endif // JVMPI_SUPPORT
-
 //------------------------------make_dtrace_method_entry_exit ----------------
 // Dtrace -- record entry or exit of a method if compiled with dtrace support
 void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
--- a/hotspot/src/share/vm/opto/phase.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/phase.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)phase.cpp	1.58 07/05/05 17:06:25 JVM"
+#pragma ident "@(#)phase.cpp	1.59 07/05/17 16:00:26 JVM"
 #endif
 /*
  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -38,6 +38,7 @@
 
 // The next timers used for LogCompilation
 elapsedTimer Phase::_t_parser;
+elapsedTimer Phase::_t_escapeAnalysis;
 elapsedTimer Phase::_t_optimizer;
 elapsedTimer   Phase::_t_idealLoop;
 elapsedTimer   Phase::_t_ccp;
@@ -96,6 +97,9 @@
   tty->print_cr ("    stub compilation   : %3.3f sec.", Phase::_t_stubCompilation.seconds());
   tty->print_cr ("  Phases:");
   tty->print_cr ("    parse        : %3.3f sec", Phase::_t_parser.seconds());
+  if (DoEscapeAnalysis) {
+    tty->print_cr ("    escape analysis : %3.3f sec", Phase::_t_escapeAnalysis.seconds());
+  }
   tty->print_cr ("    optimizer    : %3.3f sec", Phase::_t_optimizer.seconds());
   if( Verbose || WizardMode ) {
     tty->print_cr ("      iterGVN      : %3.3f sec", Phase::_t_iterGVN.seconds());
@@ -133,6 +137,7 @@
   tty->print_cr ("    install_code : %3.3f sec", Phase::_t_registerMethod.seconds());
   tty->print_cr ("    ------------ : ----------");
   double phase_subtotal = Phase::_t_parser.seconds() + 
+    (DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) +
     Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() + 
     Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() + 
     Phase::_t_registerAllocation.seconds() + Phase::_t_removeEmptyBlocks.seconds() +
--- a/hotspot/src/share/vm/opto/phase.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/phase.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)phase.hpp	1.52 07/05/05 17:06:27 JVM"
+#pragma ident "@(#)phase.hpp	1.53 07/05/17 16:00:29 JVM"
 #endif
 /*
  * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
@@ -72,6 +72,7 @@
 
 // The next timers used for LogCompilation
   static elapsedTimer _t_parser;
+  static elapsedTimer _t_escapeAnalysis;
   static elapsedTimer _t_optimizer;
   static elapsedTimer   _t_idealLoop;
   static elapsedTimer   _t_ccp;
--- a/hotspot/src/share/vm/opto/runtime.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)runtime.cpp	1.457 07/05/05 17:06:26 JVM"
+#pragma ident "@(#)runtime.cpp	1.458 07/05/17 16:00:35 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -52,13 +52,9 @@
 address OptoRuntime::_complete_monitor_locking_Java               = NULL;
 address OptoRuntime::_rethrow_Java                                = NULL;
 
+address OptoRuntime::_slow_arraycopy_Java                         = NULL;
 address OptoRuntime::_register_finalizer_Java                     = NULL;
 
-#ifdef JVMPI_SUPPORT
-address OptoRuntime::_jvmpi_method_entry_Java                     = NULL;
-address OptoRuntime::_jvmpi_method_exit_Java                      = NULL;
-#endif // JVMPI_SUPPORT
-
 # ifdef ENABLE_ZAP_DEAD_LOCALS
 address OptoRuntime::_zap_dead_Java_locals_Java                   = NULL;
 address OptoRuntime::_zap_dead_native_locals_Java                 = NULL;
@@ -101,14 +97,9 @@
   gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
   gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
 
+  gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
   gen(env, _register_finalizer_Java        , register_finalizer_Type      , register_finalizer              ,    0 , false, false, false);  
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support
-  gen(env, _jvmpi_method_entry_Java        , jvmpi_method_entry_Type      , jvmpi_method_entry_C            ,    0 , false, false, false);  
-  gen(env, _jvmpi_method_exit_Java         , jvmpi_method_exit_Type       , jvmpi_method_exit_C             ,    0 , false, false, false);  
-#endif // JVMPI_SUPPORT
-
 # ifdef ENABLE_ZAP_DEAD_LOCALS                                                                                              
   gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
   gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
@@ -551,45 +542,71 @@
   return TypeFunc::make(domain, range);
 }
 
-const TypeFunc* OptoRuntime::arraycopy_Type() {
+// arraycopy stub variations:
+enum ArrayCopyType {
+  ac_fast,                      // void(ptr, ptr, size_t)
+  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
+  ac_slow,                      // void(ptr, int, ptr, int, int)
+  ac_generic                    //  int(ptr, int, ptr, int, int)
+};
+
+static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
   // create input type (domain)
-#ifdef _LP64
-  int argcnt = 4;
-#else
-  int argcnt = 3;
-#endif
-  const Type **fields = TypeTuple::fields(argcnt);
-  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // src
-  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // dest
-  fields[TypeFunc::Parms+2] = TypeX_X;    // size in whatevers (size_t)
-#ifdef _LP64
-  fields[TypeFunc::Parms+3] = Type::HALF; // other half of long length
-#endif
-  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+  int num_args      = (act == ac_fast ? 3 : 5);
+  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
+  int argcnt = num_args;
+  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
+  const Type** fields = TypeTuple::fields(argcnt);
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypePtr::NOTNULL;    // src
+  if (num_size_args == 0) {
+    fields[argp++] = TypeInt::INT;      // src_pos
+  }
+  fields[argp++] = TypePtr::NOTNULL;    // dest
+  if (num_size_args == 0) {
+    fields[argp++] = TypeInt::INT;      // dest_pos
+    fields[argp++] = TypeInt::INT;      // length
+  }
+  while (num_size_args-- > 0) {
+    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
+    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
+  }
+  if (act == ac_checkcast) {
+    fields[argp++] = TypePtr::NOTNULL;  // super_klass
+  }
+  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
 
-  // create result type
+  // create result type if needed
+  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
   fields = TypeTuple::fields(1);
-  fields[TypeFunc::Parms+0] = NULL; // void 
-  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
+  if (retcnt == 0)
+    fields[TypeFunc::Parms+0] = NULL; // void
+  else
+    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
   return TypeFunc::make(domain, range);
 }
 
+const TypeFunc* OptoRuntime::fast_arraycopy_Type() {
+  // This signature is simple:  Two base pointers and a size_t.
+  return make_arraycopy_Type(ac_fast);
+}
+
+const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() {
+  // An extension of fast_arraycopy_Type which adds type checking.
+  return make_arraycopy_Type(ac_checkcast);
+}
+
+const TypeFunc* OptoRuntime::slow_arraycopy_Type() {
+  // This signature is exactly the same as System.arraycopy.
+  // There are no intptr_t (int/long) arguments.
+  return make_arraycopy_Type(ac_slow);
+}
+
 const TypeFunc* OptoRuntime::generic_arraycopy_Type() {
-  // create input type (domain)
-  const Type **fields = TypeTuple::fields(5);
-  fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;   // src
-  fields[TypeFunc::Parms+1] = TypeInt::INT;          // src_pos
-  fields[TypeFunc::Parms+2] = TypeInstPtr::BOTTOM;   // dst
-  fields[TypeFunc::Parms+3] = TypeInt::INT;          // dst_pos
-  fields[TypeFunc::Parms+4] = TypeInt::INT;          // length
-  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+5, fields);
-
-  // create result type (range)
-  fields = TypeTuple::fields(1);
-  fields[TypeFunc::Parms+0] = TypeInt::INT;
-  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
-
-  return TypeFunc::make(domain, range);
+  // This signature is like System.arraycopy, except that it returns status.
+  return make_arraycopy_Type(ac_generic);
 }
 
 
@@ -943,57 +960,6 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-//-----------------------------------------------------------------------------
-// JVMPI support
-const TypeFunc *OptoRuntime::jvmpi_method_entry_Type() {
-  // create input type (domain)
-  const Type **fields = TypeTuple::fields(2);
-  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // methodOop;    Method we are entering
-  fields[TypeFunc::Parms+1] = TypeInstPtr::BOTTOM;   // oop;          Receiver
-  // // The JavaThread* is passed to each routine as the last argument
-  // fields[TypeFunc::Parms+2] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
-  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
-
-  // create result type (range)
-  fields = TypeTuple::fields(0);
-
-  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
-
-  return TypeFunc::make(domain,range);
-}
-
-// Make the signature of jvmpi_method_entry_C and jvmpi_method_exit_C the same for simplicity and consistency.
-const TypeFunc *OptoRuntime::jvmpi_method_exit_Type() {
-  // create input type (domain)
-  const Type **fields = TypeTuple::fields(2);
-  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // methodOop;    Method we are exiting
-  fields[TypeFunc::Parms+1] = TypeInstPtr::BOTTOM;   // oop;          dummy
-  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);
-
-  // create result type (range)
-  fields = TypeTuple::fields(0);
-
-  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
-
-  return TypeFunc::make(domain,range);
-}
-
-// TEMPORARY: jvmpi_method_entry_C and jvmpi_method_exit_C was cloned from sharedRuntime.cpp
-// but with different signature
-// Must be entry as it may lock when acquring the jmethodID of the method
-JRT_ENTRY (void, OptoRuntime::jvmpi_method_entry_C(methodOopDesc* method, oopDesc* receiver, JavaThread* thread))
-  SharedRuntime::jvmpi_method_entry_work(
-    thread, methodOop(method), oop(receiver));
-JRT_END
-
-// Must be entry as it may lock when acquring the jmethodID of the method
-// Make the signature of jvmpi_method_entry_C and jvmpi_method_exit_C the same for simplicity and consistency.
-JRT_ENTRY (void, OptoRuntime::jvmpi_method_exit_C(methodOopDesc* method, oopDesc* dummy, JavaThread* thread))
-  SharedRuntime::jvmpi_method_exit_work(thread, methodOop(method));
-JRT_END
-#endif // JVMPI_SUPPORT
-
 //-----------------------------------------------------------------------------
 // Dtrace support.  entry and exit probes have the same signature
 const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
@@ -1035,8 +1001,6 @@
 JRT_END
 
 //-----------------------------------------------------------------------------
-// Non-product code
-#ifndef PRODUCT
 
 NamedCounter * volatile OptoRuntime::_named_counters = NULL;
 
@@ -1046,6 +1010,7 @@
 void OptoRuntime::print_named_counters() {
   int total_lock_count = 0;
   int eliminated_lock_count = 0;
+
   NamedCounter* c = _named_counters;
   while (c) {
     if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
@@ -1060,17 +1025,24 @@
           eliminated_lock_count += count;
         }
       }
+    } else if (c->tag() == NamedCounter::BiasedLockingCounter) {
+      BiasedLockingCounters* blc = ((BiasedLockingNamedCounter*)c)->counters();
+      if (blc->nonzero()) {
+        tty->print_cr("%s", c->name());
+        blc->print_on(tty);
+      }
     }
     c = c->next();
   }
-  tty->print_cr("dynamic locks: %d", total_lock_count);
-  if (eliminated_lock_count) {
-    tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
-                  (int)(eliminated_lock_count * 100.0 / total_lock_count));
+  if (total_lock_count > 0) {
+    tty->print_cr("dynamic locks: %d", total_lock_count);
+    if (eliminated_lock_count) {
+      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
+                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
+    }
   }
 }
 
-
 //
 //  Allocate a new NamedCounter.  The JVMState is used to generate the
 //  name which consists of method@line for the inlining tree.
@@ -1092,9 +1064,15 @@
     }
     int bci = jvms->bci();
     if (bci < 0) bci = 0;
-    st.print("%s.%s@%d", m->holder()->name()->as_utf8(), m->name()->as_utf8(), m->line_number_from_bci(bci));
+    st.print("%s.%s@%d", m->holder()->name()->as_utf8(), m->name()->as_utf8(), bci);
+    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
   }
-  NamedCounter* c = new NamedCounter(strdup(st.as_string()), tag);
+  NamedCounter* c;
+  if (tag == NamedCounter::BiasedLockingCounter) {
+    c = new BiasedLockingNamedCounter(strdup(st.as_string()));
+  } else {
+    c = new NamedCounter(strdup(st.as_string()), tag);
+  }
 
   // atomically add the new counter to the head of the list.  We only
   // add counters so this is safe.
@@ -1106,6 +1084,10 @@
   return c;
 }
 
+//-----------------------------------------------------------------------------
+// Non-product code
+#ifndef PRODUCT
+
 int trace_exception_counter = 0;
 static void trace_exception(oop exception_oop, address exception_pc, const char* msg) {
   ttyLocker ttyl;
--- a/hotspot/src/share/vm/opto/runtime.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)runtime.hpp	1.198 07/05/05 17:06:29 JVM"
+#pragma ident "@(#)runtime.hpp	1.199 07/05/17 16:01:38 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -48,13 +48,13 @@
 // code in various ways.  Currently they are used by the lock coarsening code
 //
 
-#ifndef PRODUCT
 class NamedCounter : public CHeapObj {
 public:
     enum CounterTag {
     NoTag,
     LockCounter,
-    EliminatedLockCounter
+    EliminatedLockCounter,
+    BiasedLockingCounter
   };
 
 private:
@@ -83,7 +83,17 @@
   }
 
 };
-#endif // !PRODUCT
+
+class BiasedLockingNamedCounter : public NamedCounter {
+ private:
+  BiasedLockingCounters _counters;
+
+ public:
+  BiasedLockingNamedCounter(const char *n) :
+    NamedCounter(n, BiasedLockingCounter), _counters() {}
+
+  BiasedLockingCounters* counters() { return &_counters; }
+};
 
 typedef const TypeFunc*(*TypeFunc_generator)();
 
@@ -106,14 +116,9 @@
   static address _complete_monitor_locking_Java;
   static address _rethrow_Java;
 
+  static address _slow_arraycopy_Java;
   static address _register_finalizer_Java;
 
-#ifdef JVMPI_SUPPORT
-  // Stubs to support JVMPI
-  static address _jvmpi_method_entry_Java;
-  static address _jvmpi_method_exit_Java;
-#endif // JVMPI_SUPPORT
-
 # ifdef ENABLE_ZAP_DEAD_LOCALS
   static address _zap_dead_Java_locals_Java; 
   static address _zap_dead_native_locals_Java; 
@@ -160,13 +165,6 @@
   static ExceptionBlob*       _exception_blob;
   static void generate_exception_blob();
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support
-  // TEMPORARY: following method was cloned from sharedRuntime.cpp
-  static void jvmpi_method_entry_C(methodOopDesc* method, oopDesc* receiver, JavaThread* thread);
-  static void jvmpi_method_exit_C(methodOopDesc* method, oopDesc* dummy, JavaThread* thread); 
-#endif // JVMPI_SUPPORT
-
   static void register_finalizer(oopDesc* obj, JavaThread* thread);
 
   // zaping dead locals, either from Java frames or from native frames
@@ -203,15 +201,10 @@
   static address vtable_must_compile_stub()              { return _vtable_must_compile_Java; }  
   static address complete_monitor_locking_Java()         { return _complete_monitor_locking_Java;   }  
 
+  static address slow_arraycopy_Java()                   { return _slow_arraycopy_Java; }
   static address register_finalizer_Java()               { return _register_finalizer_Java; }
 
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support during execution
-  static address jvmpi_method_entry_Java()               { return _jvmpi_method_entry_Java; }
-  static address jvmpi_method_exit_Java()                { return _jvmpi_method_exit_Java; }
-#endif // JVMPI_SUPPORT
-
 # ifdef ENABLE_ZAP_DEAD_LOCALS
   static address zap_dead_locals_stub(bool is_native)    { return is_native
                                                                   ? _zap_dead_native_locals_Java
@@ -256,9 +249,11 @@
 
   static const TypeFunc* flush_windows_Type();
 
-  // leaf arraycopy routine types
-  static const TypeFunc* arraycopy_Type();
+  // arraycopy routine types
+  static const TypeFunc* fast_arraycopy_Type(); // bit-blasters
+  static const TypeFunc* checkcast_arraycopy_Type();
   static const TypeFunc* generic_arraycopy_Type();
+  static const TypeFunc* slow_arraycopy_Type();   // the full routine
 
   // leaf on stack replacement interpreter accessor types
   static const TypeFunc* osr_end_Type();
@@ -276,12 +271,6 @@
 
   static const TypeFunc* register_finalizer_Type();
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support
-  static const TypeFunc* jvmpi_method_entry_Type();    // ENTRY
-  static const TypeFunc* jvmpi_method_exit_Type();     // ENTRY
-#endif // JVMPI_SUPPORT
-
   // Dtrace support
   static const TypeFunc* dtrace_method_entry_exit_Type();
   static const TypeFunc* dtrace_object_alloc_Type();
@@ -290,7 +279,6 @@
   static const TypeFunc* zap_dead_locals_Type();
 # endif
 
-#ifndef PRODUCT
  private:
  static NamedCounter * volatile _named_counters;
 
@@ -302,6 +290,4 @@
  // dumps all the named counters
  static void          print_named_counters();
 
-#endif /* PRODUCT */
-
 };
--- a/hotspot/src/share/vm/opto/superword.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/superword.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)superword.cpp	1.3 07/05/05 17:06:29 JVM"
+#pragma ident "@(#)superword.cpp	1.4 07/05/17 16:01:50 JVM"
 #endif
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -45,7 +45,7 @@
   _node_info(arena(), 8,  0, SWNodeInfo::initial), // info needed per node
   _align_to_ref(NULL),                    // memory reference to align vectors to
   _disjoint_ptrs(arena(), 8,  0, OrderedPair::initial), // runtime disambiguated pointer pairs
-  dg(_arena),                             // dependence graph
+  _dg(_arena),                            // dependence graph
   _visited(arena()),                      // visited node set
   _post_visited(arena()),                 // post visited node set
   _n_idx_list(arena(), 8),                // scratch list of (node,index) pairs
@@ -74,6 +74,9 @@
   Node *pre_opaq1 = pre_end->limit();
   if (pre_opaq1->Opcode() != Op_Opaque1) return;
 
+  // Do vectors exist on this architecture?
+  if (vector_width_in_bytes() == 0) return;
+
   init(); // initialize data structures
 
   set_lpt(lpt);
@@ -234,6 +237,11 @@
   for (uint i = 0; i < memops.size(); i++) {
     MemNode* s1 = memops.at(i)->as_Mem();
     SWPointer p1(s1, this);
+    // Discard if pre loop can't align this reference
+    if (!ref_is_alignable(p1)) {
+      *cmp_ct.adr_at(i) = 0;
+      continue;
+    }
     for (uint j = i+1; j < memops.size(); j++) {
       MemNode* s2 = memops.at(j)->as_Mem();
       if (isomorphic(s1, s2)) {
@@ -299,6 +307,44 @@
 #endif
 }
 
+//------------------------------ref_is_alignable---------------------------
+// Can the preloop align the reference to position zero in the vector?
+bool SuperWord::ref_is_alignable(SWPointer& p) {
+  if (!p.has_iv()) {
+    return true;   // no induction variable
+  }
+  CountedLoopEndNode* pre_end = get_pre_loop_end(lp()->as_CountedLoop());
+  assert(pre_end->stride_is_con(), "pre loop stride is constant");
+  int preloop_stride = pre_end->stride_con();
+
+  int span = preloop_stride * p.scale_in_bytes();
+
+  // Stride one accesses are alignable.
+  if (ABS(span) == p.memory_size())
+    return true;
+
+  // If initial offset from start of object is computable,
+  // compute alignment within the vector.
+  int vw = vector_width_in_bytes();
+  if (vw % span == 0) {
+    Node* init_nd = pre_end->init_trip();
+    if (init_nd->is_Con() && p.invar() == NULL) {
+      int init = init_nd->bottom_type()->is_int()->get_con();
+
+      int init_offset = init * p.scale_in_bytes() + p.offset_in_bytes();
+      assert(init_offset >= 0, "positive offset from object start");
+
+      if (span > 0) {
+        return (vw - (init_offset % vw)) % span == 0;
+      } else {
+        assert(span < 0, "nonzero stride * scale");
+        return (init_offset % vw) % -span == 0;
+      }
+    }
+  }
+  return false;
+}
+
 //---------------------------dependence_graph---------------------------
 // Construct dependency graph.
 // Add dependence edges to load/store nodes for memory dependence
@@ -308,7 +354,7 @@
   for (int i = 0; i < _block.length(); i++ ) {
     Node *n = _block.at(i);
     if (n->is_Mem() || n->is_Phi() && n->bottom_type() == Type::MEMORY) {
-      dg.make_node(n);
+      _dg.make_node(n);
     }
   }
 
@@ -321,20 +367,20 @@
     mem_slice_preds(n_tail, n, _nlist);
 
     // Make the slice dependent on the root
-    DepMem* slice = dg.dep(n);
-    dg.make_edge(dg.root(), slice);
+    DepMem* slice = _dg.dep(n);
+    _dg.make_edge(_dg.root(), slice);
 
     // Create a sink for the slice
-    DepMem* slice_sink = dg.make_node(NULL);
-    dg.make_edge(slice_sink, dg.tail());
+    DepMem* slice_sink = _dg.make_node(NULL);
+    _dg.make_edge(slice_sink, _dg.tail());
 
     // Now visit each pair of memory ops, creating the edges
     for (int j = _nlist.length() - 1; j >= 0 ; j--) {
       Node* s1 = _nlist.at(j);
 
       // If no dependency yet, use slice
-      if (dg.dep(s1)->in_cnt() == 0) {
-        dg.make_edge(slice, s1);
+      if (_dg.dep(s1)->in_cnt() == 0) {
+        _dg.make_edge(slice, s1);
       }
       SWPointer p1(s1->as_Mem(), this);
       bool sink_dependent = true;
@@ -352,19 +398,19 @@
           _disjoint_ptrs.append_if_missing(pp);
         } else if (!SWPointer::not_equal(cmp)) {
           // Possibly same address
-          dg.make_edge(s1, s2);
+          _dg.make_edge(s1, s2);
           sink_dependent = false;
         }
       }
       if (sink_dependent) {
-        dg.make_edge(s1, slice_sink);
+        _dg.make_edge(s1, slice_sink);
       }
     }
 #ifndef PRODUCT
     if (TraceSuperWord) {
       tty->print_cr("\nDependence graph for slice: %d", n->_idx);
       for (int q = 0; q < _nlist.length(); q++) {
-        dg.print(_nlist.at(q));
+        _dg.print(_nlist.at(q));
       }
       tty->cr();
     }
@@ -405,6 +451,9 @@
           // or need to run igvn.optimize() again before SLP
         } else if (out->is_Phi() && out->bottom_type() == Type::MEMORY && !in_bb(out)) {
           // Ditto.  Not sure what else to check further.
+        } else if (out->Opcode() == Op_StoreCM && out->in(4) == n) {
+          // StoreCM has an input edge used as a precedence edge.
+          // Maybe an issue when oop stores are vectorized.
         } else {
           assert(out == prev || prev == NULL, "no branches off of store slice");
         }
@@ -498,7 +547,7 @@
   visited_set(deep);
   int shal_depth = depth(shallow);
   assert(shal_depth <= depth(deep), "must be");
-  for (DepPreds preds(deep, dg); !preds.done(); preds.next()) {
+  for (DepPreds preds(deep, _dg); !preds.done(); preds.next()) {
     Node* pred = preds.current();
     if (in_bb(pred) && !visited_test(pred)) {
       if (shallow == pred) {
@@ -1258,7 +1307,7 @@
       if (!n->is_Phi()) {
         int d_orig = depth(n);
         int d_in   = 0;
-        for (DepPreds preds(n, dg); !preds.done(); preds.next()) {
+        for (DepPreds preds(n, _dg); !preds.done(); preds.next()) {
           Node* pred = preds.current();
           if (in_bb(pred)) {
             d_in = MAX2(d_in, depth(pred));
@@ -1588,7 +1637,7 @@
 
 //------------------------------init---------------------------
 void SuperWord::init() {
-  dg.init();
+  _dg.init();
   _packset.clear();
   _disjoint_ptrs.clear();
   _block.clear();
--- a/hotspot/src/share/vm/opto/superword.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/superword.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)superword.hpp	1.4 07/05/05 17:06:29 JVM"
+#pragma ident "@(#)superword.hpp	1.5 07/05/17 16:01:57 JVM"
 #endif
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -52,6 +52,7 @@
 // first statement is considered the left element, and the
 // second statement is considered the right element.
 
+class SWPointer;
 class OrderedPair;
 
 // ========================= Dependence Graph =====================
@@ -218,7 +219,7 @@
 
   GrowableArray<OrderedPair> _disjoint_ptrs; // runtime disambiguated pointer pairs
 
-  DepGraph dg; // Dependence graph
+  DepGraph _dg; // Dependence graph
 
   // Scratch pads
   VectorSet    _visited;       // Visited set
@@ -304,6 +305,8 @@
   void find_adjacent_refs();
   // Find a memory reference to align the loop induction variable to.
   void find_align_to_ref(Node_List &memops);
+  // Can the preloop align the reference to position zero in the vector?
+  bool ref_is_alignable(SWPointer& p);
   // Construct dependency graph.
   void dependence_graph();
   // Return a memory slice (node list) in predecessor order starting at "start"
--- a/hotspot/src/share/vm/opto/type.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/type.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)type.cpp	1.252 07/05/05 17:06:28 JVM"
+#pragma ident "@(#)type.cpp	1.253 07/05/17 16:02:23 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -231,6 +231,7 @@
   TypeInt::CHAR    = TypeInt::make(0,65535,      WidenMin); // Java chars
   TypeInt::SHORT   = TypeInt::make(-32768,32767, WidenMin); // Java shorts
   TypeInt::POS     = TypeInt::make(0,max_jint,   WidenMin); // Non-neg values
+  TypeInt::POS1    = TypeInt::make(1,max_jint,   WidenMin); // Positive values
   TypeInt::INT     = TypeInt::make(min_jint,max_jint, WidenMax); // 32-bit integers
   TypeInt::SYMINT  = TypeInt::make(-max_jint,max_jint,WidenMin); // symmetric range
   // CmpL is overloaded both as the bytecode computation returning
@@ -490,17 +491,17 @@
   const Type *tdual = t->_dual;
   const Type *thisdual = _dual;
   // strip out instances
-  if (t2t->isa_instptr() != NULL) {
-    t2t = t2t->isa_instptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
+  if (t2t->isa_oopptr() != NULL) {
+    t2t = t2t->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
   }
-  if (t2this->isa_instptr() != NULL) {
-    t2this = t2this->isa_instptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
+  if (t2this->isa_oopptr() != NULL) {
+    t2this = t2this->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
   }
-  if (tdual->isa_instptr() != NULL) {
-    tdual = tdual->isa_instptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
+  if (tdual->isa_oopptr() != NULL) {
+    tdual = tdual->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
   }
-  if (thisdual->isa_instptr() != NULL) {
-    thisdual = thisdual->isa_instptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
+  if (thisdual->isa_oopptr() != NULL) {
+    thisdual = thisdual->isa_oopptr()->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE);
   }
 
   if( !interface_vs_oop && (t2t != tdual || t2this != thisdual) ) {
@@ -1003,7 +1004,8 @@
 const TypeInt *TypeInt::BYTE;   // Bytes, -128 to 127
 const TypeInt *TypeInt::CHAR;   // Java chars, 0-65535
 const TypeInt *TypeInt::SHORT;  // Java shorts, -32768-32767
-const TypeInt *TypeInt::POS;    // Positive 32-bit integers
+const TypeInt *TypeInt::POS;    // Positive 32-bit integers or zero
+const TypeInt *TypeInt::POS1;   // Positive 32-bit integers
 const TypeInt *TypeInt::INT;    // 32-bit integers
 const TypeInt *TypeInt::SYMINT; // symmetric range [-max_jint..max_jint]
 
@@ -2697,13 +2699,13 @@
     const TypeInstPtr *tinst = t->is_instptr();
     int off = meet_offset( tinst->offset() );
     PTR ptr = meet_ptr( tinst->ptr() );
+    int instance_id = meet_instance(tinst->instance_id());
 
     // Check for easy case; klasses are equal (and perhaps not loaded!)
     // If we have constants, then we created oops so classes are loaded
     // and we can handle the constants further down.  This case handles
     // both-not-loaded or both-loaded classes
     if (ptr != Constant && klass()->equals(tinst->klass()) && klass_is_exact() == tinst->klass_is_exact()) {
-      int instance_id = meet_instance(tinst->instance_id());
       return make( ptr, klass(), klass_is_exact(), NULL, off, instance_id );
     }
       
@@ -2843,7 +2845,7 @@
         else
           ptr = NotNull;
       }
-      return make( ptr, this_klass, this_xk, o, off );
+      return make( ptr, this_klass, this_xk, o, off, instance_id );
     } // Else classes are not equal
                
     // Since klasses are different, we require a LCA in the Java
--- a/hotspot/src/share/vm/opto/type.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/type.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)type.hpp	1.155 07/05/05 17:06:26 JVM"
+#pragma ident "@(#)type.hpp	1.156 07/05/17 16:02:31 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -403,6 +403,7 @@
   static const TypeInt *CHAR;
   static const TypeInt *SHORT;
   static const TypeInt *POS;
+  static const TypeInt *POS1;
   static const TypeInt *INT;
   static const TypeInt *SYMINT; // symmetric range [-max_jint..max_jint]
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/opto/vectornode.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/vectornode.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)vectornode.cpp	1.3 07/05/05 17:06:29 JVM"
+#pragma ident "@(#)vectornode.cpp	1.4 07/05/17 16:02:33 JVM"
 #endif
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -30,7 +30,8 @@
 //------------------------------VectorNode--------------------------------------
 
 // Return vector type for an element type and vector length.
-const Type* VectorNode::get_vect_type(BasicType elt_bt, int len) {
+const Type* VectorNode::vect_type(BasicType elt_bt, uint len) {
+  assert(len <= VectorNode::max_vlen(elt_bt), "len in range");
   switch(elt_bt) {
   case T_BOOLEAN:
   case T_BYTE:
@@ -69,6 +70,7 @@
 // Scalar promotion
 VectorNode* VectorNode::scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t) {
   BasicType bt = opd_t->array_element_basic_type();
+  assert(vlen <= VectorNode::max_vlen(bt), "vlen in range");
   switch (bt) {
   case T_BOOLEAN:
   case T_BYTE:
--- a/hotspot/src/share/vm/opto/vectornode.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/opto/vectornode.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)vectornode.hpp	1.4 07/05/05 17:06:29 JVM"
+#pragma ident "@(#)vectornode.hpp	1.5 07/05/17 16:02:36 JVM"
 #endif
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -31,13 +31,13 @@
   uint _length; // vector length
   virtual BasicType elt_basic_type() const = 0; // Vector element basic type
 
-  static const Type* get_vect_type(BasicType elt_bt, int len);
-  static const Type* get_vect_type(const Type* elt_type, int len) {
-    return get_vect_type(elt_type->array_element_basic_type(), len);
+  static const Type* vect_type(BasicType elt_bt, uint len);
+  static const Type* vect_type(const Type* elt_type, uint len) {
+    return vect_type(elt_type->array_element_basic_type(), len);
   }
 
  public:
-  friend class VectorLoadNode;  // For get_vect_type
+  friend class VectorLoadNode;  // For vect_type
   friend class VectorStoreNode; // ditto.
 
   VectorNode(Node* n1, uint vlen) : Node(NULL, n1), _length(vlen) {
@@ -56,7 +56,7 @@
 
   // Element and vector type
   const Type* elt_type()  const { return Type::get_const_basic_type(elt_basic_type()); }
-  const Type* vect_type() const { return get_vect_type(elt_basic_type(), length()); }
+  const Type* vect_type() const { return vect_type(elt_basic_type(), length()); }
 
   virtual const Type *bottom_type() const { return vect_type(); }
   virtual uint        ideal_reg()   const { return Matcher::vector_ideal_reg(); }
@@ -375,6 +375,10 @@
 
  protected:
   virtual BasicType elt_basic_type()  const = 0; // Vector element basic type
+  // For use in constructor
+  static const Type* vect_type(const Type* elt_type, uint len) {
+    return VectorNode::vect_type(elt_type, len);
+  }
 
  public:
   VectorLoadNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const Type *rt)
@@ -387,10 +391,7 @@
 
   // Element and vector type
   const Type* elt_type()  const { return Type::get_const_basic_type(elt_basic_type()); }
-  const Type* vect_type() const { return VectorNode::get_vect_type(elt_basic_type(), length()); }
-  const Type* vect_type(const Type* elt_type) const {
-    return VectorNode::get_vect_type(elt_type, length());
-  }
+  const Type* vect_type() const { return VectorNode::vect_type(elt_basic_type(), length()); }
 
   virtual uint ideal_reg() const  { return Matcher::vector_ideal_reg(); }
   virtual BasicType memory_type() const { return T_VOID; }
@@ -410,7 +411,7 @@
   virtual BasicType elt_basic_type() const { return T_BYTE; }
  public:
   Load16BNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,16)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store16B; }
   virtual uint length() const { return 16; }
@@ -423,7 +424,7 @@
   virtual BasicType elt_basic_type() const { return T_BYTE; }
  public:
   Load8BNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,8)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store8B; }
   virtual uint length() const { return 8; }
@@ -436,7 +437,7 @@
   virtual BasicType elt_basic_type() const { return T_BYTE; }
  public:
   Load4BNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,4)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store4B; }
   virtual uint length() const { return 4; }
@@ -449,7 +450,7 @@
   virtual BasicType elt_basic_type() const { return T_CHAR; }
  public:
   Load8CNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,8)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store8C; }
   virtual uint length() const { return 8; }
@@ -462,7 +463,7 @@
   virtual BasicType elt_basic_type() const { return T_CHAR; }
  public:
   Load4CNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,4)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store4C; }
   virtual uint length() const { return 4; }
@@ -475,7 +476,7 @@
   virtual BasicType elt_basic_type() const { return T_CHAR; }
  public:
   Load2CNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,2)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store2C; }
   virtual uint length() const { return 2; }
@@ -488,7 +489,7 @@
   virtual BasicType elt_basic_type() const { return T_SHORT; }
  public:
   Load8SNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,8)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store8C; }
   virtual uint length() const { return 8; }
@@ -501,7 +502,7 @@
   virtual BasicType elt_basic_type() const { return T_SHORT; }
  public:
   Load4SNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,4)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store4C; }
   virtual uint length() const { return 4; }
@@ -514,7 +515,7 @@
   virtual BasicType elt_basic_type() const { return T_SHORT; }
  public:
   Load2SNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,2)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store2C; }
   virtual uint length() const { return 2; }
@@ -527,7 +528,7 @@
   virtual BasicType elt_basic_type() const { return T_INT; }
  public:
   Load4INode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,4)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store4I; }
   virtual uint length() const { return 4; }
@@ -540,7 +541,7 @@
   virtual BasicType elt_basic_type() const { return T_INT; }
  public:
   Load2INode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(ti)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(ti,2)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store2I; }
   virtual uint length() const { return 2; }
@@ -553,7 +554,7 @@
   virtual BasicType elt_basic_type() const { return T_LONG; }
  public:
   Load2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong *tl = TypeLong::LONG)
-    : VectorLoadNode(c,mem,adr,at,vect_type(tl)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(tl,2)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store2L; }
   virtual uint length() const { return 2; }
@@ -566,7 +567,7 @@
   virtual BasicType elt_basic_type() const { return T_FLOAT; }
  public:
   Load4FNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const Type *t = Type::FLOAT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(t)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(t,4)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store4F; }
   virtual uint length() const { return 4; }
@@ -579,7 +580,7 @@
   virtual BasicType elt_basic_type() const { return T_FLOAT; }
  public:
   Load2FNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const Type *t = Type::FLOAT)
-    : VectorLoadNode(c,mem,adr,at,vect_type(t)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(t,2)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store2F; }
   virtual uint length() const { return 2; }
@@ -592,7 +593,7 @@
   virtual BasicType elt_basic_type() const { return T_DOUBLE; }
  public:
   Load2DNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const Type *t = Type::DOUBLE)
-    : VectorLoadNode(c,mem,adr,at,vect_type(t)) {}
+    : VectorLoadNode(c,mem,adr,at,vect_type(t,2)) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_Store2D; }
   virtual uint length() const { return 2; }
@@ -618,7 +619,7 @@
 
   // Element and vector type
   const Type* elt_type()  const { return Type::get_const_basic_type(elt_basic_type()); }
-  const Type* vect_type() const { return VectorNode::get_vect_type(elt_basic_type(), length()); }
+  const Type* vect_type() const { return VectorNode::vect_type(elt_basic_type(), length()); }
 
   virtual uint ideal_reg() const  { return Matcher::vector_ideal_reg(); }
   virtual BasicType memory_type() const { return T_VOID; }
--- a/hotspot/src/share/vm/prims/forte.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/forte.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)forte.cpp	1.68 07/05/05 17:06:31 JVM"
+#pragma ident "@(#)forte.cpp	1.69 07/05/17 16:02:39 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -404,11 +404,7 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-// is_valid_method() exists in fprofiler.cpp, jvmpi.cpp and now here.
-#else // !JVMPI_SUPPORT
 // is_valid_method() exists in fprofiler.cpp and now here.
-#endif // JVMPI_SUPPORT
 // We need one central version of this routine.
 
 bool forte_is_valid_method(methodOop method) {
@@ -523,10 +519,6 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-// Forte specific version of jvmpi.cpp: is_walkable_frame()
-//
-#endif // JVMPI_SUPPORT
 // Determine if 'fr' can be used to find a walkable frame. Returns
 // false if a walkable frame cannot be found. *walkframe_p, *method_p,
 // and *bci_p are not set when false is returned. Returns true if a
@@ -658,30 +650,21 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-//
-// Forte specific version of jvmpi.cpp:fill_call_trace_given_top()
-//
-#else // !JVMPI_SUPPORT
-/* call frame copied from old .h file and renamed */
+// call frame copied from old .h file and renamed
 typedef struct {
-    jint lineno;                      /* line number in the source file */
-    jmethodID method_id;              /* method executed in this frame */
+    jint lineno;                      // line number in the source file
+    jmethodID method_id;              // method executed in this frame
 } ASGCT_CallFrame;
 
-/* call trace copied from old .h file and renamed */
+// call trace copied from old .h file and renamed
 typedef struct {
-    JNIEnv *env_id;                   /* Env where trace was recorded */
-    jint num_frames;                  /* number of frames in this trace */
-    ASGCT_CallFrame *frames;          /* frames */
+    JNIEnv *env_id;                   // Env where trace was recorded
+    jint num_frames;                  // number of frames in this trace
+    ASGCT_CallFrame *frames;          // frames
 } ASGCT_CallTrace;
-#endif // JVMPI_SUPPORT
+
 static void forte_fill_call_trace_given_top(JavaThread* thd,
-#ifdef JVMPI_SUPPORT
-  JVMPI_CallTrace* trace, int depth, frame top_frame) {
-#else // !JVMPI_SUPPORT
   ASGCT_CallTrace* trace, int depth, frame top_frame) {
-#endif // JVMPI_SUPPORT
   NoHandleMark nhm;
 
   frame walkframe;
@@ -768,29 +751,17 @@
 // when a LWP gets interrupted by SIGPROF but the stack traces are filled
 // with different content (see below).
 // 
-#ifdef JVMPI_SUPPORT
-// This function must only be called when either JVM/PI or JVM/TI
-#else // !JVMPI_SUPPORT
 // This function must only be called when JVM/TI
-#endif // JVMPI_SUPPORT
 // CLASS_LOAD events have been enabled since agent startup. The enabled
 // event will cause the jmethodIDs to be allocated at class load time.
 // The jmethodIDs cannot be allocated in a signal handler because locks
 // cannot be grabbed in a signal handler safely.
 //
-#ifdef JVMPI_SUPPORT
-// void (*AsyncGetCallTrace)(JVMPI_CallTrace *trace, jint depth, void* ucontext)
-#else // !JVMPI_SUPPORT
 // void (*AsyncGetCallTrace)(ASGCT_CallTrace *trace, jint depth, void* ucontext)
-#endif // JVMPI_SUPPORT
 //
 // Called by the profiler to obtain the current method call stack trace for 
 // a given thread. The thread is identified by the env_id field in the 
-#ifdef JVMPI_SUPPORT
-// JVMPI_CallTrace structure. The profiler agent should allocate a JVMPI_CallTrace 
-#else // !JVMPI_SUPPORT
 // ASGCT_CallTrace structure. The profiler agent should allocate a ASGCT_CallTrace 
-#endif // JVMPI_SUPPORT
 // structure with enough memory for the requested stack depth. The VM fills in 
 // the frames buffer and the num_frames field. 
 //
@@ -800,43 +771,24 @@
 //   depth    - depth of the call stack trace. 
 //   ucontext - ucontext_t of the LWP
 //
-#ifdef JVMPI_SUPPORT
-// JVMPI_CallTrace:
-#else // !JVMPI_SUPPORT
 // ASGCT_CallTrace:
-#endif // JVMPI_SUPPORT
 //   typedef struct {
 //       JNIEnv *env_id;
 //       jint num_frames;
-#ifdef JVMPI_SUPPORT
-//       JVMPI_CallFrame *frames;
-//   } JVMPI_CallTrace;
-#else // !JVMPI_SUPPORT
 //       ASGCT_CallFrame *frames;
 //   } ASGCT_CallTrace;
-#endif // JVMPI_SUPPORT
 //
 // Fields:
 //   env_id     - ID of thread which executed this trace. 
 //   num_frames - number of frames in the trace. 
 //                (< 0 indicates the frame is not walkable).
-#ifdef JVMPI_SUPPORT
-//   frames     - the JVMPI_CallFrames that make up this trace. Callee followed by callers.
-//
-//  JVMPI_CallFrame:
-#else // !JVMPI_SUPPORT
 //   frames     - the ASGCT_CallFrames that make up this trace. Callee followed by callers.
 //
 //  ASGCT_CallFrame:
-#endif // JVMPI_SUPPORT
 //    typedef struct {
 //        jint lineno;                     
 //        jmethodID method_id;              
-#ifdef JVMPI_SUPPORT
-//    } JVMPI_CallFrame;
-#else // !JVMPI_SUPPORT
 //    } ASGCT_CallFrame;
-#endif // JVMPI_SUPPORT
 //
 //  Fields: 
 //    1) For Java frame (interpreted and compiled),
@@ -847,11 +799,7 @@
 //       method_id - jmethodID of the method being executed
 
 extern "C" {
-#ifdef JVMPI_SUPPORT
-void AsyncGetCallTrace(JVMPI_CallTrace *trace, jint depth, void* ucontext) {
-#else // !JVMPI_SUPPORT
 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) {
-#endif // JVMPI_SUPPORT
   if (SafepointSynchronize::is_synchronizing()) {
     // The safepoint mechanism is trying to synchronize all the threads.
     // Since this can involve thread suspension, it is not safe for us
@@ -885,13 +833,7 @@
   assert(JavaThread::current() == thread, 
          "AsyncGetCallTrace must be called by the current interrupted thread");
 
-#ifdef JVMPI_SUPPORT
-  if (!JvmtiExport::should_post_class_load() &&
-      // check JVM/PI after JVM/TI since JVM/TI is now preferred
-      !jvmpi::is_event_enabled(JVMPI_EVENT_CLASS_LOAD)) {
-#else // !JVMPI_SUPPORT
   if (!JvmtiExport::should_post_class_load()) {
-#endif // JVMPI_SUPPORT
     trace->num_frames = -1;
     return;
   }
--- a/hotspot/src/share/vm/prims/jni.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jni.cpp	1.432 07/05/05 17:06:30 JVM"
+#pragma ident "@(#)jni.cpp	1.433 07/05/17 16:02:44 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -686,10 +686,6 @@
 JNI_END
 
 // Must be JNI_ENTRY (with HandleMark)
-#ifdef JVMPI_SUPPORT
-// When jvmpi is enabled, the agent might allocate handles on receiving
-// JNI_GLOBALREF_FREE event.
-#endif // JVMPI_SUPPORT
 JNI_ENTRY_NO_PRESERVE(void, jni_DeleteGlobalRef(JNIEnv *env, jobject ref))
   JNIWrapper("DeleteGlobalRef");
   DTRACE_PROBE2(hotspot_jni, DeleteGlobalRef__entry, env, ref);
@@ -2613,10 +2609,6 @@
 JNI_END
 
 // Must be JNI_ENTRY (with HandleMark)
-#ifdef JVMPI_SUPPORT
-// When jvmpi is enabled, the agent might allocate handles on receiving
-// JNI_WEAK_GLOBALREF_FREE event.
-#endif // JVMPI_SUPPORT
 JNI_ENTRY(void, jni_DeleteWeakGlobalRef(JNIEnv *env, jweak ref))
   JNIWrapper("jni_DeleteWeakGlobalRef");
   DTRACE_PROBE2(hotspot_jni, DeleteWeakGlobalRef__entry, env, ref);
@@ -3430,12 +3422,6 @@
   if (JvmtiExport::should_post_thread_life()) {
     JvmtiExport::post_thread_start(thread);
   }
-#ifdef JVMPI_SUPPORT
-  // Notify jvmpi
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_THREAD_START)) {
-    jvmpi::post_thread_start_event(thread);
-  }
-#endif // JVMPI_SUPPORT
 
   *(JNIEnv**)penv = thread->jni_environment();
 
@@ -3537,28 +3523,11 @@
 
     } else if (version == JVMPI_VERSION_1 ||
                version == JVMPI_VERSION_1_1 ||
-#ifdef JVMPI_SUPPORT
-               (version == JVMPI_VERSION_1_2 && UseSuspendResumeThreadLists)) {
-#else // !JVMPI_SUPPORT
                version == JVMPI_VERSION_1_2) {
-#endif // JVMPI_SUPPORT
-#ifdef JVMPI_SUPPORT
-      if (UseUnsupportedDeprecatedJVMPI) {
-        *penv = (void* )jvmpi::GetInterface_1(version); // version 1.X support
-        ret = JNI_OK;
-        return ret;
-      } else {
-#endif // JVMPI_SUPPORT
-        tty->print_cr("ERROR: JVMPI, an experimental interface, is no longer supported.");
-        tty->print_cr("Please use the supported interface: the JVM Tool Interface (JVM TI).");
-#ifdef JVMPI_SUPPORT
-        tty->print_cr("For information on temporary workarounds contact: jvmpi_eol@sun.com");
-#endif // JVMPI_SUPPORT
-        ret = JNI_EVERSION;
-        return ret;
-#ifdef JVMPI_SUPPORT
-      }
-#endif // JVMPI_SUPPORT
+      tty->print_cr("ERROR: JVMPI, an experimental interface, is no longer supported.");
+      tty->print_cr("Please use the supported interface: the JVM Tool Interface (JVM TI).");
+      ret = JNI_EVERSION;
+      return ret;
     } else if (JvmtiExport::is_jvmdi_version(version)) {
       tty->print_cr("FATAL ERROR: JVMDI is no longer supported.");
       tty->print_cr("Please use the supported interface: the JVM Tool Interface (JVM TI).");
--- a/hotspot/src/share/vm/prims/jvm.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jvm.cpp	1.565 07/05/05 17:06:31 JVM"
+#pragma ident "@(#)jvm.cpp	1.566 07/05/17 16:02:51 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -2820,9 +2820,6 @@
   if (JvmtiExport::should_post_data_dump()) {
     JvmtiExport::post_data_dump();
   }
-#ifdef JVMPI_SUPPORT
-  jvmpi::post_dump_event();
-#endif // JVMPI_SUPPORT
 JVM_END
 
 
--- a/hotspot/src/share/vm/prims/jvmpi.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3857 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jvmpi.cpp	1.180 07/05/05 17:06:35 JVM"
-#endif
-/*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_jvmpi.cpp.incl"
-
-#ifdef JVMPI_SUPPORT
-//-------------------------------------------------------
-
-// Unsolved problems:
-//
-// CPU profiling
-// - we need an exact mapping of pc/bci to lineNo; this is not
-//   always possible as:
-//    - interpreted has not set up the frame completely
-//    - compiled code is not at a safepoint
-
-//-------------------------------------------------------
-
-
-// define raw monitor validity checking
-static int PROF_RM_MAGIC = (int)(('P' << 24) | ('I' << 16) | ('R' << 8) | 'M');
-#define PROF_RM_CHECK(m)                                 \
-    ((m) != NULL && (m)->magic() == PROF_RM_MAGIC) 
-
-
-unsigned int jvmpi::_event_flags = JVMPI_PROFILING_OFF;
-unsigned int jvmpi::_event_flags_array[JVMPI_MAX_EVENT_TYPE_VAL + 1];
-JVMPI_Interface jvmpi::jvmpi_interface;
-bool jvmpi::slow_allocation = false;
-
-class VM_JVMPIPostObjAlloc: public VM_Operation {
- private:
-  static volatile bool _restrict_event_posting;
-  static Thread *      _create_thread;
-
- public:
-  VM_JVMPIPostObjAlloc() {
-    // local fields are initialized when declared
-  }
-
-  ~VM_JVMPIPostObjAlloc() {
-    clear_restriction();
-  }
-
-  static void clear_restriction();
-  static const Thread *create_thread() {
-    return _create_thread;
-  }
-  void doit();
-  const char* name() const {
-    return "post JVMPI object allocation";
-  }
-  static void set_create_thread(Thread *thr) {
-    _create_thread = thr;
-  }
-  static void wait_if_restricted();
-};
-
-
-// JVMPI_EVENT_THREAD_START events can be posted by the thread itself
-// or by the VM initialization thread which can result in duplicate
-// THREAD_START events. Duplicate events are not acceptable unless the
-// event is specifically requested. We track all threads that have
-// posted THREAD_START events until after any needed synthetic events
-// from jvmpi::post_vm_initialization_events() are posted.
-//
-// The global thread_start_posted_list is protected by the Threads_lock.
-// Since both real and synthetic THREAD_START event posters use
-// jvmpi::post_thread_start_event() we will have no dups.
-//
-// We could have used a flag in the JavaThread, but why take up space
-// to solve a race that only happens during VM initialization? We also
-// want to isolate this bit of JVM/PI strangeness here.
-
-static GrowableArray<JavaThread *>* thread_start_posted_list = NULL;
-static volatile bool track_thread_start_events = true;
-
-// Returns true if a THREAD_START event has already been posted
-// for this thread.
-static bool check_for_and_record_thread_start_event(JavaThread* thread) {
-  assert(Threads_lock->owned_by_self(), "sanity check");
-
-  if (track_thread_start_events) {
-    // we are still in the race condition region
-
-    if (thread_start_posted_list == NULL) {
-      // first thread to post THREAD_START event so setup initial space
-      thread_start_posted_list =
-        new (ResourceObj::C_HEAP) GrowableArray<JavaThread*>(3, true);
-    }
-  
-    if (thread_start_posted_list->contains(thread)) {
-      // real or synthetic event already posted; don't post another
-      return true;
-    }
-  
-    // record this thread and let real or synthetic event be posted
-    thread_start_posted_list->append(thread);
-    return false;
-  }
-
-  // We are past the point where synthetic events can be posted. No
-  // need to record threads anymore, but we have to check against the
-  // history to see if an event was posted before.
-
-  if (thread_start_posted_list == NULL) {
-    // tracking list was not setup so let the thread post its own event
-    return false;
-  }
-
-  if (thread_start_posted_list->contains(thread)) {
-    // synthetic event already posted; don't post another
-    return true;
-  }
-
-  // let thread post its own event
-  return false;
-} // end check_for_and_record_thread_start_event()
-
-
-static void stop_tracking_thread_start_events() {
-  track_thread_start_events = false;
-}
-
-
-void jvmpi::initialize(int version) {
-  // Exit with an error if we are using a jvmpi-incompatible garbage collector,
-  // unless explicitly overridden via JVMPICheckGCCompatibility (needed for using
-  // Analyzer with these non-jvmpi collectors; see bug 4889433).
-  if (JVMPICheckGCCompatibility && 
-      (UseConcMarkSweepGC || UseParNewGC || UseParallelGC)) {
-    vm_exit_during_initialization(
-      "JVMPI not supported with this garbage collector; "
-      "please refer to the GC/JVMPI documentation");
-  }
-
-  // The creating thread requests the VM_JVMPIPostObjAlloc VM operation
-  // so it the restriction should not apply to its events (if any).
-  VM_JVMPIPostObjAlloc::set_create_thread(ThreadLocalStorage::thread());
-
-  // Enable JVMPI
-  _event_flags |= JVMPI_PROFILING_ON;
-
-  // First, initialize all JVMPI defined event notifications 
-  // to be not available
-  for (int i= 0; i <= JVMPI_MAX_EVENT_TYPE_VAL; i++) {
-    _event_flags_array[i] = JVMPI_EVENT_NOT_SUPPORTED;
-  }
-
-  // Then, initialize events supported by the HotSpot VM
-  // to be initially disabled.
-  disable_event(JVMPI_EVENT_CLASS_LOAD);
-  disable_event(JVMPI_EVENT_CLASS_UNLOAD);
-  disable_event(JVMPI_EVENT_CLASS_LOAD_HOOK);
-  disable_event(JVMPI_EVENT_OBJECT_ALLOC);
-  disable_event(JVMPI_EVENT_OBJECT_FREE);
-  // JVMPI_VERSION_1_1 is upward compatible from JVMPI_VERSION_1 so enable
-  // the INSTRUCTION_START event
-  disable_event(JVMPI_EVENT_INSTRUCTION_START);
-  disable_event(JVMPI_EVENT_THREAD_START);
-  disable_event(JVMPI_EVENT_THREAD_END);
-  disable_event(JVMPI_EVENT_JNI_GLOBALREF_ALLOC);
-  disable_event(JVMPI_EVENT_JNI_GLOBALREF_FREE);
-  disable_event(JVMPI_EVENT_JNI_WEAK_GLOBALREF_ALLOC);
-  disable_event(JVMPI_EVENT_JNI_WEAK_GLOBALREF_FREE);
-  disable_event(JVMPI_EVENT_METHOD_ENTRY);
-  disable_event(JVMPI_EVENT_METHOD_ENTRY2);
-  disable_event(JVMPI_EVENT_METHOD_EXIT);
-  disable_event(JVMPI_EVENT_LOAD_COMPILED_METHOD);
-  disable_event(JVMPI_EVENT_UNLOAD_COMPILED_METHOD);
-  disable_event(JVMPI_EVENT_JVM_INIT_DONE);
-  disable_event(JVMPI_EVENT_JVM_SHUT_DOWN);
-  disable_event(JVMPI_EVENT_DUMP_DATA_REQUEST);
-  disable_event(JVMPI_EVENT_RESET_DATA_REQUEST);
-  disable_event(JVMPI_EVENT_OBJECT_MOVE);
-  disable_event(JVMPI_EVENT_ARENA_NEW);
-  disable_event(JVMPI_EVENT_DELETE_ARENA);
-  disable_event(JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTER);
-  disable_event(JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTERED);
-  disable_event(JVMPI_EVENT_RAW_MONITOR_CONTENDED_EXIT);
-  disable_event(JVMPI_EVENT_MONITOR_CONTENDED_ENTER);
-  disable_event(JVMPI_EVENT_MONITOR_CONTENDED_ENTERED);
-  disable_event(JVMPI_EVENT_MONITOR_CONTENDED_EXIT);
-  disable_event(JVMPI_EVENT_MONITOR_WAIT);
-  disable_event(JVMPI_EVENT_MONITOR_WAITED);
-  disable_event(JVMPI_EVENT_GC_START);
-  disable_event(JVMPI_EVENT_GC_FINISH);
-
-  // return highest upward compatible version number
-  if (UseSuspendResumeThreadLists) {
-    jvmpi_interface.version               = JVMPI_VERSION_1_2;
-  } else {
-    jvmpi_interface.version               = JVMPI_VERSION_1_1;
-  }
-  
-  // initialize the jvmpi_interface functions
-  jvmpi_interface.EnableEvent             = &enable_event;
-  jvmpi_interface.DisableEvent            = &disable_event;
-  
-  jvmpi_interface.RequestEvent            = &request_event;
-  jvmpi_interface.GetCallTrace            = &get_call_trace;
-  jvmpi_interface.GetCurrentThreadCpuTime = &get_current_thread_cpu_time;
-  jvmpi_interface.ProfilerExit            = &profiler_exit;
-  jvmpi_interface.RawMonitorCreate        = &raw_monitor_create;
-  jvmpi_interface.RawMonitorEnter         = &raw_monitor_enter;
-  jvmpi_interface.RawMonitorExit          = &raw_monitor_exit;
-  jvmpi_interface.RawMonitorWait          = &raw_monitor_wait;
-  jvmpi_interface.RawMonitorNotifyAll     = &raw_monitor_notify_all;
-  jvmpi_interface.RawMonitorDestroy       = &raw_monitor_destroy;
-  jvmpi_interface.SuspendThread           = &suspend_thread;
-  jvmpi_interface.ResumeThread            = &resume_thread;
-  jvmpi_interface.GetThreadStatus         = &get_thread_status;
-  jvmpi_interface.ThreadHasRun            = &thread_has_run;
-  jvmpi_interface.CreateSystemThread      = &create_system_thread;
-  jvmpi_interface.SetThreadLocalStorage   = &set_thread_local_storage;
-  jvmpi_interface.GetThreadLocalStorage   = &get_thread_local_storage;
-  
-  jvmpi_interface.DisableGC               = &disable_gc;
-  jvmpi_interface.EnableGC                = &enable_gc;
-  
-  jvmpi_interface.RunGC                   = &run_gc;
-  jvmpi_interface.GetThreadObject         = &get_thread_object;
-  jvmpi_interface.GetMethodClass          = &get_method_class;
-
-  // JVMPI_VERSION_1_1 is upward compatible from JVMPI_VERSION_1 so set
-  // up function pointers for jobjectID2jobject and jobject2jobjectID
-  jvmpi_interface.jobjectID2jobject     = &jobjectID_2_jobject;
-  jvmpi_interface.jobject2jobjectID     = &jobject_2_jobjectID;
-
-  // JVMPI_VERSION_1_2 is upward compatible from previous versions, but
-  // it can be turned disabled via the UseSuspendResumeThreadLists option.
-  if (UseSuspendResumeThreadLists) {
-    jvmpi_interface.SuspendThreadList     = &suspend_thread_list;
-    jvmpi_interface.ResumeThreadList      = &resume_thread_list;
-  } else {
-    jvmpi_interface.SuspendThreadList     = NULL;
-    jvmpi_interface.ResumeThreadList      = NULL;
-  }
-}
-
-
-JVMPI_Interface* jvmpi::GetInterface_1(int version) {
-  initialize(version);
-  return &jvmpi_interface;
-}
-
-static void _pass()     { return; }
-static void _block()    { 
-  while (true) {
-    VM_Exit::block_if_vm_exited();
-
-    // VM has not yet reached final safepoint, but it will get there very soon
-    Thread *thr = ThreadLocalStorage::get_thread_slow();
-    if (thr) os::yield_all(100);     // yield_all() needs a thread on Solaris
-  }
-}
-
-// disable JVMPI - this is called during VM shutdown, after the
-// JVM_SHUT_DOWN event.
-void jvmpi::disengage() {
-  _event_flags = JVMPI_PROFILING_OFF;
-  
-  address block_func = CAST_FROM_FN_PTR(address, _block);
-  address pass_func  = CAST_FROM_FN_PTR(address, _pass);
-
-  // replace most JVMPI interface functions with infinite loops
-  jvmpi_interface.EnableEvent =
-      CAST_TO_FN_PTR(jint(*)(jint, void*), block_func);
-  jvmpi_interface.DisableEvent =
-      CAST_TO_FN_PTR(jint(*)(jint, void*), block_func);
-  jvmpi_interface.RequestEvent =
-      CAST_TO_FN_PTR(jint(*)(jint, void*), block_func);
-  jvmpi_interface.GetCallTrace =
-      CAST_TO_FN_PTR(void(*)(JVMPI_CallTrace*, jint), block_func);
-  jvmpi_interface.GetCurrentThreadCpuTime =
-      CAST_TO_FN_PTR(jlong(*)(void), block_func);
-  // allow ProfilerExit to go through
-  // jvmpi_interface.ProfilerExit = CAST_TO_FN_PTR(void(*)(jint), block_func);
-  jvmpi_interface.RawMonitorCreate =
-      CAST_TO_FN_PTR(JVMPI_RawMonitor(*)(char*), block_func);
-  jvmpi_interface.RawMonitorEnter =
-      CAST_TO_FN_PTR(void(*)(JVMPI_RawMonitor), block_func);
-  jvmpi_interface.RawMonitorExit =
-      CAST_TO_FN_PTR(void(*)(JVMPI_RawMonitor), block_func);
-  jvmpi_interface.RawMonitorWait =
-      CAST_TO_FN_PTR(void(*)(JVMPI_RawMonitor, jlong), block_func);
-  jvmpi_interface.RawMonitorNotifyAll =
-      CAST_TO_FN_PTR(void(*)(JVMPI_RawMonitor), block_func);
-  jvmpi_interface.RawMonitorDestroy =
-      CAST_TO_FN_PTR(void(*)(JVMPI_RawMonitor), block_func);
-  jvmpi_interface.SuspendThread =
-      CAST_TO_FN_PTR(void(*)(JNIEnv*), block_func);
-  jvmpi_interface.ResumeThread =
-      CAST_TO_FN_PTR(void(*)(JNIEnv*), block_func);
-  jvmpi_interface.GetThreadStatus = 
-      CAST_TO_FN_PTR(jint(*)(JNIEnv*), block_func);
-  jvmpi_interface.ThreadHasRun =
-      CAST_TO_FN_PTR(jboolean(*)(JNIEnv*), block_func);
-  jvmpi_interface.CreateSystemThread =
-      CAST_TO_FN_PTR(jint(*)(char*, jint, jvmpi_void_function_of_void), block_func);
-  // Allow GetThreadLocalStorage() and SetThreadLocalStorage() to
-  // go through since they simply manage thread local storage.
-  // jvmpi_interface.SetThreadLocalStorage =
-  //     CAST_TO_FN_PTR(void(*)(JNIEnv*, void*), block_func);
-  // jvmpi_interface.GetThreadLocalStorage = 
-  //     CAST_TO_FN_PTR(void*(*)(JNIEnv*), block_func);
-  jvmpi_interface.DisableGC =
-      CAST_TO_FN_PTR(void(*)(void), block_func);
-  jvmpi_interface.EnableGC = 
-      CAST_TO_FN_PTR(void(*)(void), block_func);
-  jvmpi_interface.RunGC =
-      CAST_TO_FN_PTR(void(*)(void), block_func);
-  jvmpi_interface.GetThreadObject =
-      CAST_TO_FN_PTR(jobjectID(*)(JNIEnv*), block_func);
-  jvmpi_interface.GetMethodClass =
-      CAST_TO_FN_PTR(jobjectID(*)(jmethodID), block_func);
-  jvmpi_interface.jobjectID2jobject = 
-      CAST_TO_FN_PTR(jobject(*)(jobjectID), block_func);
-  jvmpi_interface.jobject2jobjectID =
-      CAST_TO_FN_PTR(jobjectID(*)(jobject), block_func);
-
-  // NotifyEvent() is called from VM, do not block
-  jvmpi_interface.NotifyEvent =
-      CAST_TO_FN_PTR(void(*)(JVMPI_Event*), pass_func);
-}
-
-inline void jvmpi::post_event_common(JVMPI_Event* event) {
-
-  // Check for restrictions related to the VM_JVMPIPostObjAlloc VM
-  // operation. JavaThreads will wait here if the VM operation is
-  // in process in order to prevent deadlock.
-  VM_JVMPIPostObjAlloc::wait_if_restricted();
-
-  // notify profiler agent
-  jvmpi_interface.NotifyEvent(event);
-}
-
-inline void jvmpi::post_event(JVMPI_Event* event) {
-  Thread* thread = Thread::current();
-  assert(thread->is_Java_thread(), "expecting a Java thread");
-
-  JavaThread* jthread = (JavaThread*)thread;
-  event->env_id = jthread->jni_environment();
-  // prepare to call out across JVMPI
-  ThreadToNativeFromVM transition(jthread);
-  HandleMark  hm(thread);  
-  // notify profiler agent
-  post_event_common(event);
-}
-
-// JVMPI 2.0: should cleanup race condition where calling_thread
-// exits before being notified.
-inline void jvmpi::post_event_vm_mode(JVMPI_Event* event, JavaThread* calling_thread) {
-  Thread* thread = Thread::current();
-  if (thread->is_Java_thread()) {
-    // JVMPI doesn't do proper transitions on RAW_ENTRY
-    // When it does do this can be enabled.
-#ifdef PROPER_TRANSITIONS
-    assert(((JavaThread*)thread)->thread_state() == _thread_in_vm, "Only vm mode expected");
-    post_event(event);
-#else
-    JavaThread* jthread = (JavaThread*)thread;
-    JavaThreadState saved_state = jthread->thread_state();
-
-    if (saved_state == _thread_in_vm) {
-      // same as conditions for post_event() so use it
-      post_event(event);
-      return;
-    }
-
-    // We are about to transition to _thread_in_native. See if there
-    // is an external suspend requested before we go. If there is,
-    // then we do a self-suspend. We don't need to do this for
-    // post_event() because it uses ThreadToNativeFromVM.
-
-    if (jthread->is_external_suspend_with_lock()) {
-      jthread->java_suspend_self();
-    }
-
-    event->env_id = jthread->jni_environment();
-    // prepare to call out across JVMPI
-    jthread->frame_anchor()->make_walkable(jthread);
-    if (saved_state == _thread_in_Java) {
-      ThreadStateTransition::transition_from_java(jthread, _thread_in_native);
-    } else if (saved_state != _thread_in_native) {
-      // Nested events are already in _thread_in_native and don't need
-      // to transition again.
-      ThreadStateTransition::transition(jthread, saved_state, _thread_in_native);
-    }
-    HandleMark  hm(thread);  
-    // notify profiler agent
-    post_event_common(event);
-    // restore state prior to posting event
-    ThreadStateTransition::transition_from_native(jthread, saved_state); 
-#endif /* PROPER_TRANSITIONS */
-  } else {
-    if (thread->is_VM_thread()) {
-      // calling from VM thread
-
-      if (calling_thread == NULL) {
-	  calling_thread = JavaThread::active();
-      }	  
-	    
-      assert(calling_thread != NULL && calling_thread->is_Java_thread(),
-	     "wrong thread, expecting Java thread");
-
-      event->env_id = (calling_thread != NULL && 
-		       calling_thread->is_Java_thread()) ?
-		       calling_thread->jni_environment() : NULL;
-    } else {
-      event->env_id = calling_thread->jni_environment();
-    }
-    // notify profiler agent
-    post_event_common(event);
-  }
-}
-
-
-// ----------------------------------------------------------
-// Functions called by other parts of the VM to notify events
-// ----------------------------------------------------------
-
-void issue_jvmpi_class_load_event(klassOop k) {
-  jvmpi::post_class_load_event(Klass::cast(k)->java_mirror());
-}
-
-
-class IssueJVMPIobjAllocEvent: public ObjectClosure {
- public:
-  void do_object(oop obj) {
-    Universe::jvmpi_object_alloc(obj, obj->size() * wordSize);
-  };
-};
-
-volatile bool VM_JVMPIPostObjAlloc::_restrict_event_posting = true;
-Thread *      VM_JVMPIPostObjAlloc::_create_thread = NULL;
-
-void VM_JVMPIPostObjAlloc::clear_restriction() {
-  // See MutexLockerEx comment in wait_if_restricted().
-  MutexLockerEx loap(ObjAllocPost_lock, Mutex::_no_safepoint_check_flag);
-
-  // Lower restriction since we are done with the VM operation
-  _restrict_event_posting = false;
-
-  // let any waiting threads resume
-  ObjAllocPost_lock->notify_all();
-}
-
-void VM_JVMPIPostObjAlloc::doit() {
-  // Issue object allocation events for all allocated objects
-  IssueJVMPIobjAllocEvent blk;
-
-  // make sure the heap's parsable before iterating over it
-  Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
-  Universe::heap()->object_iterate(&blk);
-}
-
-// The restriction is true by default to allow wait_if_restricted()
-// to query the value without holding the lock. This imposes the
-// least overhead on later calls to post_event_common(). Since any
-// event handler can request an OBJ_ALLOC event, we have to restrict
-// all other events until the VMThread is done with its housekeeping.
-//
-// There are five cases to consider:
-// 1) The VMThread calls wait_if_restricted() as part of its
-//    OBJ_ALLOC posting. It will not grab the lock and will not
-//    block due to the second if-statement.
-// 2) The JavaThread that will eventually make this VM operation
-//    request calls wait_if_restricted() before the VM op is created.
-//    It will not grab the lock and will not block due to the second
-//    if-statement.
-//
-// The remaining cases apply to JavaThreads that are not making this
-// VM operation request.
-//
-// 3) A JavaThread that calls wait_if_restricted() before the VM
-//    op is created will always grab the lock and then enter the
-//    check-and-wait() loop.
-// 4) A JavaThread that calls wait_if_restricted() after the VM
-//    op is created but before it is finished will always grab the
-//    lock and then enter the check-and-wait() loop.
-// 5) A JavaThread that calls wait_if_restricted() after the VM
-//    op is finished will see the false value and will not block.
-//
-// If the restriction is false by default and then set to true in
-// the VM op constructor, then we have to guard the query with a
-// lock grab to prevent a race between the JavaThread and the
-// VMThread. Without the lock grab, it would be possible for the
-// JavaThread to see the "false" value just before the constructor
-// sets that the value to true. At that point, the JavaThread
-// would be racing to finish its event posting before the VMThread
-// blocks it in a safepoint.
-//
-void VM_JVMPIPostObjAlloc::wait_if_restricted() {
-  if (_restrict_event_posting) {
-    // a restriction has been raised
-
-    // The restriction does not apply to the VMThread nor does it
-    // apply to the thread that makes the VM_JVMPIPostObjAlloc
-    // VM operation request.
-    Thread *thr = ThreadLocalStorage::thread();
-    if (thr != NULL && !thr->is_VM_thread() && thr != create_thread()) {
-      // The restriction applies to this thread. We use
-      // MutexLockerEx to allow the lock to work just
-      // before calling into the agent's code (native).
-      MutexLockerEx loap(ObjAllocPost_lock, Mutex::_no_safepoint_check_flag);
-      while (_restrict_event_posting) {
-        ObjAllocPost_lock->wait(Mutex::_no_safepoint_check_flag, 0);
-      }
-    }
-  }
-}
-
-void jvmpi::post_vm_initialization_events() {
-  if (Universe::jvmpi_alloc_event_enabled()) {
-    // Issue the object allocation events thru a VM operation since
-    // it needs to be done at a safepoint
-    VM_JVMPIPostObjAlloc op;
-    VMThread::execute(&op);
-  } else {
-    // lift the restriction since we didn't do the VM operation
-    VM_JVMPIPostObjAlloc::clear_restriction();
-  }
-
-  if (!jvmpi::enabled()) {
-    // no agent is attached and the event posting restriction is now
-    // lifted so there is nothing more to do
-
-    // We don't conditionally enable the THREAD_START tracking mechanism
-    // so we always have to disable it.
-    stop_tracking_thread_start_events();
-    return;
-  }
-
-  assert(!JVMPICheckGCCompatibility ||
-         !(UseConcMarkSweepGC || UseParNewGC || UseParallelGC),
-         "JVMPI-incompactible collector; jvm should have exited during "
-         " JVMPI initialization");
-
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_CLASS_LOAD)) {
-    // Issue class load events for all loaded classes
-    // Note: This must happen _after_ the allocation events, otherwise hprof has problems!
-    SystemDictionary::classes_do(&issue_jvmpi_class_load_event);
-  }
-
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_THREAD_START)) {
-    // Issue thread creation events for all started threads
-    int k = 0;
-    int threadcount;
-    JavaThread** ThreadSnapShot;
-    { MutexLocker mu(Threads_lock);
-      threadcount = Threads::number_of_threads();
-      ThreadSnapShot = NEW_C_HEAP_ARRAY(JavaThread*, threadcount);
-      for (JavaThread* tp = Threads::first() ; (tp != NULL) && ( k < threadcount); tp = tp->next(), k++) {
-        ThreadSnapShot[k] = tp;
-      }
-    } // Release Threads_lock before calling up to agent code
-    for (k = 0; k<threadcount; k++) {
-      jvmpi::post_thread_start_event(ThreadSnapShot[k]);
-    }
-    FREE_C_HEAP_ARRAY(JavaThread*, ThreadSnapShot);
-  }
-
-  // we are now past the point of posting synthetic THREAD_START events
-  stop_tracking_thread_start_events();
-}
-
-
-void jvmpi::post_vm_initialized_event() {
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_JVM_INIT_DONE;
-  post_event(&event);
-}
-
-void jvmpi::post_vm_death_event() {
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_JVM_SHUT_DOWN;
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_instruction_start_event(const frame& f) {
-  ResourceMark rm;
-  JVMPI_Event event;
-
-  methodOop method = f.interpreter_frame_method();
-  address   bcp    = f.interpreter_frame_bcp();
-
-  // fill in generic information
-  event.event_type = JVMPI_EVENT_INSTRUCTION_START;
-  event.u.instruction.method_id = method->jmethod_id();
-  event.u.instruction.offset    = method->bci_from(bcp);
-
-  // debugging
-#ifdef ASSERT
-  switch (Bytecodes::java_code(Bytecodes::cast(*bcp))) {
-    case Bytecodes::_tableswitch : // fall through
-    case Bytecodes::_lookupswitch: // fall through
-    case Bytecodes::_ifnull      : // fall through
-    case Bytecodes::_ifeq        : // fall through
-    case Bytecodes::_ifnonnull   : // fall through
-    case Bytecodes::_ifne        : // fall through
-    case Bytecodes::_iflt        : // fall through
-    case Bytecodes::_ifge        : // fall through
-    case Bytecodes::_ifgt        : // fall through
-    case Bytecodes::_ifle        : assert(f.interpreter_frame_expression_stack_size() >= 1, "stack size must be >= 1"); break;
-    case Bytecodes::_if_acmpeq   : // fall through
-    case Bytecodes::_if_icmpeq   : // fall through
-    case Bytecodes::_if_acmpne   : // fall through
-    case Bytecodes::_if_icmpne   : // fall through
-    case Bytecodes::_if_icmplt   : // fall through
-    case Bytecodes::_if_icmpge   : // fall through
-    case Bytecodes::_if_icmpgt   : // fall through
-    case Bytecodes::_if_icmple   : assert(f.interpreter_frame_expression_stack_size() >= 2, "stack size must be >= 2"); break;
-  }
-#endif
-
-  // fill in bytecode-specific information
-  //
-  // Note: This code is necessary to satisfy the current interface for the jcov
-  //       code coverage tool. The interface should be simplified and generalized
-  //       to provide expression stack access instead of specific information for
-  //       a few bytecodes only. Given expression stack access, the code below
-  //       can move into jcov, the interface becomes simpler, more general, and
-  //       also more powerful. With the next version/revision of JVMPI this clean
-  //       up should be seriously considered (gri 11/18/99).
-
-  int  size  = f.interpreter_frame_expression_stack_size();
-  jint tos_0 = size > 0 ? *f.interpreter_frame_expression_stack_at(size - 1) : 0;
-  jint tos_1 = size > 1 ? *f.interpreter_frame_expression_stack_at(size - 2) : 0;
-
-  switch (Bytecodes::java_code(Bytecodes::cast(*bcp))) {
-    case Bytecodes::_tableswitch :
-      { const Bytecode_tableswitch* s = Bytecode_tableswitch_at(bcp);
-        event.u.instruction.u.tableswitch_info.key = tos_0;
-        event.u.instruction.u.tableswitch_info.low = s->low_key();
-        event.u.instruction.u.tableswitch_info.hi  = s->high_key();
-      }
-      break;
-    case Bytecodes::_lookupswitch:
-      { Bytecode_lookupswitch* s = Bytecode_lookupswitch_at(bcp);
-        int i;
-        for (i = 0; i < s->number_of_pairs() && tos_0 != s->pair_at(i)->match(); i++);
-        event.u.instruction.u.lookupswitch_info.chosen_pair_index = i;
-        event.u.instruction.u.lookupswitch_info.pairs_total       = s->number_of_pairs();
-      }
-      break;
-    case Bytecodes::_ifnull      : // fall through
-    case Bytecodes::_ifeq        : event.u.instruction.u.if_info.is_true = tos_0 == 0; break;
-    case Bytecodes::_ifnonnull   : // fall through
-    case Bytecodes::_ifne        : event.u.instruction.u.if_info.is_true = tos_0 != 0; break;
-    case Bytecodes::_iflt        : event.u.instruction.u.if_info.is_true = tos_0 <  0; break;
-    case Bytecodes::_ifge        : event.u.instruction.u.if_info.is_true = tos_0 >= 0; break;
-    case Bytecodes::_ifgt        : event.u.instruction.u.if_info.is_true = tos_0 >  0; break;
-    case Bytecodes::_ifle        : event.u.instruction.u.if_info.is_true = tos_0 <= 0; break;
-    case Bytecodes::_if_acmpeq   : // fall through
-    case Bytecodes::_if_icmpeq   : event.u.instruction.u.if_info.is_true = tos_1 == tos_0; break;
-    case Bytecodes::_if_acmpne   : // fall through
-    case Bytecodes::_if_icmpne   : event.u.instruction.u.if_info.is_true = tos_1 != tos_0; break;
-    case Bytecodes::_if_icmplt   : event.u.instruction.u.if_info.is_true = tos_1 <  tos_0; break;
-    case Bytecodes::_if_icmpge   : event.u.instruction.u.if_info.is_true = tos_1 >= tos_0; break;
-    case Bytecodes::_if_icmpgt   : event.u.instruction.u.if_info.is_true = tos_1 >  tos_0; break;
-    case Bytecodes::_if_icmple   : event.u.instruction.u.if_info.is_true = tos_1 <= tos_0; break;
-  }
-  
-  post_event(&event);
-}
-
-void jvmpi::post_thread_start_event(JavaThread* thread, jint flag)
-{
-  ResourceMark rm;
-  JVMPI_Event event;
-  
-  assert(!Threads_lock->owned_by_self(), "must not own threads_lock for notify");
-
-  { MutexLocker mu(Threads_lock);
-
-    // Do not post thread start event for hidden java thread.
-    if (thread->is_hidden_from_external_view()) return;
-
-    if (flag != JVMPI_REQUESTED_EVENT &&
-        check_for_and_record_thread_start_event(thread)) {
-      // Prevent duplicate THREAD_START events unless there is a
-      // specific request for the event.
-      return;
-    }
-
-    event.event_type = JVMPI_EVENT_THREAD_START | flag;
-  
-    event.u.thread_start.thread_name   = (char*)thread->get_thread_name();
-    event.u.thread_start.group_name    = (char*)thread->get_threadgroup_name();
-    event.u.thread_start.parent_name   = (char*)thread->get_parent_name();
-    event.u.thread_start.thread_id     = (jobjectID)thread->threadObj();
-    event.u.thread_start.thread_env_id = thread->jni_environment();
-  } // Release Threads_lock
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: post_thread_start_event for thread id " INTPTR_FORMAT " [thread " INTPTR_FORMAT " <%s>] ",
-		  event.u.thread_start.thread_id, thread, event.u.thread_start.thread_name);
-  }
-  
-  GC_locker::lock();
-  post_event_vm_mode(&event, NULL);
-  GC_locker::unlock();
-}
-
-void jvmpi::post_thread_start_event(JavaThread* thread) {
-  post_thread_start_event(thread, 0);
-}
-
-void jvmpi::post_thread_end_event(JavaThread* thread) {
-  ResourceMark rm;
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_THREAD_END;
-
-  { MutexLocker mu(Threads_lock);
-
-    // Do not post thread end event for hidden java thread.
-    if (thread->is_hidden_from_external_view()) return;
-
-    event.u.thread_start.thread_name   = (char* )thread->get_thread_name();
-    event.u.thread_start.group_name    = (char*)thread->get_threadgroup_name();
-    event.u.thread_start.parent_name   = (char* )thread->get_parent_name();
-    event.u.thread_start.thread_id     = (jobjectID)thread->threadObj();
-    event.u.thread_start.thread_env_id = thread->jni_environment();
-  } // Release Threads_lock
-
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: post_thread_end_event for thread id " INTPTR_FORMAT " [thread " INTPTR_FORMAT " <%s>] ", 
-		  event.u.thread_start.thread_id, thread, event.u.thread_start.thread_name);
-  }
-  post_event(&event);
-}
-
-void jvmpi::fillin_array_class_load_event(oop kOop, JVMPI_Event *eventp) {
-  Klass *k = Klass::cast(java_lang_Class::as_klassOop(kOop));
-  assert(k->oop_is_array(), "must be array classes");
-
-  eventp->event_type                       = JVMPI_EVENT_CLASS_LOAD;
-  eventp->u.class_load.class_name          = k->external_name();
-  eventp->u.class_load.source_name         = NULL;
-  eventp->u.class_load.num_interfaces      = 0; 
-  eventp->u.class_load.num_methods         = 0;
-  eventp->u.class_load.methods             = NULL;
-  eventp->u.class_load.num_static_fields   = 0;
-  eventp->u.class_load.statics             = NULL;
-  eventp->u.class_load.num_instance_fields = 0;
-  eventp->u.class_load.instances           = NULL;
-  eventp->u.class_load.class_id            = (jobjectID)kOop;
-}
-
-// Note: kOop must be mirror
-void jvmpi::fillin_class_load_event(oop kOop, JVMPI_Event *eventp, bool fillin_jni_ids) {
-  eventp->event_type = JVMPI_EVENT_CLASS_LOAD;
-  instanceKlassHandle k = java_lang_Class::as_klassOop(kOop);
-  assert(!k()->klass_part()->oop_is_array(), "must not be array classes");
-
-  instanceKlass* ik = instanceKlass::cast(k());
-  // get field info
-  int num_statics = 0;
-  int num_instances = 0;
-  for (FieldStream count_field_st(k, true, true); !count_field_st.eos(); count_field_st.next()) {
-    if (count_field_st.access_flags().is_static()) {
-      num_statics++;
-    } else {
-      num_instances++;
-    }
-  }
-  JVMPI_Field* statics = NEW_RESOURCE_ARRAY(JVMPI_Field, num_statics);
-  JVMPI_Field* instances = NEW_RESOURCE_ARRAY(JVMPI_Field, num_instances);
-  int i_stat = 0;
-  int i_inst = 0;
-  for (FieldStream field_st(k, true, true); !field_st.eos(); field_st.next()) {
-    char* f_name = field_st.name     ()->as_C_string();
-    char* f_sig  = field_st.signature()->as_C_string();
-    if (field_st.access_flags().is_static()) {
-      statics[i_stat].field_name      = f_name;
-      statics[i_stat].field_signature = f_sig;
-      i_stat++;
-    } else {
-      instances[i_inst].field_name      = f_name;
-      instances[i_inst].field_signature = f_sig;
-      i_inst++;
-    }
-  }
-  assert(i_inst == num_instances, "sanity check");
-  assert(i_stat == num_statics, "sanity check");
-  // get method info
-  int num_methods = ik->methods()->length();
-  JVMPI_Method* methods = NEW_RESOURCE_ARRAY(JVMPI_Method, num_methods);
-  int i_meth = 0;
-  for (MethodStream meth_st(k, true, true); !meth_st.eos(); meth_st.next()) {
-    methodOop m = meth_st.method();
-    methods[i_meth].method_name      = m->name()->as_C_string();
-    methods[i_meth].method_signature = m->signature()->as_C_string();
-    if (fillin_jni_ids) {
-      methods[i_meth].method_id      = m->jmethod_id();
-    } else {
-      // method_id doesn't mean much after class is unloaded
-      methods[i_meth].method_id      = NULL;
-    }
-    methods[i_meth].start_lineno     = m->line_number_from_bci(0);
-    if (m->code_size() > 0) {
-      methods[i_meth].end_lineno     = m->line_number_from_bci(m->code_size() - 1);
-    } else {
-      methods[i_meth].end_lineno     = m->line_number_from_bci(0);
-    }
-    i_meth++;
-  }
-
-  eventp->u.class_load.class_name          = ik->external_name();
-  if (ik->source_file_name() == NULL)
-    eventp->u.class_load.source_name       = NULL;
-  else
-    eventp->u.class_load.source_name       = ik->source_file_name()->as_C_string();
-  eventp->u.class_load.num_interfaces      = ik->local_interfaces()->length();
-  eventp->u.class_load.num_methods         = num_methods;
-  eventp->u.class_load.methods             = methods;
-  eventp->u.class_load.num_static_fields   = num_statics;
-  eventp->u.class_load.statics             = statics;
-  eventp->u.class_load.num_instance_fields = num_instances;
-  eventp->u.class_load.instances           = instances;
-  eventp->u.class_load.class_id            = (jobjectID)ik->java_mirror();
-}
-
-
-// List of classes unloaded for the duration of the CLASS_UNLOAD event
-// handler. Populated by save_class_unload_event_info(), queried by both
-// post_class_load_event() and post_class_unload_events(), and cleaned
-// up by post_class_unload_events().
-static GrowableArray<JVMPI_Event*>* unloaded_classes = NULL;
-
-// Note: kOop must be mirror
-void jvmpi::post_class_load_event(oop kOop, jint flag) {
-
-  if (flag == JVMPI_REQUESTED_EVENT && unloaded_classes != NULL) {
-    // This is a synthesized event and we are in the middle of unloading
-    // classes so see if the requested class is one that we unloaded.
-
-    // walk the list of unloaded class event information
-    for (int i = 0; i < unloaded_classes->length(); i++) {
-      JVMPI_Event *ev = unloaded_classes->at(i);
-      if ((oop)(ev->u.class_load.class_id) == kOop) {
-        // We are in the event handler for CLASS_UNLOAD event so
-        // we don't have to lock out GC. Post the saved event
-        // information for the unloaded class to the agent.
-        assert(GC_locker::is_active(), "GC must be locked when in event handler");
-        post_event_vm_mode(ev, NULL);
-        return;
-      }
-    }
-  }
-
-  ResourceMark rm;
-  JVMPI_Event event;
-  klassOop k = java_lang_Class::as_klassOop(kOop);
-
-  if (k->klass_part()->oop_is_array()) {
-    fillin_array_class_load_event(kOop, &event);
-  } else {
-    fillin_class_load_event(kOop, &event, true /* fillin JNI ids */);
-  }
-  event.event_type |= flag;
-  if (TraceJVMPI) {
-    tty->print("JVMPI: post_class_load_event for klass mirror " INTPTR_FORMAT " ", (address)kOop);
-    java_lang_Class::as_klassOop(kOop)->print_value();
-    tty->print(" ");
-    kOop->print_value();
-    tty->cr();
-  }
-
-  GC_locker::lock();
-  post_event_vm_mode(&event, NULL);
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_class_load_event(oop k) {
-  post_class_load_event(k, 0);
-}
-
-
-// Wrapper to translate the (32-bit) JVM/PI memory allocation function
-// to the HotSpot resource allocation function.
-void *jvmpi::jvmpi_alloc(unsigned int bytecnt) {
-  return (void *)resource_allocate_bytes((size_t)bytecnt);
-}
-
-
-void jvmpi::post_class_load_hook_event(unsigned char **ptrP,
-  unsigned char **end_ptrP, jvmpi_alloc_func_t malloc_f) {
-  JVMPI_Event event;
-
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_CLASS_LOAD_HOOK;
-
-  event.u.class_load_hook.class_data = *ptrP;
-  event.u.class_load_hook.class_data_len = *end_ptrP - *ptrP;
-  event.u.class_load_hook.malloc_f = malloc_f;
-
-  post_event(&event);
-    
-  *ptrP = event.u.class_load_hook.new_class_data;
-  *end_ptrP = *ptrP + event.u.class_load_hook.new_class_data_len;
-}
-
-
-// Post CLASS_UNLOAD events and/or release saved memory.
-void jvmpi::post_class_unload_events() {
-  if (unloaded_classes != NULL) {  // we unloaded some classes
-    // walk the list of unloaded class event information
-    for (int i = 0; i < unloaded_classes->length(); i++) {
-      JVMPI_Event *ev = unloaded_classes->at(i);  // shorthand for saved info
-
-      if (jvmpi::is_event_enabled(JVMPI_EVENT_CLASS_UNLOAD)) {
-        // The caller is still interested in the events so post them.
-        // Note: by the time we get called, the caller may no longer
-        // be interested in the events, but we have to always free
-        // the memory below.
-        JVMPI_Event event;
-
-        GC_locker::lock();
-
-        // construct a CLASS_UNLOAD event from the saved into
-        event.event_type = JVMPI_EVENT_CLASS_UNLOAD;
-        event.u.class_unload.class_id = ev->u.class_load.class_id;
-
-        post_event_vm_mode(&event, NULL);
-
-        GC_locker::unlock();
-      }
-      delete ev;  // done with the saved info
-    }
-
-    delete unloaded_classes;
-    unloaded_classes = NULL;
-  }
-}
-
-
-// GC has caused a class to be unloaded so save CLASS_LOAD information
-// just in case there is a RequestEvent(CLASS_LOAD) call from the
-// CLASS_UNLOAD event handler.
-void jvmpi::save_class_unload_event_info(oop k) {
-  JVMPI_Event *ev = new JVMPI_Event();
-  fillin_class_load_event(k, ev, false /* don't fillin JNI id values */);
-  ev->event_type |= JVMPI_REQUESTED_EVENT;
-
-  if (unloaded_classes == NULL) {
-    // first unloaded class so setup initial space for the events
-    unloaded_classes =
-      new (ResourceObj::C_HEAP) GrowableArray<JVMPI_Event*>(5, true);
-  }
-  unloaded_classes->append(ev);
-}
-
-
-void jvmpi::post_dump_event() {
-  if (is_event_enabled(JVMPI_EVENT_DUMP_DATA_REQUEST)) {
-    JVMPI_Event event;
-    event.event_type = JVMPI_EVENT_DUMP_DATA_REQUEST;
-    post_event(&event);
-  }
-  if (is_event_enabled(JVMPI_EVENT_RESET_DATA_REQUEST)) {
-    JVMPI_Event event;
-    event.event_type = JVMPI_EVENT_RESET_DATA_REQUEST;
-    post_event(&event);
-  }
-}
-
-// Maintain an array of skipped JNI global refs and those JNI global refs
-// are not dumped as GC roots in the heap dumps since they are internal to VM.
-static GrowableArray<jobject>* skipped_globalrefs = NULL;
-
-void jvmpi::post_new_globalref_event(jobject ref, oop obj, bool post_jvmpi_event) {
-  if (post_jvmpi_event) {
-    // post new JNI global ref alloc event
-    JVMPI_Event event;
-    
-    GC_locker::lock();
-    
-    /* fill event info and notify the profiler */
-    event.event_type = JVMPI_EVENT_JNI_GLOBALREF_ALLOC;
-
-    event.u.jni_globalref_alloc.obj_id = (jobjectID)(obj);
-    event.u.jni_globalref_alloc.ref_id = ref;
-
-    post_event_vm_mode(&event, NULL);
-
-    GC_locker::unlock();
-
-  } else {
-    // Not to post new JNI global ref alloc event;
-    // need to save those skipped JNI global ref, which should not be 
-    // dumped as GC roots in the heap dump
-
-    MutexLocker ml(JNIGlobalHandle_lock);
-    if (skipped_globalrefs == NULL) {
-      skipped_globalrefs = new (ResourceObj::C_HEAP) GrowableArray<jobject>(256, true);
-    }
-    skipped_globalrefs->append(ref);
-  }
-}
-
-
-void jvmpi::post_delete_globalref_event(jobject ref, bool post_jvmpi_event) {
-  if (post_jvmpi_event) {
-    // post JNI global ref free event
-    JVMPI_Event event;
-
-    GC_locker::lock();
-
-    /* fill event info and notify the profiler */
-    event.event_type = JVMPI_EVENT_JNI_GLOBALREF_FREE;
-
-    event.u.jni_globalref_free.ref_id = ref;
-
-    post_event_vm_mode(&event, NULL);
-
-    GC_locker::unlock();
-  } else {
-    // remove the JNI global ref from skipped_globalrefs list
-    MutexLocker ml(JNIGlobalHandle_lock);
-
-    int length = (skipped_globalrefs != NULL ? skipped_globalrefs->length() : 0);
-    int i = 0;
-
-    // we choose not to compact the array when a globalref is destroyed
-    // since the number of such calls might not be that many.
-    for (i = 0; i < length; i++) {
-      if (skipped_globalrefs->at(i) == ref) {
-        skipped_globalrefs->at_put(i, NULL);
-        break;
-      }
-    }
-    assert(length == 0 || i < length, "JNI global ref");
-  }
-}
-
-void jvmpi::post_new_weakref_event(jobject ref, oop obj) {
-  JVMPI_Event event;
-    
-  GC_locker::lock();
-
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_JNI_WEAK_GLOBALREF_ALLOC;
-
-  event.u.jni_globalref_alloc.obj_id = (jobjectID)(obj);
-  event.u.jni_globalref_alloc.ref_id = ref;
-
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_delete_weakref_event(jobject ref) {
-  JVMPI_Event event;
-    
-  GC_locker::lock();
-
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_JNI_WEAK_GLOBALREF_FREE;
-
-  event.u.jni_globalref_free.ref_id = ref;
-
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_arena_new_event(int arena_id, const char* arena_name) {
-  if (!is_event_enabled(JVMPI_EVENT_ARENA_NEW)) return;
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_ARENA_NEW;
-  event.u.new_arena.arena_id = arena_id;
-  event.u.new_arena.arena_name = arena_name;
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_arena_delete_event(int arena_id) {
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_DELETE_ARENA;
-  event.u.delete_arena.arena_id = arena_id;
-  post_event_vm_mode(&event, NULL);
-}
-
-/* post_object_alloc_event requires size to be in bytes */
-void jvmpi::post_object_alloc_event(oop obj, size_t bytesize, jint arena_id, jint flag) {
-  // do not emit the event if the allocation event is not enabled, except if it is
-  // requested
-  if (!is_event_enabled(JVMPI_EVENT_OBJECT_ALLOC) && flag != JVMPI_REQUESTED_EVENT) return;
-  // bailout if obj is undefined
-  if (obj == NULL) return;
-  // bootstraping problem: Issue object allocation event for the java/lang/Class 
-  // mirror with class set to NULL (to avoid infinite recursion).
-  bool bootstrap = (obj == Klass::cast(SystemDictionary::class_klass())->java_mirror());
-  // determine klass & is_array info
-  oop klass;
-  int is_array;
-  if (bootstrap) {
-    klass    = NULL;
-    is_array = JVMPI_NORMAL_OBJECT;
-  } else if (obj->is_instance()) {
-    klass    = Klass::cast(obj->klass())->java_mirror();
-    is_array = JVMPI_NORMAL_OBJECT;
-  } else if (obj->is_objArray()) {
-    klass    = Klass::cast(objArrayKlass::cast(obj->klass())->element_klass())->java_mirror();
-    is_array = JVMPI_CLASS;
-  } else if (obj->is_typeArray()) {
-    klass    = NULL;
-    is_array = typeArrayKlass::cast(obj->klass())->element_type();
-  } else {
-    klass    = JVMPI_INVALID_CLASS;
-    is_array = JVMPI_NORMAL_OBJECT;
-  }    
-  // post event if ok
-  if (klass != JVMPI_INVALID_CLASS) {
-    if (!flag) GC_locker::lock();
-    /* fill event info and notify the profiler */
-    { JVMPI_Event event;
-      event.event_type           = JVMPI_EVENT_OBJECT_ALLOC | flag;
-      event.u.obj_alloc.arena_id = arena_id;
-      event.u.obj_alloc.class_id = (jobjectID)klass;
-      event.u.obj_alloc.is_array = is_array;
-      event.u.obj_alloc.size     = (int) bytesize; // spec will require 64 bit modifications
-      event.u.obj_alloc.obj_id   = (jobjectID)obj;
-      if (TraceJVMPI) {
-	tty->print_cr("JVMPI: post_object_alloc_event for object " INTPTR_FORMAT " ", (address)obj);
-      }
-      post_event_vm_mode(&event, NULL);
-    }
-    if (!flag) GC_locker::unlock();
-  }
-}
-
-
-void jvmpi::post_object_free_event(oop obj) {
-  JVMPI_Event event;
-
-  // $$$ There used to be an assertion that this was only happening during
-  // m/s collections.  Didn't know how to generalize, so I took it out.
-  // (DLD, 6/20).
-
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_OBJECT_FREE;
-
-  event.u.obj_free.obj_id = (jobjectID)obj;
-    
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_object_move_event(oop oldobj, int old_arena_id, oop newobj, int new_arena_id) {
-  JVMPI_Event event;
-    
-  assert(Universe::heap()->is_gc_active(), "Should only move objects during GC");
-
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_OBJECT_MOVE;
-
-  event.u.obj_move.obj_id       = (jobjectID)oldobj;
-  event.u.obj_move.arena_id     = old_arena_id;
-  event.u.obj_move.new_obj_id   = (jobjectID)newobj;
-  event.u.obj_move.new_arena_id = new_arena_id;
-
-  post_event_vm_mode(&event, NULL);
-}
-
-
-static jint level = 0;
-
-void jvmpi::post_method_entry2_event(methodOop m, oop receiver) {
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_METHOD_ENTRY2;
-  event.u.method_entry2.method_id = m->jmethod_id();
-  event.u.method_entry2.obj_id = (jobjectID)receiver;
-  if (TraceJVMPI) {
-#if 0
-    ResourceMark rm;
-    tty->print_cr("%04d %s: method_entry2 %s",
-		  level++,
-		  ((JavaThread*)get_thread())->get_thread_name(),
-		  m->name_and_sig_as_C_string());
-#endif
-  }
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_method_entry_event(methodOop m) {
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_METHOD_ENTRY;
-  event.u.method.method_id = m->jmethod_id();
-  if (TraceJVMPI) {
-#if 0
-    ResourceMark rm;
-    tty->print_cr("%04d %s: method_entry %s",
-		  level++,
-		  ((JavaThread*)get_thread())->get_thread_name(),
-		  m->name_and_sig_as_C_string());
-#endif
-  }
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_method_exit_event(methodOop m) {
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_METHOD_EXIT;
-  event.u.method.method_id = m->jmethod_id();
-  if (TraceJVMPI) {
-#if 0
-    ResourceMark rm;
-    tty->print_cr("%04d %s: method_exit  %s",
-		  --level,
-		  ((JavaThread*)get_thread())->get_thread_name(),
-		  m->name_and_sig_as_C_string());
-#endif
-  }
-  post_event_vm_mode(&event, NULL);
-}
-
-
-// use  compiled_method_t so that the line number table can be constructed only
-// temporarily and then released after post_compiled_method_load_event terminates
-void jvmpi::post_compiled_method_load_event(compiled_method_t *compiled_method_info) {
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_COMPILED_METHOD_LOAD;
-  event.u.compiled_method_load.method_id         = compiled_method_info->method->jmethod_id();
-  event.u.compiled_method_load.code_addr         = compiled_method_info->code_addr;
-  event.u.compiled_method_load.code_size         = compiled_method_info->code_size;
-  event.u.compiled_method_load.lineno_table_size = compiled_method_info->lineno_table_len;
-  event.u.compiled_method_load.lineno_table      = compiled_method_info->lineno_table;
-
-  post_event_vm_mode(&event, NULL);
-}
-
-void jvmpi::post_compiled_method_unload_event(methodOop method) {
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_COMPILED_METHOD_UNLOAD;
-  event.u.compiled_method_unload.method_id = method->jmethod_id();
-  post_event_vm_mode(&event, NULL);
-}
-
-void jvmpi::post_monitor_contended_enter_event(void* object) {
-  GC_locker::lock();
-
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_MONITOR_CONTENDED_ENTER;
-  event.u.monitor.object = (jobjectID)object;
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_monitor_contended_entered_event(void* object) {
-  GC_locker::lock();
-
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_MONITOR_CONTENDED_ENTERED;
-  event.u.monitor.object = (jobjectID)object;
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_monitor_contended_exit_event(void* object) {
-  GC_locker::lock();
-
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_MONITOR_CONTENDED_EXIT;
-  event.u.monitor.object = (jobjectID)object;
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_monitor_wait_event(oop obj, jlong millis) {
-  GC_locker::lock();
-
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_MONITOR_WAIT;
-  event.u.monitor_wait.object  = (jobjectID)obj;
-  event.u.monitor_wait.timeout = millis;
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_monitor_waited_event(oop obj, jlong millis) {
-  GC_locker::lock();
-
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_MONITOR_WAITED;
-  event.u.monitor_wait.object  = (jobjectID)obj;
-  event.u.monitor_wait.timeout = millis;
-  post_event_vm_mode(&event, NULL);
-
-  GC_locker::unlock();
-}
-
-
-void jvmpi::post_raw_monitor_contended_enter_event(RawMonitor* rmon) {
-  Thread* tp = Thread::current();
-  if (tp->is_VM_thread()) return;
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTER;
-  event.u.raw_monitor.name = rmon->name();
-  event.u.raw_monitor.id = (JVMPI_RawMonitor)rmon;
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_raw_monitor_contended_entered_event(RawMonitor* rmon) {
-  if (Thread::current()->is_VM_thread()) return;
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTERED;
-  event.u.raw_monitor.name = rmon->name();
-  event.u.raw_monitor.id = (JVMPI_RawMonitor)rmon;
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_raw_monitor_contended_exit_event(RawMonitor* rmon) {
-  if (Thread::current()->is_VM_thread()) return;
-  JVMPI_Event event;
-
-  event.event_type = JVMPI_EVENT_RAW_MONITOR_CONTENDED_EXIT;
-  event.u.raw_monitor.name = rmon->name();
-  event.u.raw_monitor.id = (JVMPI_RawMonitor)rmon;
-  post_event_vm_mode(&event, NULL);
-}
-
-
-void jvmpi::post_gc_start_event() {
-  JVMPI_Event event;
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
-
-  Thread* calling_thread = JavaThread::active();
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_GC_START;
-
-  assert(calling_thread->is_Java_thread(), "wrong thread");
-  post_event_vm_mode(&event, (JavaThread*)calling_thread);
-}
-
-
-class CountObjects: public ObjectClosure {
- private:
-  int _nof_objects;
- public:
-  CountObjects(): _nof_objects(0) {}
-
-  void do_object(oop obj) { _nof_objects++;  };
-
-  int nof_objects() const { return _nof_objects; }
-};
-
-
-void jvmpi::post_gc_finish_event(jlong used_obj_space, jlong total_obj_space) {
-  JVMPI_Event event;
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
-  jlong used_objs = 0;
-  // compute number of used objects
-  { // Note: this is slow and cumbersome
-    CountObjects blk;
-    // Although the call to ensure_parsability()
-    // is not needed here due to this code running at the end of
-    // GC, these have been added here commented out since
-    // this code has moved around.
-    //  Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
-    Universe::heap()->permanent_object_iterate(&blk);
-
-    Universe::heap()->object_iterate(&blk);
-    used_objs = blk.nof_objects();
-  }
-  Thread* calling_thread = JavaThread::active();
-  /* fill event info and notify the profiler */
-  event.event_type = JVMPI_EVENT_GC_FINISH;
-  event.u.gc_info.used_objects       = used_objs;
-  event.u.gc_info.used_object_space  = used_obj_space;
-  event.u.gc_info.total_object_space = total_obj_space;
-
-  assert(calling_thread->is_Java_thread(), "wrong thread");
-  post_event_vm_mode(&event, (JavaThread*)calling_thread);
-}
-
-
-void jvmpi::post_trace_instr_event(unsigned char *pc, unsigned char opcode) {
-  Unimplemented();
-}
-
-
-void jvmpi::post_trace_if_event(unsigned char *pc, int is_true) {
-  Unimplemented();
-}
-
-
-void jvmpi::post_trace_tableswitch_event(unsigned char *pc, int key, int low, int hi) {
-  Unimplemented();
-}
-
-
-void jvmpi::post_trace_lookupswitch_event(unsigned char *pc, int chosen_pair_index, int pairs_total) {
-  Unimplemented();
-}
-
-
-// heap dumps
-
-// Dump is a helper class for all kinds of dumps that require
-// a buffer to hold the dump. 
-
-class Dump: public StackObj {
- private:
-  address _begin;                      // the beginning of the dump space, NULL if no space was allocated
-  address _end;                        // the current dump position
-  address _limit;                      // the limit of the dump space (debugging only)
-
-  void init(int dump_size) {
-    assert(dump_size <= 0 || _begin == NULL, "dump buffer already allocated");
-    _begin = dump_size > 0 ? NEW_C_HEAP_ARRAY(unsigned char, dump_size) : NULL;
-    _end   = _begin;
-    _limit = _begin + dump_size;
-  }
-    
-  bool write() const                   { return begin() != NULL; }
-  address limit() const                { return _limit; }
-  void range_check(int size)           { assert(end() + size <= limit(), "end of dump reached"); }
-
- public:
-  // creation
-  Dump()                               { init(0); }
-  void enable_write(int dump_size)     { init(dump_size); }
-  ~Dump()                              { if (write()) FREE_C_HEAP_ARRAY(unsigned char, begin()); }
-
-  // accessors
-  address begin() const                { return _begin; }
-  address end() const                  { return _end; }
-  int size() const                     { return end() - begin(); }
-
-  // primitive dumpers
-  void dump_u1(u1 x)                   { if (write()) { range_check(1); *_end = x;                   } _end += 1; }
-  void dump_u2(u2 x)                   { if (write()) { range_check(2); Bytes::put_Java_u2(_end, x); } _end += 2; }
-  void dump_u4(u4 x)                   { if (write()) { range_check(4); Bytes::put_Java_u4(_end, x); } _end += 4; }
-  void dump_u8(u8 x)                   { if (write()) { range_check(8); Bytes::put_Java_u8(_end, x); } _end += 8; }
-
-  // basic type dumpers
-  void dump_bool  (jboolean* x)        { dump_u1(*(u1*)x); }
-  void dump_char  (jchar*    x)        { dump_u2(*(u2*)x); }
-  void dump_float (jfloat*   x)        { dump_u4(*(u4*)x); }
-  void dump_double(jdouble*  x)        { dump_u8(*(u8*)x); }
-  void dump_byte  (jbyte*    x)        { dump_u1(*(u1*)x); }
-  void dump_short (jshort*   x)        { dump_u2(*(u2*)x); }
-  void dump_int   (jint*     x)        { dump_u4(*(u4*)x); }
-  void dump_long  (jlong*    x)        { dump_u8(*(u8*)x); }
-  void dump_obj   (oop*      x)        { dump_oop(*x); }
-
-  // other dumpers
-  //
-  // Note: jobjectID (oops) and JNIEnv* are not dumped in Java byte ordering
-  //       like all other data types - which is an inconsistency. It should
-  //       really be handled like all other data (and mapped to u4 for the
-  //       ia32 architecture).
-  void dump_oop(oop obj) {
-    if (obj != NULL && obj->is_klass()) {
-      // There are some objects, e.g., "unsafe" static field accessors,
-      // that can have a direct reference to an instanceKlass and we
-      // don't want to expose an internal data structure via a heap dump.
-      // Most places with 'if (obj->is_klass())' checks just return, but
-      // if we return from here, then that can confuse the caller that
-      // has assumptions about the dump size which will cause crashes.
-      // We just dump NULL instead.
-      obj = NULL;
-    }
-    assert(obj == NULL || obj->is_oop(), "not an oop");
-#ifndef _LP64
-    if (write()) {
-      range_check(4);
-      Bytes::put_native_u4(_end, (u4)obj);
-    }
-    _end += 4;
-#else
-    if (write()) {
-      range_check(8);
-      Bytes::put_native_u8(_end, (u8)obj);
-    }
-    _end += 8;
-#endif
-  }
-
-#ifndef _LP64
-  void dump_thread(JNIEnv* env)        { if (write()) { range_check(4); Bytes::put_native_u4(_end, (u4)env); } _end += 4; }
-  void dump_rawmonitor(JVMPI_RawMonitor mon) { if (write()) { range_check(4); Bytes::put_native_u4(_end, (u4)mon); } _end += 4; }
-  void dump_char_array(const char* s)        { if (write()) { range_check(4); Bytes::put_native_u4(_end, (u4)s); } _end += 4; }
-  void dump_voids(void* x)             { dump_u4((u4)x); }
-#else
-  void dump_thread(JNIEnv* env)        { if (write()) { range_check(8); Bytes::put_native_u8(_end, (u8)env); } _end += 8; }
-  void dump_rawmonitor(JVMPI_RawMonitor mon) { if (write()) { range_check(8); Bytes::put_native_u8(_end, (u8)mon); } _end += 8; }
-  void dump_char_array(const char* s)        { if (write()) { range_check(8); Bytes::put_native_u8(_end, (u8)s); } _end += 8; }
-  void dump_voids(void* x)             { dump_u8((u8)x); }
-#endif
-  void dump_type (int type)            { dump_u1((u1)type); }
-
-  // patching
-  void patch_u2(address at, u2 x) {
-    if (write()) {
-      assert(begin() <= at && at + 2 <= limit(), "patching outside dump space");
-      Bytes::put_Java_u2(at, x);
-    }
-  }
-
-  void patch_u4(address at, u4 x) {
-    if (write()) {
-      assert(begin() <= at && at + 4 <= limit(), "patching outside dump space");
-      Bytes::put_Java_u4(at, x);
-    }
-  }
-};
-
-
-class FieldDumper: public SignatureIterator {
- private:
-  Dump*   _dump;
-  address _addr;
-  bool    _dump_basic_types;
-
- public:
-  FieldDumper(Dump* dump, int level, symbolHandle signature, address addr)
-  : SignatureIterator(signature)
-  , _dump(dump)
-  , _addr(addr)
-  { 
-    _dump_basic_types = (level == JVMPI_DUMP_LEVEL_2);
-    dispatch_field();
-  }
-
-  void do_bool  ()                     { if (_dump_basic_types) _dump->dump_bool  ((jboolean*)_addr); }
-  void do_char  ()                     { if (_dump_basic_types) _dump->dump_char  ((jchar*   )_addr); }
-  void do_float ()                     { if (_dump_basic_types) _dump->dump_float ((jfloat*  )_addr); }
-  void do_double()                     { if (_dump_basic_types) _dump->dump_double((jdouble* )_addr); }
-  void do_byte  ()                     { if (_dump_basic_types) _dump->dump_byte  ((jbyte*   )_addr); }
-  void do_short ()                     { if (_dump_basic_types) _dump->dump_short ((jshort*  )_addr); }
-  void do_int   ()                     { if (_dump_basic_types) _dump->dump_int   ((jint*    )_addr); }
-  void do_long  ()                     { if (_dump_basic_types) _dump->dump_long  ((jlong*   )_addr); }
-  void do_void  ()                     { ShouldNotReachHere();                                        }
-  void do_object(int begin, int end)   {                        _dump->dump_obj   ((oop*     )_addr); }
-  void do_array (int begin, int end)   {                        _dump->dump_obj   ((oop*     )_addr); }
-};
-
-
-// The ObjectDumper takes care of any heap object to be dumped.
-// Note that non java-level objects are filtered out (such as
-// klasses, methodOops, etc.) and that mirrors are converted
-// into klasses for the dump.
-
-class ObjectDumper: public StackObj {
- private:
-  Dump* _dump;
-  int   _level;
-
-  void dump_instance(instanceOop instance) {
-    if (_level == JVMPI_DUMP_LEVEL_0) {
-      // dump type and id only
-      _dump->dump_type(JVMPI_NORMAL_OBJECT);
-      _dump->dump_oop(instance);
-      return;
-    }
-    // dump header
-    _dump->dump_type(JVMPI_GC_INSTANCE_DUMP);
-    _dump->dump_oop(instance);
-    _dump->dump_oop(Klass::cast(instance->klass())->java_mirror());
-    _dump->dump_u4((u4)0);              // reserve space for no. of bytes - patched at the end
-    address field_start = _dump->end(); // remember start of field dump
-    // dump instance fields
-    // (note: dumping in reverse order since the klass load event dumps
-    //        the instance field description in reverse order as well.)
-    { for (FieldStream s(instanceKlassHandle(instance->klass()), false, true); !s.eos(); s.next()) {
-        // ignore static fields as they are not in the instance
-        if (!s.access_flags().is_static()) {
-          FieldDumper(_dump, _level, s.signature(), (address)instance + s.offset());
-        }
-      }
-    }
-    // patch no. of bytes
-    _dump->patch_u4(field_start - 4, _dump->end() - field_start);
-  }
-
-  void dump_obj_array(objArrayOop array) {
-    // Note: Do not dump system object arrays as they are meaningless
-    //       for hprof. Furthermore, they contain klasses which should
-    //       never go out w/o extra treatment.
-    if (array->klass() != Universe::systemObjArrayKlassObj()) {
-      if (_level == JVMPI_DUMP_LEVEL_0) {
-        // dump type and id only
-        _dump->dump_type(JVMPI_CLASS);
-        _dump->dump_oop(array);
-        return;
-      }
-      oop klass = Klass::cast(objArrayKlass::cast(array->klass())->element_klass())->java_mirror();
-      const int length = array->length();
-      // dump header
-      _dump->dump_type(JVMPI_GC_OBJ_ARRAY_DUMP);
-      _dump->dump_oop(array);
-      _dump->dump_u4(length);
-      _dump->dump_oop(klass);
-      // dump elements
-      for (int i = 0; i < length; i++) _dump->dump_oop(array->obj_at(i));
-      // debugging
-      if (TraceJVMPI) {
-        tty->print("JVMPI: dump @ " INTPTR_FORMAT " obj array [%d] (klass = " INTPTR_FORMAT ")", (address)array, length, (address)klass);
-        if (Verbose) {
-          tty->print(" {");
-          for (int i = 0; i < length; i++) {
-            if (i > 0) tty->print(", ");
-            tty->print(INTPTR_FORMAT, (address)array->obj_at(i));
-          }
-          tty->print("}");
-        }
-        tty->cr();
-      }
-    }
-  }
-
-  void dump_type_array(typeArrayOop array) {
-    const int length = array->length();
-    const BasicType type = typeArrayKlass::cast(array->klass())->element_type();
-    int jvmpi_type = -1;
-    switch (type) {
-      case T_BOOLEAN: jvmpi_type = JVMPI_BOOLEAN; break;
-      case T_CHAR   : jvmpi_type = JVMPI_CHAR   ; break;
-      case T_FLOAT  : jvmpi_type = JVMPI_FLOAT  ; break;
-      case T_DOUBLE : jvmpi_type = JVMPI_DOUBLE ; break;
-      case T_BYTE   : jvmpi_type = JVMPI_BYTE   ; break;
-      case T_SHORT  : jvmpi_type = JVMPI_SHORT  ; break;
-      case T_INT    : jvmpi_type = JVMPI_INT    ; break;
-      case T_LONG   : jvmpi_type = JVMPI_LONG   ; break;
-      default       : ShouldNotReachHere();
-    }
-    if (_level == JVMPI_DUMP_LEVEL_0) {
-      // dump type and id only
-      _dump->dump_type(jvmpi_type);
-      _dump->dump_oop(array);
-      return;
-    }
-    // dump header
-    _dump->dump_type(JVMPI_GC_PRIM_ARRAY_DUMP);
-    _dump->dump_oop(array);
-    _dump->dump_u4(length);
-    _dump->dump_type(jvmpi_type);
-    // dump elements
-    if (_level == JVMPI_DUMP_LEVEL_2) {
-      switch (type) {
-        case T_BOOLEAN: { for (int i = 0; i < length; i++) _dump->dump_bool  (array->bool_at_addr  (i)); } break;
-        case T_CHAR   : { for (int i = 0; i < length; i++) _dump->dump_char  (array->char_at_addr  (i)); } break;
-        case T_FLOAT  : { for (int i = 0; i < length; i++) _dump->dump_float (array->float_at_addr (i)); } break;
-        case T_DOUBLE : { for (int i = 0; i < length; i++) _dump->dump_double(array->double_at_addr(i)); } break;
-        case T_BYTE   : { for (int i = 0; i < length; i++) _dump->dump_byte  (array->byte_at_addr  (i)); } break;
-        case T_SHORT  : { for (int i = 0; i < length; i++) _dump->dump_short (array->short_at_addr (i)); } break;
-        case T_INT    : { for (int i = 0; i < length; i++) _dump->dump_int   (array->int_at_addr   (i)); } break;
-        case T_LONG   : { for (int i = 0; i < length; i++) _dump->dump_long  (array->long_at_addr  (i)); } break;
-        default       : ShouldNotReachHere();
-      }
-    }
-    // debugging
-    if (TraceJVMPI) {
-      tty->print_cr("JVMPI: dump @ " INTPTR_FORMAT " prim array [%d] (type = %d)", (address)array, length, type);
-    }
-  }
-
-  void dump_klass(klassOop klass) {
-    if (Klass::cast(klass)->oop_is_instance()) {
-      instanceKlass* k = instanceKlass::cast(klass);
-      // Check for level 0 dump
-      if (_level == JVMPI_DUMP_LEVEL_0) {
-        // dump type and id only
-        _dump->dump_type(JVMPI_NORMAL_OBJECT);    // Is this right?
-        _dump->dump_oop(k->java_mirror());
-        return;
-      }
-      // dump header
-      _dump->dump_type(JVMPI_GC_CLASS_DUMP);
-      _dump->dump_oop(k->java_mirror());
-      _dump->dump_oop(k->super() == NULL ? (oop)NULL : Klass::cast(k->super())->java_mirror());
-      _dump->dump_oop(k->class_loader());
-      _dump->dump_oop(k->signers());
-      _dump->dump_oop(k->protection_domain());
-      _dump->dump_oop(StringTable::lookup(k->name())); // NULL if not interned string
-      _dump->dump_voids(NULL); // reserved
-      _dump->dump_u4(k->size_helper() * BytesPerWord);
-      // dump interfaces
-      { objArrayOop interfaces = k->local_interfaces();
-        for (int i = 0; i < interfaces->length(); i++) {
-          oop interf = Klass::cast((klassOop)interfaces->obj_at(i))->java_mirror();
-          _dump->dump_oop(interf);
-        }
-      }
-      // dump constant pool
-      { address size_loc = _dump->end();    // remember constant pool size location for later patching  
-        _dump->dump_u2((u2)0);              // reserve space for constant pool size - patched at the end
-        int size = 0;
-        const constantPoolOop pool = k->constants();
-        for (int i = 1; i < pool->length(); i++) { // index i = 0 is unused!
-          address end = _dump->end();
-          // for now we ignore all entries
-          // eventually we should probably
-          // dump at least the oop entries
-          /*
-          switch (pool->tag_at(i).value()) {
-            case JVM_CONSTANT_Class:
-            case JVM_CONSTANT_Fieldref:
-            ...
-          }
-          */
-          if (end != _dump->end()) size++; // count individual entries
-        }
-        // patch number of entries
-        _dump->patch_u2(size_loc, size);
-      }
-      // dump static fields
-      // (note: dumping in reverse order since the klass load event dumps
-      //        the static field description in reverse order as well.)
-      {
-         instanceKlassHandle kh(klass);
-         FieldStream s(kh, true, true);
-         for (; !s.eos(); s.next()) { 
-           // ignore instance fields as they are not in the klass
-           if (s.access_flags().is_static()) {
-             FieldDumper(_dump, _level, s.signature(), (address)klass + s.offset());
-           }
-         }
-      }
-    } else if (Klass::cast(klass)->oop_is_objArray()) {
-      objArrayKlass* k = objArrayKlass::cast(klass);
-      // Check for level 0 dump
-      if (_level == JVMPI_DUMP_LEVEL_0) {
-        // dump type and id only
-        _dump->dump_type(JVMPI_NORMAL_OBJECT);    // Is this right?
-        _dump->dump_oop(k->java_mirror());
-        return;
-      }
-      // still missing
-    }
-  }
-
- public:
-  ObjectDumper(Dump* dump, int level, oop obj) : _dump(dump), _level(level) {
-    // filter out all klasses
-    if (obj->is_klass()) return;
-    // convert mirrors
-    if (obj->klass() == SystemDictionary::class_klass()) {
-      // obj is a mirror - convert into corresponding class if possible
-      if (!java_lang_Class::is_primitive(obj)) {
-        // obj is not a mirror for a primitive class (basic type)
-        // get the corresponding class for dumping
-        obj = java_lang_Class::as_klassOop(obj);
-        assert(obj != NULL, "class for non-primitive mirror must exist");
-      } else {
-        // obj is a mirror for a primitice class (basic type)
-        // for which we don't have a (VM-level) class => dump
-        // mirror as it is.
-      }
-    }
-    // dump object
-           if (obj->is_instance ()) { dump_instance  ((instanceOop )obj);
-    } else if (obj->is_objArray ()) { dump_obj_array ((objArrayOop )obj);
-    } else if (obj->is_typeArray()) { dump_type_array((typeArrayOop)obj);
-    } else if (obj->is_klass    ()) { dump_klass     ((klassOop    )obj);
-    }
-  }
-};
-
-
-class JvmpiHeapDumper: public ObjectClosure {
- private:
-  Dump* _dump;
-  int   _level;
-
- public:
-  JvmpiHeapDumper(Dump* dump, int level) : _dump(dump), _level(level) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-    // Ensure the heap's parsable before iterating over it
-    Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
-    Universe::heap()->object_iterate(this);
-  }
-
-  void do_object(oop obj)              { ObjectDumper(_dump, _level, obj); }
-};
-
-
-// Move this in machine specific part !
-class MonitorDumper: public StackObj {
- private:
-  Dump* _dump;
-
-  void dump_for_thread(ObjectMonitor* mid, JavaThread* thread) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-    ResourceMark rm;
-    // klassOops may be locked (e.g. due to class initialization). Make sure to skip them.
-    if (((oop)mid->object())->is_klass()) return;
-    //
-    // Solaris implements mid->count() differently than Win32 or Linux so
-    // we had to create and use the OS specific contentions() function.
-    //
-    int n_want_lock = mid->contentions();     // number of threads contending for the monitor
-    int n_waiters = mid->waiters();
-    // this is an unused monitor so skip it
-    if (thread == NULL && n_want_lock == 0 && n_waiters == 0) return;
-    // dump header
-    _dump->dump_type(JVMPI_MONITOR_JAVA);
-    _dump->dump_oop((oop)mid->object());
-    _dump->dump_thread(thread == NULL ? NULL : thread->jni_environment());
-    _dump->dump_u4(n_want_lock + n_waiters); // entry count
-    _dump->dump_u4(n_want_lock); // number of threads waiting to enter
-    if (n_want_lock > 0) {
-      GrowableArray<JavaThread*>* want_list = Threads::get_pending_threads(
-	n_want_lock, (address)mid, false /* no locking needed */);
-      for (int i = 0; i < n_want_lock; i++) {
-        if (i < want_list->length()) {
-          JavaThread* jt = want_list->at(i);
-          _dump->dump_thread(jt->jni_environment());
-        } else {
-          _dump->dump_thread(NULL);
-        }
-      }
-    }
-    _dump->dump_u4(n_waiters); // number of threads waiting to be notified
-    if (n_waiters > 0) {
-      ObjectWaiter* waiter = mid->first_waiter();
-      for (int i = 0; i < n_waiters; i++) {
-//        assert(waiter != NULL, "wrong number of waiters");
-// No guarantee this value doesn't change while dumping
-	if (waiter != NULL) {
-          Thread* thd = mid->thread_of_waiter(waiter);
-          if (thd->is_Java_thread()) {
-            _dump->dump_thread(((JavaThread*)thd)->jni_environment());
-          } else {
-            _dump->dump_thread(NULL);
-          }
-          waiter = mid->next_waiter(waiter);
-	} else {
-	  _dump->dump_thread(NULL);
-	}
-      }
-    }
-  }
-
- public:
-  MonitorDumper(Dump* dump, ObjectMonitor* mid): _dump(dump) {
-    // dump Java lock
-    dump_for_thread(mid, Threads::owning_thread_from_monitor_owner(
-      (address)mid->owner(), false /* no locking needed */));
-  }
-};
-
-
-class JavaMonitorDumper: public MonitorClosure {
- private:
-  Dump* _dump;
-
- public:
-  JavaMonitorDumper(Dump* dump) : _dump(dump) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-    ObjectSynchronizer::monitors_iterate(this);   // first dump the monitor cache
-    if (!UseHeavyMonitors) {	// now dump any lightweight monitors
-      ResourceMark rm;
-      GrowableArray<ObjectMonitor*>* fab_list = Threads::jvmpi_fab_heavy_monitors();
-      for (int i = 0; i < fab_list->length(); i++) {
-        ObjectMonitor* fab = fab_list->at(i);
-        assert(fab != NULL, "Expected fabricated heavyweight monitor");
-        MonitorDumper(_dump, fab);
-        // ObjectMonitor is a CHeap object, so remember to free it
-        delete fab;
-      }
-    }
-  }
-  void do_monitor(ObjectMonitor* mid)  { MonitorDumper(_dump, mid); }
-};
-
-
-class RawMonitorDumper: public StackObj {
- private:
-  Dump* _dump;
- public:
-  RawMonitorDumper(Dump* dump) : _dump(dump) {
-    for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
-      dump_rawmonitors_for(thread);
-    }
-  }
-
-  void dump_rawmonitors_for(JavaThread* thread) {
-    char* no_name = NULL;
-    for (RawMonitor* mon = thread->rawmonitor_list(); mon; mon = mon->next_raw()) {
-      assert((PROF_RM_CHECK(mon)), "invalid raw monitor");
-      _dump->dump_type(JVMPI_MONITOR_RAW);
-      _dump->dump_char_array(mon->name());
-      _dump->dump_rawmonitor((JVMPI_RawMonitor) mon);
-      _dump->dump_thread(thread->jni_environment());
-      dump_monitor_info(mon, thread);
-    }
-  }
-
-  void dump_monitor_info(RawMonitor* mid, JavaThread* thread)
-{
-    //
-    // Solaris implements mid->count() differently than Win32 or Linux so
-    // we had to create and use the OS specific contentions() function.
-    //
-    int n_want_lock = mid->contentions();     // number of threads contending for the monitor
-    int n_waiters = mid->waiters();
-    // this is an unused monitor so skip it
-    if (thread == NULL && n_want_lock == 0 && n_waiters == 0) return;
-    // dump header
-    _dump->dump_u4(n_want_lock + n_waiters); // entry count
-    _dump->dump_u4(n_want_lock); // number of threads waiting to enter
-    if (n_want_lock > 0) {
-      GrowableArray<JavaThread*>* want_list = Threads::get_pending_threads(
-	n_want_lock, (address)mid, false /* no locking needed */);
-      for (int i = 0; i < n_want_lock; i++) {
-        if (i < want_list->length()) {
-          JavaThread* jt = want_list->at(i);
-          _dump->dump_thread(jt->jni_environment());
-        } else {
-          _dump->dump_thread(NULL);
-        }
-      }
-    }
-    _dump->dump_u4(n_waiters); // number of threads waiting to be notified
-    if (n_waiters > 0) {
-      ObjectWaiter* waiter = mid->first_waiter();
-      for (int i = 0; i < n_waiters; i++) {
-//        assert(waiter != NULL, "wrong number of waiters");
-// no guarantee that this is not changing dynamically
-          if (waiter != NULL) {
-            Thread* thd = mid->thread_of_waiter(waiter);
-            if (thd->is_Java_thread()) {
-              _dump->dump_thread(((JavaThread*)thd)->jni_environment());
-            } else {
-              _dump->dump_thread(NULL);
-            }
-            waiter = mid->next_waiter(waiter);
-          } else {
-            _dump->dump_thread(NULL);
-          }
-      }
-    }
-  }
-};
-
-// JVMPI GC Root Collection support
-
-class HeapDumpInfoCollector;
-class RootElementForThread;
-class RootElementForFrame;
-
-class CollectRootOopsClosure : public OopClosure {
-public:
-  enum RootType {
-    _unknown,
-    _jni_handle,
-    _stack_frame,
-    _system_class,
-    _thread_block,
-    _monitor_used
-  };
-
- private:
-  JavaThread* _thread;
-  intptr_t*   _frame_id;
-  bool        _is_native_frame;
-  bool        _is_entry_frame;
-  GrowableArray<RootType>* typesStack;
-  HeapDumpInfoCollector*   _rc;
-
- public:
-  CollectRootOopsClosure(HeapDumpInfoCollector *rc) {
-    _rc = rc;
-    typesStack = new (ResourceObj::C_HEAP) GrowableArray<RootType>(5, true);
-    // Support nested begin_iterate and end_iterate calls
-    typesStack->push(_unknown);
-  }
-  ~CollectRootOopsClosure() {
-    assert(typesStack->length() == 1, "All types should be popped");
-    delete typesStack;
-  }
-  void set_thread(JavaThread* thread) { 
-    _thread = thread;
-    _frame_id = NULL;
-    _is_native_frame = false;
-    _is_entry_frame = false;
-  }
-  void set_frame_type(bool is_native, bool is_entry) {
-    _is_native_frame = is_native;
-    _is_entry_frame = is_entry;
-  }
-  void set_frame_id(intptr_t* id) {
-    _frame_id = id; 
-  }
-  void begin_iterate(RootType type) { typesStack->push(type); }
-  void end_iterate(RootType type) { 
-    RootType t = typesStack->pop();
-    assert(t == type, "type doesn't match");
-  }
-  void do_oop(oop* obj_p);
-};
-
-class CallTraceDump: public StackObj {
-  jint _num_traces;
-  int  _index;
-  int  _frame_index;
-  JVMPI_CallTrace*  _traces;
-  JVMPI_CallFrame** _frames;
-public:
-  CallTraceDump() { _num_traces = 0; _traces = NULL; _frames = NULL; _index = 0; }
-  ~CallTraceDump() {
-    for (int i = 0; i < _num_traces; i++) {
-      FREE_C_HEAP_ARRAY(JVMPI_CallFrame, _frames[i]);
-    }
-    FREE_C_HEAP_ARRAY(JVMPI_CallTrace, _traces);
-    FREE_C_HEAP_ARRAY(JVMPI_CallFrame*, _frames);
-  }
-  void set_calltrace(JavaThread* thread, int num_frames) {
-    assert(_traces != NULL && _index < _num_traces, "check number of calltraces generated");
-    assert(_index == -1 || _frame_index == _traces[_index].num_frames, "Previous call trace is not filled.");
-    _index++;
-    _frames[_index] = NEW_C_HEAP_ARRAY(JVMPI_CallFrame, num_frames);
-    _traces[_index].env_id = thread->jni_environment();;
-    _traces[_index].num_frames = num_frames;
-    _traces[_index].frames = _frames[_index];
-    _frame_index = 0;
-  }
-  void set_callframe(jint lineno, jmethodID method_id) {
-    assert(_traces[_index].frames != NULL, "JVMPI_CallFrames must have been allocated"); 
-    assert(_frame_index < _traces[_index].num_frames, "Invalid _frame_index");
-    JVMPI_CallFrame* frame = _traces[_index].frames;
-    frame[_frame_index].lineno = lineno;
-    frame[_frame_index].method_id = method_id;
-    _frame_index++;
-  }
-  void set_num_traces(jint num_traces) { 
-    _num_traces = num_traces; 
-    _index = -1;
-    _frame_index = -1;
-    if (num_traces > 0) {
-      _traces = NEW_C_HEAP_ARRAY(JVMPI_CallTrace, num_traces);
-      _frames = NEW_C_HEAP_ARRAY(JVMPI_CallFrame*, num_traces);
-    } else {
-      _traces = NULL;
-      _frames = NULL;
-    }
-  }
-  jint get_num_traces() { return _num_traces; }
-  JVMPI_CallTrace* get_calltraces() { 
-    assert(_index == (_num_traces - 1), "Not all call traces are filled");
-    assert(_frame_index == _traces[_index].num_frames, "The last call trace is not filled");
-    return _traces; 
-  }
-};
-
-const jint ROOT_JNI_GLOBAL_SIZE   = (1 + BytesPerWord * 2);
-const jint ROOT_JNI_LOCAL_SIZE    = (1 + BytesPerWord * 2 + 4);
-const jint ROOT_JAVA_FRAME_SIZE   = (1 + BytesPerWord * 2 + 4);
-const jint ROOT_NATIVE_STACK_SIZE = (1 + BytesPerWord * 2);
-const jint ROOT_STICKY_CLASS_SIZE = (1 + BytesPerWord);
-const jint ROOT_THREAD_BLOCK_SIZE = (1 + BytesPerWord * 2);
-const jint ROOT_MONITOR_USED_SIZE = (1 + BytesPerWord);
-const jint ROOT_UNKNOWN_SIZE      = (1 + BytesPerWord);
-const jint INIT_ROOTS_ARRAY_SIZE  = 256;
-
-class HeapDumpInfoCollector: public StackObj {
- private:
-  jint                  _num_threads;
-  RootElementForThread* _threadRootInfo;
-  GrowableArray<oop*>*  _jni_global_roots;
-  GrowableArray<oop>*   _sticky_class_roots;
-  GrowableArray<oop>*   _monitor_used_roots;
-  GrowableArray<oop>*   _unknown_roots;
-
-  void collect_roots();
-  void add_root_to_thread(jint root_type, oop root, JavaThread* thread = NULL, intptr_t* sp = NULL, oop* obj_p = NULL);
-  void set_curRootThread(JavaThread *thread);
-  RootElementForThread* curRootThread;
-  bool                  is_collect_roots;
-
- public:
-
-  // HeapDumpInfoCollector collects call traces and
-  // if roots is true, it collects GC root references as well.
-  HeapDumpInfoCollector(bool collect_gc_roots);
-  ~HeapDumpInfoCollector();
-
-  bool is_jni_local(JavaThread* thread, intptr_t* sp, oop* obj_p);
-  void add_root(jint root_type, oop root, JavaThread* thread = NULL, intptr_t* sp = NULL, oop* obj_p = NULL);
-  void add_root(jint root_type, oop* root); // JNI global reference
-  jlong root_dump_size() const;
-  void dump_roots(Dump* dump) const;
-  void dump_calltraces(CallTraceDump* traces) const;
-
-  static void sort_roots(GrowableArray<oop>* roots);
-};
-
-
-class RootElementForFrame : public CHeapObj {
- private:
-  intptr_t* _frame_id;
-  jint      _depth;
-  bool      _is_native_method;
-  jint      _lineno; 
-  jmethodID _method_id;
-  GrowableArray<oop>*  _roots;
-  GrowableArray<oop>*  _jni_local_roots;
-  GrowableArray<oop*>* _jni_local_refs;
-  RootElementForFrame* _next;
-
- public:
-  RootElementForFrame(intptr_t* id, bool is_native, jmethodID mid = 0, jint lineno = 0, jint d = 0) {
-    _frame_id = id;
-    _is_native_method = is_native;
-    _method_id = mid;
-    _lineno = lineno;
-    _depth = d;
-    _next = NULL;
-    _roots = NULL;
-    _jni_local_roots = NULL;
-    _jni_local_refs = NULL;
-  }
-  ~RootElementForFrame() {
-    if (_roots != NULL) {
-      delete _roots;
-    }
-    if (_jni_local_roots != NULL) {
-      delete _jni_local_roots;
-      delete _jni_local_refs;
-    }
-  };
-  RootElementForFrame* next()           { return _next; }
-  void set_next(RootElementForFrame* p) { _next = p; }
-  void set_depth(jint d)                { _depth = d; }
-  jint lineno()                         { return _lineno; } 
-  jmethodID method_id()                 { return _method_id; } 
-  intptr_t* frame_id()                  { return _frame_id; }
-  bool is_jni_local(oop* obj_p) {
-    if (_jni_local_refs == NULL) return false;
-
-    int length = _jni_local_refs->length();
-    for (int i = 0; i < length; i++) {
-      if (_jni_local_refs->at(i) == obj_p) {
-        return true;
-      }
-    }
-    return false;
-  }
-  void add_root(oop obj) {
-    if (_roots == NULL) {
-      _roots = new (ResourceObj::C_HEAP) GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-    }
-    _roots->append(obj); 
-  }
-  void add_jni_local(oop obj, oop* obj_p) {
-    assert(obj_p != NULL, "JNI local ref");
-    if (_jni_local_roots == NULL) {
-      _jni_local_roots = new (ResourceObj::C_HEAP) GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-      _jni_local_refs = new (ResourceObj::C_HEAP) GrowableArray<oop*>(INIT_ROOTS_ARRAY_SIZE, true);
-    }
-    _jni_local_roots->append(obj);
-    _jni_local_refs->append(obj_p);
-  }
-  void sort_roots() {
-    HeapDumpInfoCollector::sort_roots(_roots);
-    HeapDumpInfoCollector::sort_roots(_jni_local_roots);
-  }
-  void dump_roots(Dump* dump, JNIEnv* env_id) const;
-  jlong root_dump_size() const;
-};
-
-class RootElementForThread : public CHeapObj {
- private:
-  JavaThread* _thread;
-  jint        _num_frames;
-  RootElementForFrame*  _frameRootInfo;
-  RootElementForFrame*  _empty_java_frame;
-  GrowableArray<oop>*   _native_stack_roots;
-  GrowableArray<oop>*   _thread_block_roots;
-  RootElementForThread* _next;
-
-  void get_stack_trace();
-  void add_root_to_frame(jint root_type, oop root, intptr_t* sp, oop* obj_p = NULL);
-  RootElementForFrame* curRootFrame;
-
- public:
-  RootElementForThread(JavaThread* t, bool is_collect_roots);
-  ~RootElementForThread();
-
-  RootElementForFrame* get_frame(intptr_t* id);
-  RootElementForThread* next()           { return _next; }
-  void set_next(RootElementForThread* p) { _next = p; }
-  JavaThread* thread()                   { return _thread; }
-  bool is_jni_local(intptr_t* sp, oop* obj_p);
-  void add_root(jint root_type, oop root, intptr_t* sp, oop* obj_p = NULL);
-  void sort_roots() {
-    if (_num_frames == 0) {
-      _empty_java_frame->sort_roots();
-    } else {
-      for (RootElementForFrame* p = _frameRootInfo; p != NULL; p = p->next()) {
-        p->sort_roots();
-      }    
-    }
-
-    HeapDumpInfoCollector::sort_roots(_native_stack_roots);
-    HeapDumpInfoCollector::sort_roots(_thread_block_roots);
-  }
-  void dump_roots(Dump* dump) const;
-  jlong root_dump_size() const;
-  void dump_calltrace(CallTraceDump* dump) const;
-};
-
-// Implementation of CollectRootOopsClosure::do_oop()
-void CollectRootOopsClosure::do_oop(oop* obj_p) {
-  oop obj = *obj_p;
-  RootType type = typesStack->top();
-  bool is_klass = false;
-
-  if (obj == NULL || 
-      (type == _system_class && !obj->is_klass()) || // Skip if not a klass for system class roots
-      (type != _system_class && !obj->is_instance() && !obj->is_typeArray() && !obj->is_objArray())) { 
-      return;
-  }
-
-  if (obj->is_klass()) {
-    if (obj->blueprint()->oop_is_instanceKlass() || obj->blueprint()->oop_is_typeArrayKlass() || obj->blueprint()->oop_is_objArrayKlass()) {
-      obj = Klass::cast((klassOop)obj)->java_mirror();
-      is_klass = true;
-    }
-  }
-
-  switch (type) {
-    case _unknown:
-      _rc->add_root(JVMPI_GC_ROOT_UNKNOWN, obj);
-      break;
-    case _jni_handle:
-      if (obj == JNIHandles::deleted_handle()) {
-        // skip deleted handles
-        break;
-      }
-      if (_thread == NULL) {
-        _rc->add_root(JVMPI_GC_ROOT_JNI_GLOBAL, obj_p);
-      } else {
-        _rc->add_root(JVMPI_GC_ROOT_JNI_LOCAL, obj, _thread, _frame_id, obj_p);
-      }
-      break;
-    case _stack_frame:
-      if (_is_native_frame) {
-        _rc->add_root(JVMPI_GC_ROOT_NATIVE_STACK, obj, _thread);
-      } else if (_is_entry_frame) {
-        // JNI local refs in an entry frame have been traversed separately earlier.
-        // So skip these JNI local refs when they are traversed again in oops_do()
-        // call for this entry frame.
-
-        if (obj != JNIHandles::deleted_handle() && !_rc->is_jni_local(_thread, _frame_id, obj_p)) {
-          _rc->add_root(JVMPI_GC_ROOT_JAVA_FRAME, obj, _thread, _frame_id);
-        }
-      } else {
-        _rc->add_root(JVMPI_GC_ROOT_JAVA_FRAME, obj, _thread, _frame_id);
-      }
-      break;
-    case _system_class:
-      if (is_klass) {
-        _rc->add_root(JVMPI_GC_ROOT_STICKY_CLASS, obj);
-      }
-      break;
-    case _thread_block:
-      assert(_thread != NULL, "NULL thread for CollectRootOopsClosure::_thread_block type");
-      _rc->add_root(JVMPI_GC_ROOT_THREAD_BLOCK, obj, _thread);
-      break;
-    case _monitor_used:
-      _rc->add_root(JVMPI_GC_ROOT_MONITOR_USED, obj);
-      break;
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-// Implementation of RootElementForFrame class
-void RootElementForFrame::dump_roots(Dump* dump, JNIEnv* env_id) const {
-  int length, i;
-
-  length = (_roots != NULL ? _roots->length() : 0);
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_JAVA_FRAME);
-    dump->dump_oop(_roots->at(i));
-    dump->dump_thread(env_id);
-    dump->dump_u4(_depth);
-  }
-  
-  length = (_jni_local_roots != NULL ? _jni_local_roots->length() : 0);
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_JNI_LOCAL);
-    dump->dump_oop(_jni_local_roots->at(i));
-    dump->dump_thread(env_id);
-    dump->dump_u4(_depth);
-  }
-}
-
-jlong RootElementForFrame::root_dump_size() const {
-  jlong size = (_roots != NULL ? _roots->length() : 0) * ROOT_JAVA_FRAME_SIZE;
-  size += (_jni_local_roots != NULL ? _jni_local_roots->length() : 0) * ROOT_JNI_LOCAL_SIZE;
-
-  return size;
-};
-
-// Implementation of RootElementForThread class
-RootElementForThread::RootElementForThread(JavaThread* t, bool is_collect_roots) {
-  _thread = t;
-  _next = NULL;
-  _frameRootInfo = NULL;
-  _empty_java_frame = NULL;
-  _thread_block_roots = NULL;
-  _native_stack_roots = NULL;
-  _num_frames = 0;
-  curRootFrame = NULL;
-
-  if (is_collect_roots) {
-    // create root arrays for collecting roots
-    _native_stack_roots = new (ResourceObj::C_HEAP)GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-    _thread_block_roots = new (ResourceObj::C_HEAP)GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-  }
-
-  get_stack_trace();
-}
-
-RootElementForThread::~RootElementForThread() {
-  RootElementForFrame* p = _frameRootInfo;
-  while (p != NULL) {
-    RootElementForFrame *q = p;
-    p = p->next();
-    delete(q);
-  }
-  delete _empty_java_frame;
-  if (_native_stack_roots != NULL) {
-    delete _native_stack_roots;
-  }
-  if (_thread_block_roots != NULL) {
-    delete _thread_block_roots;
-  }
-}
-
-void RootElementForThread::get_stack_trace(){
-  assert(_thread->thread_state() != _thread_in_Java, "All threads must be blocked at safepoint");
-
-  if (!_thread->has_last_Java_frame()) {
-    _empty_java_frame = new RootElementForFrame(0, false);
-    _empty_java_frame->set_depth(-1);
-    return;
-  }
-
-  vframeStream vfst(_thread);
-  RootElementForFrame* last = NULL;
-  int count = 0;
-
-  // Get call trace for this JavaThread
-  for (; !vfst.at_end(); vfst.next(), count++) {
-    methodOop m = vfst.method(); // The method is not stored GC safe
-    int bci     = vfst.bci();
-    int lineno  = m->is_native() ? (-3) : m->line_number_from_bci(bci);
-
-    RootElementForFrame* p = new RootElementForFrame(vfst.frame_id(),
-                                                     m->is_native(),
-                                                     m->jmethod_id(),
-                                                     lineno);
-    if (last == NULL) {
-      _frameRootInfo = p;
-    } else {
-      last->set_next(p);
-    }
-    last = p;
-  }
-
-  _num_frames = count;
-  for (RootElementForFrame* p = _frameRootInfo; p != NULL; p = p->next(), count--) {
-    p->set_depth(count);
-  }
-}
-
-RootElementForFrame* RootElementForThread::get_frame(intptr_t* id) {
-  if (_num_frames == 0) {
-    return _empty_java_frame;
-  }
-
-  if (id == NULL) {
-    // set to the top vframe
-    return _frameRootInfo;
-  } else if (curRootFrame == NULL || curRootFrame->frame_id() != id) {
-    // find the one with a matching id
-    curRootFrame = NULL;
-    for (RootElementForFrame* p = _frameRootInfo; p != NULL; p = p->next()) {
-      if (p->frame_id() == id) {
-        curRootFrame = p;
-        return curRootFrame;
-      }
-    }
-  }
-  return curRootFrame;
-}
-
-bool RootElementForThread::is_jni_local(intptr_t* id, oop* obj_p) {
-  RootElementForFrame* fr = get_frame(id);
-
-  assert(fr != NULL, "Java Frame not found");
-  return fr->is_jni_local(obj_p);
-}
-
-void RootElementForThread::add_root_to_frame(jint root_type, oop root, intptr_t* id, oop* obj_p) {
-  RootElementForFrame* fr = get_frame(id);
-
-  assert(fr != NULL, "Java Frame not found");
-
-  if (root_type == JVMPI_GC_ROOT_JNI_LOCAL) {
-    fr->add_jni_local(root, obj_p);
-  } else {
-    fr->add_root(root);
-  }
-}
-
-
-void RootElementForThread::add_root(jint root_type, oop root, intptr_t* id, oop* obj_p) {
-  switch (root_type) {
-    case JVMPI_GC_ROOT_JNI_LOCAL:
-      add_root_to_frame(root_type, root, id, obj_p);
-      break;
-    case JVMPI_GC_ROOT_JAVA_FRAME:
-      add_root_to_frame(root_type, root, id);
-      break;
-    case JVMPI_GC_ROOT_NATIVE_STACK:
-      _native_stack_roots->append(root);
-      break;
-    case JVMPI_GC_ROOT_THREAD_BLOCK:
-      _thread_block_roots->append(root);
-      break;
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-jlong RootElementForThread::root_dump_size() const {
-  jlong size = (_empty_java_frame != NULL ? _empty_java_frame->root_dump_size() : 0) + 
-              (_native_stack_roots->length() * ROOT_NATIVE_STACK_SIZE) +
-              (_thread_block_roots->length() * ROOT_THREAD_BLOCK_SIZE);
-
-  for (RootElementForFrame* p = _frameRootInfo; p != NULL; p = p->next()) {
-    size += p->root_dump_size();
-  }
-
-  return size;
-};
-
-void RootElementForThread::dump_roots(Dump* dump) const {
-  JNIEnv* env_id = _thread->jni_environment();
-
-  if (_num_frames == 0) {
-    _empty_java_frame->dump_roots(dump, env_id);
-  } else {
-    for (RootElementForFrame* p = _frameRootInfo; p != NULL; p = p->next()) {
-      p->dump_roots(dump, env_id);
-    }
-  }  
-  
-  int length, i;
-
-  length = _native_stack_roots->length();
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_NATIVE_STACK);
-    dump->dump_oop(_native_stack_roots->at(i));
-    dump->dump_thread(env_id);
-  }
-  length = _thread_block_roots->length();
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_THREAD_BLOCK);
-    dump->dump_oop(_thread_block_roots->at(i));
-    dump->dump_thread(env_id);
-  }
-}
-
-void RootElementForThread::dump_calltrace(CallTraceDump* dump) const {
-  dump->set_calltrace(_thread, _num_frames);
-  for (RootElementForFrame* p = _frameRootInfo; p != NULL; p = p->next()) {
-    dump->set_callframe(p->lineno(), p->method_id());
-  }
-}
-
-// Implementation of HeapDumpInfoCollector
-HeapDumpInfoCollector::HeapDumpInfoCollector(bool collect_gc_roots) {
-  // initialize _threadRootInfo before collecting roots
-  RootElementForThread* q = NULL;
-  _num_threads = 0;
-  for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
-    RootElementForThread* p = new RootElementForThread(thread, collect_gc_roots);
-    if (q == NULL) {
-      _threadRootInfo = p;
-    } else {
-      q->set_next(p);
-    }
-    q = p;
-    _num_threads++;
-  }
-
-  if (collect_gc_roots) {
-    _jni_global_roots = new (ResourceObj::C_HEAP) GrowableArray<oop*>(INIT_ROOTS_ARRAY_SIZE, true);
-    _sticky_class_roots = new (ResourceObj::C_HEAP) GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-    _monitor_used_roots = new (ResourceObj::C_HEAP) GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-    _unknown_roots = new (ResourceObj::C_HEAP) GrowableArray<oop>(INIT_ROOTS_ARRAY_SIZE, true);
-    curRootThread = NULL;
-    collect_roots(); 
-  }
-  is_collect_roots = collect_gc_roots;
-}
-
-HeapDumpInfoCollector::~HeapDumpInfoCollector() {
-  RootElementForThread* p = _threadRootInfo;
-  while (p != NULL) {
-    RootElementForThread *q = p;
-    p = p->next();
-    delete(q);
-  }
-    
-  if (is_collect_roots) {
-    delete _jni_global_roots;
-    delete _sticky_class_roots;
-    delete _monitor_used_roots;
-    delete _unknown_roots;
-  }
-}
-
-// Collect roots for heap dump
-// Note: the current implemenation of collect_roots() requires explicit knowledge
-// about GC strong roots as well as explicit knowledge about frames.  This function
-// may need to be modified if future modification to the VM internal structures is
-// made.  Watch for future modification to oops_do() methods.
-//
-// Another way to implement it is to modify OopClosure class to add new methods
-// (nop by default) for passing additional profiling information. In addition,
-// modify oops_do() method in various classes to call those OopClosure new
-// methods to pass the root type information.  However, it is not advised to 
-// modify OopClosure to affect its simplicity and its semantics. So we chose
-// the current implemenation.
-//
-void HeapDumpInfoCollector::collect_roots() {
-  CollectRootOopsClosure blk(this);
-
-  // Traverse all system classes
-  blk.begin_iterate(CollectRootOopsClosure::_system_class);
-  SystemDictionary::always_strong_oops_do(&blk);
-  blk.end_iterate(CollectRootOopsClosure::_system_class);
-
-  // Traverse all JNI Global references
-  blk.set_thread(NULL);
-  blk.begin_iterate(CollectRootOopsClosure::_jni_handle);
-  JNIHandles::oops_do(&blk);   // Global (strong) JNI handles
-  blk.end_iterate(CollectRootOopsClosure::_jni_handle);
-
-  // Traverse all monitor objects
-  blk.begin_iterate(CollectRootOopsClosure::_monitor_used);
-  ObjectSynchronizer::oops_do(&blk);
-  blk.end_iterate(CollectRootOopsClosure::_monitor_used);
-
-  // Traverse JNI locals and frames for all Java threads 
-  RootElementForFrame *prev_reff = NULL;
-  for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {  
-    blk.set_thread(thread);
-    set_curRootThread(thread);
-
-    // get all JNI local references for the top frame
-    blk.begin_iterate(CollectRootOopsClosure::_jni_handle);
-    thread->active_handles()->oops_do(&blk);
-    blk.end_iterate(CollectRootOopsClosure::_jni_handle);
-
-    // Traverse the execution stack    
-    blk.begin_iterate(CollectRootOopsClosure::_stack_frame);
-    if (thread->has_last_Java_frame()) {
-      for(StackFrameStream fst(thread); !fst.is_done(); fst.next()) {
-        frame* fr = fst.current();
-
-        // skip the first entry frame
-        if (fr->is_first_frame()) continue;
-
-        blk.set_frame_type(fr->is_native_frame(), fr->is_entry_frame());
-        if (fr->is_entry_frame()) {
-          // An entry frame is considered part of the previous Java
-          // frame on the stack. Use the id from the previous frame
-          // that was found on the RootElementForFrame list.
-          assert(prev_reff != NULL, "must have previous frame");
-          blk.set_frame_id(prev_reff->frame_id());
-
-          // traverse the JNI local refs stored in JavaCallWrapper for an entry frame
-          blk.begin_iterate(CollectRootOopsClosure::_jni_handle);
-          fr->entry_frame_call_wrapper()->handles()->oops_do(&blk);
-          blk.end_iterate(CollectRootOopsClosure::_jni_handle);
-
-        } else {
-          // remember id of the current frame for frame information in the oops traversal.
-          blk.set_frame_id(fr->id());
-        }
-        fr->oops_do(&blk, fst.register_map());
-
-        // If the current frame is found on the RootElementForFrame
-        // list, then save it for a possible "entry frame" later.
-        RootElementForFrame *reff = curRootThread->get_frame(fr->id());
-        if (reff != NULL) {
-          prev_reff = reff;
-        }
-      }
-    }
-    blk.end_iterate(CollectRootOopsClosure::_stack_frame);
-  }
-
-  // sort and remove duplicates
-  // no need to sort _jni_global_roots because all JNI global references are
-  // traversed only once.
-  for (RootElementForThread* p = _threadRootInfo; p != NULL; p = p->next()) {
-    p->sort_roots();
-  }
-  sort_roots(_sticky_class_roots);
-  sort_roots(_monitor_used_roots);
-  sort_roots(_unknown_roots);
-}
-
-static int cmp(oop* x, oop* y) { return (oopDesc*)*x - (oopDesc*)*y; }
-void HeapDumpInfoCollector::sort_roots(GrowableArray<oop>* roots) {
-  if (roots == NULL) return;
-
-  // sort roots
-  roots->sort(cmp);
-
-  // remove duplicates by compacting array
-  const int len = roots->length();
-  oop obj = NULL; // we don't need NULL roots
-  int j = 0;
-  for (int i = 0; i < len; i++) {
-    assert(i >= j, "algorithmic error");
-    if (roots->at(i) != obj) {
-      obj = roots->at(i);
-      roots->at_put(j++, obj);
-    }
-  }
-  roots->trunc_to(j);
-  assert(roots->length() == j, "just checking");
-}
-
-void HeapDumpInfoCollector::set_curRootThread(JavaThread *thread) {
-  if (curRootThread == NULL || curRootThread->thread() != thread) {
-    curRootThread = NULL;
-    for (RootElementForThread* p = _threadRootInfo; p != NULL; p = p->next()) {
-      if (p->thread() == thread) {
-        curRootThread = p;
-        break;
-      }
-    }
-  }
-  assert(curRootThread != NULL, "Thread not found");
-}
-
-bool HeapDumpInfoCollector::is_jni_local(JavaThread* thread, intptr_t* sp, oop* obj_p) {
-  set_curRootThread(thread);
-  return curRootThread->is_jni_local(sp, obj_p);
-}
-
-jlong HeapDumpInfoCollector::root_dump_size() const {
-  jlong size = (_jni_global_roots->length() * ROOT_JNI_GLOBAL_SIZE) +
-              (_sticky_class_roots->length() * ROOT_STICKY_CLASS_SIZE) +
-              (_monitor_used_roots->length() * ROOT_MONITOR_USED_SIZE) +
-              (_unknown_roots->length() * ROOT_UNKNOWN_SIZE);
-
-  for (RootElementForThread* p = _threadRootInfo; p != NULL; p = p->next()) {
-    size += p->root_dump_size();
-  }
-  return size;
-}
-
-void HeapDumpInfoCollector::dump_roots(Dump* dump) const {
-  for (RootElementForThread* p = _threadRootInfo; p != NULL; p = p->next()) {
-    p->dump_roots(dump);
-  }
-
-  int length, i;
-
-  length = _jni_global_roots->length();
-  for (i = 0; i < length; i++) {
-    oop* handle = _jni_global_roots->at(i);
-    oop obj = *handle;
-
-    dump->dump_type(JVMPI_GC_ROOT_JNI_GLOBAL);
-    if (obj->is_klass()) {
-      obj = Klass::cast((klassOop)obj)->java_mirror();
-    }
-    dump->dump_oop(obj);
-    dump->dump_voids((void*) handle);
-  }
-  length = _sticky_class_roots->length();
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_STICKY_CLASS);
-    dump->dump_oop(_sticky_class_roots->at(i));
-  }
-  length = _monitor_used_roots->length();
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_MONITOR_USED);
-    dump->dump_oop(_monitor_used_roots->at(i));
-  }
-  length = _unknown_roots->length();
-  for (i = 0; i < length; i++) {
-    dump->dump_type(JVMPI_GC_ROOT_UNKNOWN);
-    dump->dump_oop(_unknown_roots->at(i));
-  }
-
-}
-
-void HeapDumpInfoCollector::add_root_to_thread(jint root_type, oop root, JavaThread* thread, intptr_t* sp, oop* obj_p) {
-  set_curRootThread(thread);
-  curRootThread->add_root(root_type, root, sp, obj_p);
-}
-
-void HeapDumpInfoCollector::add_root(jint root_type, oop* root) {
-  assert(root_type == JVMPI_GC_ROOT_JNI_GLOBAL, "Must be JNI globals");
-
-  bool is_root = true;
-  int length = (skipped_globalrefs != NULL ? skipped_globalrefs->length() : 0);
-  for (int i = 0; i < length; i++) {
-    if (skipped_globalrefs->at(i) == (jobject) root) {
-      is_root = false;
-      break;
-    }
-  }
-
-  if (is_root) {
-    _jni_global_roots->append(root);
-  }
-}
-
-void HeapDumpInfoCollector::add_root(jint root_type, oop root, JavaThread* thread, intptr_t* sp, oop* obj_p) {
-  switch (root_type) {
-    case JVMPI_GC_ROOT_UNKNOWN:
-      _unknown_roots->append(root);
-      break;
-    case JVMPI_GC_ROOT_JNI_LOCAL:
-      add_root_to_thread(root_type, root, thread, sp, obj_p);
-      break;
-    case JVMPI_GC_ROOT_JAVA_FRAME:
-    case JVMPI_GC_ROOT_NATIVE_STACK:
-      add_root_to_thread(root_type, root, thread, sp);
-      break;
-    case JVMPI_GC_ROOT_STICKY_CLASS:
-      _sticky_class_roots->append(root);
-      break;
-    case JVMPI_GC_ROOT_THREAD_BLOCK:
-      add_root_to_thread(root_type, root, thread, sp);
-      break;
-    case JVMPI_GC_ROOT_MONITOR_USED:
-      _monitor_used_roots->append(root);
-      break;
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-void HeapDumpInfoCollector::dump_calltraces(CallTraceDump* dump) const {
-  dump->set_num_traces(_num_threads);
-  for (RootElementForThread* p = _threadRootInfo; p != NULL; p = p->next()) {
-    p->dump_calltrace(dump);
-  }
-}
-
-void jvmpi::post_object_dump_event(oop obj, int flag) {
-  No_GC_Verifier nogc;
-  Dump dump;
-  // 1st dump to measure dump size
-  { ObjectDumper od(&dump, JVMPI_DUMP_LEVEL_2, obj); }
-  // 2nd dump to actually write dump
-  dump.enable_write(dump.size());
-  { ObjectDumper od(&dump, JVMPI_DUMP_LEVEL_2, obj); }
-  // create event
-  JVMPI_Event event;
-  event.event_type             = JVMPI_EVENT_OBJECT_DUMP | flag;
-  event.u.heap_dump.begin      = (char*)dump.begin();
-  event.u.heap_dump.end        = (char*)dump.end  ();
-  event.u.heap_dump.num_traces = 0;
-  event.u.heap_dump.traces     = NULL;
-  // post event
-  post_event_vm_mode(&event, NULL);
-}
-
-class VM_JVMPIPostHeapDump: public VM_Operation {
- private:
-  Dump* _dump;
-  int   _level;
-  int   _flag;
-  CallTraceDump* _traces;
- public:
-  VM_JVMPIPostHeapDump(Dump* dump, int level, int flag, CallTraceDump *traces) { 
-    _dump   = dump; 
-    _level  = level; 
-    _flag   = flag;  
-    _traces = traces;
-  }
-  void doit() {
-    // 1st heap dump to measure dump size for heap objects
-    { JvmpiHeapDumper hd(_dump, _level); }
-    // collect VM roots and dump them
-    if (_level == JVMPI_DUMP_LEVEL_0) {
-      // dump level 0 => no roots
-      HeapDumpInfoCollector rd(false);
-      _dump->enable_write(_dump->size());
-      rd.dump_calltraces(_traces);
-    } else {
-      // dump level 1 & 2 => include roots
-      HeapDumpInfoCollector rd(true);
-      debug_only(int heap_dump_size = _dump->size());
-      debug_only(int gc_root_dump_size = rd.root_dump_size());
-
-      _dump->enable_write((int) rd.root_dump_size() + _dump->size());
-      rd.dump_roots(_dump);
-      rd.dump_calltraces(_traces);
-      assert((int) rd.root_dump_size() == _dump->size(), "dump size inconsistent");
-    }
-    // 2nd heap dump to actually write heap objects
-    { JvmpiHeapDumper hd(_dump, _level); }
-
-    // Disable GC to prevent GC from happening before the agent 
-    // finishes processing the heap dump.
-
-    GC_locker::lock();
-  }
-  const char* name() const { return "post JVMPI heap dump"; }
-};
-
-
-void jvmpi::post_heap_dump_event_in_safepoint(int level, int flag) {
-  Dump dump;
-  CallTraceDump traces;
-
-  {
-    // We must acquire the Heap_lock before collecting heap dump 
-    MutexLocker ml(Heap_lock);
-
-    // We count and collect the heap information at a safepoint
-    VM_JVMPIPostHeapDump op(&dump, level, flag, &traces);
-    VMThread::execute(&op);
-  }
-
-  // Create and post the event in the JavaThread
-  // We don't put this in a doit_epilogue to avoid exposing the Dump class
-  //  assert(Thread::current()->is_Java_thread(), "must be in JavaThread");
-  JVMPI_Event event;
-  event.event_type             = JVMPI_EVENT_HEAP_DUMP | flag;
-  event.u.heap_dump.dump_level = level;
-  event.u.heap_dump.begin      = (char*)dump.begin();
-  event.u.heap_dump.end        = (char*)dump.end  ();
-  event.u.heap_dump.num_traces = traces.get_num_traces();
-  event.u.heap_dump.traces     = traces.get_calltraces();
-  // post event
-  post_event_vm_mode(&event, NULL);
-
-  // Enable GC
-  GC_locker::unlock();
-}
-
-
-class VM_JVMPIPostMonitorDump: public VM_Operation {
- private:
-  Dump* _dump;
-  int   _flag;
- public:
-  VM_JVMPIPostMonitorDump(Dump* dump, int flag) { _dump = dump; _flag = flag; }
-  void doit() {
-    // 1st dump to measure dump size
-    { JavaMonitorDumper md(_dump); 
-      RawMonitorDumper rmd(_dump);
-    }
-    // 2nd dump to actually write dump
-    _dump->enable_write(_dump->size());
-    { JavaMonitorDumper md(_dump); 
-      RawMonitorDumper rmd(_dump);
-    }
-  }
-  const char* name() const { return "post JVMPI monitor dump"; }
-};
-
-
-void jvmpi::post_monitor_dump_event_in_safepoint(int flag) {
-  Dump dump;
-  // We count and collect the monitor information at a safepoint
-  VM_JVMPIPostMonitorDump op(&dump, flag);
-  VMThread::execute(&op);
-  // Create and post the event in the JavaThread
-  // We don't put this in a doit_epilogue to avoid exposing the Dump class
-//  assert(Thread::current()->is_Java_thread(), "must be in JavaThread");
-  JVMPI_Event event;
-  event.event_type = JVMPI_EVENT_MONITOR_DUMP | flag;
-  event.u.monitor_dump.begin          = (char*)dump.begin();
-  event.u.monitor_dump.end            = (char*)dump.end  ();
-  event.u.monitor_dump.num_traces     = 0;
-  event.u.monitor_dump.threads_status = 0;
-  // post event
-  post_event_vm_mode(&event, NULL);
-}
-
-
-bool should_invalidate_nmethods(jint event_type) {
-  switch (event_type) {
-    case JVMPI_EVENT_METHOD_ENTRY : // fall through
-    case JVMPI_EVENT_METHOD_ENTRY2: // fall through
-    case JVMPI_EVENT_METHOD_EXIT  : return true;
-  }
-  return false;
-}
-
-
-void invalidate_nmethods() {
-  // need do deoptimize all frames; for the moment we just make all methods
-  // non-entrant
-}
-
-
-bool needs_slow_allocation(jint event_type) {
-  switch(event_type) {
-    case JVMPI_EVENT_OBJECT_ALLOC       : // fall through
-    case JVMPI_EVENT_OBJECT_MOVE        : // fall through
-    case JVMPI_EVENT_OBJECT_FREE        : // fall through
-    case JVMPI_EVENT_ARENA_NEW          : // fall through
-    case JVMPI_EVENT_DELETE_ARENA       : // fall through
-    case JVMPI_EVENT_JNI_GLOBALREF_ALLOC: // fall through
-    case JVMPI_EVENT_JNI_GLOBALREF_FREE : return true;
-  }
-  return false;
-}
-
-void jvmpi::reset_jvmpi_allocation() {
-  bool use_jvmpi_allocation = (is_event_enabled(JVMPI_EVENT_OBJECT_ALLOC) ||
-                               is_event_enabled(JVMPI_EVENT_OBJECT_MOVE)  ||
-                               is_event_enabled(JVMPI_EVENT_OBJECT_FREE)  ||
-                               is_event_enabled(JVMPI_EVENT_ARENA_NEW)    ||
-                               is_event_enabled(JVMPI_EVENT_DELETE_ARENA) ||
-                               is_event_enabled(JVMPI_EVENT_JNI_GLOBALREF_ALLOC) ||
-                               is_event_enabled(JVMPI_EVENT_JNI_GLOBALREF_FREE));
-
-  if (use_jvmpi_allocation && !slow_allocation) {
-    // Enable slow allocation
-
-    slow_allocation = true;
-    Universe::set_jvmpi_alloc_event_enabled(Universe::_jvmpi_enabled);
-
-    // Note:  I think disabling GC-events should be done only 
-    // during startup time.  When the agent is ready to handle
-    // GC-events, we should report it.  As this piece of code
-    // has been there for a while, I just leave it as it is but 
-    // we should look into it in jvmpi 2.0.
-
-    // it is too early to report GC-events
-    bool old_gc_start = is_event_enabled(JVMPI_EVENT_GC_START);
-    bool old_gc_finish = is_event_enabled(JVMPI_EVENT_GC_FINISH);
-    disable_event(JVMPI_EVENT_GC_START);
-    disable_event(JVMPI_EVENT_GC_FINISH);
-
-    // ensure that the heap is initialized the way we want it to be;
-    // in particular, the new generation must be filled so we always
-    // perform slow allocations
-    Universe::heap()->collect(GCCause::_java_lang_system_gc);
-
-    if (old_gc_start) enable_event(JVMPI_EVENT_GC_START);
-    if (old_gc_finish) enable_event(JVMPI_EVENT_GC_FINISH);
-
-  } else if (!use_jvmpi_allocation && slow_allocation) {
-    // Disable slow allocation
-
-    slow_allocation = false;
-
-    // Do a GC to enable the heap for fast allocation since the new generation
-    // was filled up for slow allocation.  
-    // Note that fast allocation is not immediately turned on until a GC 
-    // is completed.  If GC is disabled (due to some other jvmpi events),  
-    // this GC is cancelled and the new generation is still filled up.
-
-    Universe::set_jvmpi_alloc_event_enabled(Universe::_jvmpi_disabling);
-    Universe::heap()->collect(GCCause::_java_lang_system_gc);
-  }
-}
-
-// ----------------------------------------------
-// Functions exported through the JVMPI interface
-// ----------------------------------------------
-
-JVMPI_ENTRY(jint, jvmpi::enable_event(jint event_type, void *arg))
-  if (!is_event_supported(event_type)) {
-    return JVMPI_NOT_AVAILABLE;
-  }
-
-  enable_event(event_type);
-  if (should_invalidate_nmethods(event_type)) {
-    invalidate_nmethods();
-  }
-  if (event_type == JVMPI_EVENT_OBJECT_MOVE) {
-    Universe::set_jvmpi_move_event_enabled(true);
-  } else if (event_type == JVMPI_EVENT_METHOD_ENTRY || event_type == JVMPI_EVENT_METHOD_ENTRY2) {
-  // Missing disabling of inlining
-    // Inline flag is a constant in product mode
-    // Inline = false;
-  } else if (event_type == JVMPI_EVENT_JNI_GLOBALREF_ALLOC) {
-    Universe::set_jvmpi_jni_global_alloc_event_enabled(true);
-  } else if (event_type == JVMPI_EVENT_JNI_GLOBALREF_FREE) {
-    Universe::set_jvmpi_jni_global_free_event_enabled(true);
-  } else if (event_type == JVMPI_EVENT_JNI_WEAK_GLOBALREF_ALLOC) {
-    Universe::set_jvmpi_jni_weak_global_alloc_event_enabled(true);
-  } else if (event_type == JVMPI_EVENT_JNI_WEAK_GLOBALREF_FREE) {
-    Universe::set_jvmpi_jni_weak_global_free_event_enabled(true);
-  }
-
-  // enable slow allocation, if necessary 
-  if (!slow_allocation && needs_slow_allocation(event_type)) {
-    reset_jvmpi_allocation();
-  }
-  return JVMPI_SUCCESS;
-JVMPI_END
-
-
-JVMPI_ENTRY(jint, jvmpi::disable_event(jint event_type, void *arg))
-  if (!is_event_supported(event_type)) {
-    return JVMPI_NOT_AVAILABLE;
-  }
-
-  if (should_invalidate_nmethods(event_type)) {
-    invalidate_nmethods();
-  }
-  disable_event(event_type);
-
-  if (event_type == JVMPI_EVENT_OBJECT_MOVE) {
-    Universe::set_jvmpi_move_event_enabled(false);
-  } else if (event_type == JVMPI_EVENT_JNI_GLOBALREF_ALLOC) {
-    Universe::set_jvmpi_jni_global_alloc_event_enabled(false);
-  } else if (event_type == JVMPI_EVENT_JNI_GLOBALREF_FREE) {
-    Universe::set_jvmpi_jni_global_free_event_enabled(false);
-  } else if (event_type == JVMPI_EVENT_JNI_WEAK_GLOBALREF_ALLOC) {
-    Universe::set_jvmpi_jni_weak_global_alloc_event_enabled(false);
-  } else if (event_type == JVMPI_EVENT_JNI_WEAK_GLOBALREF_FREE) {
-    Universe::set_jvmpi_jni_weak_global_free_event_enabled(false);
-  }
-
-  // disable slow allocation and use fast allocation, if necessary 
-  if (slow_allocation && needs_slow_allocation(event_type)) {
-    reset_jvmpi_allocation();
-  }
-  return JVMPI_SUCCESS;
-JVMPI_END
-
-
-JVMPI_ENTRY(void, jvmpi::disable_gc())
-  GC_locker::lock();
-JVMPI_END
-
-
-JVMPI_ENTRY(void, jvmpi::enable_gc())
-  GC_locker::unlock();
-JVMPI_END
-
-inline bool is_valid_method(methodOop method) {
-  if (method == NULL || 
-      !method->is_perm() || 
-      oop(method)->klass() != Universe::methodKlassObj() ||
-      !method->is_method()) {
-    return false;   // doesn't look good
-  }
-  return true;      // hopefully this is a method indeed
-}
-
-// Return the top-most frame that can be used for vframeStream
-// This frame will be skipped by vframeStream for stack walking.
-frame is_walkable_frame(JavaThread* thread, frame* fr, methodOop* method_p, int* bci_p) {
-  methodOop method = NULL;
-  int bci = -1;
-  frame walkframe;
-
-  if (fr->is_interpreted_frame()) {
-    // top frame is an interpreted frame 
-    // check if it is walkable (i.e. valid methodOop and valid bci)
-    if (fr->is_interpreted_frame_valid()) {
-      if (fr->fp() != NULL) {
-        // access address in order not to trigger asserts that
-        // are built in interpreter_frame_method function
-        method = *fr->interpreter_frame_method_addr();
-        if (is_valid_method(method)) {
-          intptr_t bcx = fr->interpreter_frame_bcx();
-          bci = method->validate_bci_from_bcx(bcx);
-          walkframe = *fr;
-        } else {
-          method = NULL;
-        }
-      }
-    }
-
-  } else {
-    method = NULL;
-    walkframe = *fr;
-    // Determine if this top frame is executing a Java method.
-    if (CodeCache::contains(fr->pc())) {
-      // top frame is a compiled frame or stub routines
-      CodeBlob* cb = CodeCache::find_blob(fr->pc());
-      if (cb->is_nmethod()) {
-        method = ((nmethod *)cb)->method();
-      }
-    }
-  }
-
-  if (method_p != NULL) {
-    *method_p = method;
-  }
-  if(bci_p != NULL) {
-    *bci_p = bci;
-  }
-  return walkframe;
-}
-
-// The thread we are examining must be suspended
-void fill_call_trace_at_safepoint(JavaThread* thd, JVMPI_CallTrace* trace, int depth) {
-  vframeStream st(thd); 
-
-  int count = 0;
-  // collect the rest
-  for (;!st.at_end() && count < depth; st.next(), count++) {    
-    methodOop m = st.method(); // The method is not stored GC safe
-    int bci     = st.bci();
-    int lineno  = m->is_native() ? (-3) : m->line_number_from_bci(bci);
-    trace->frames[count].method_id = m->jmethod_id();
-    trace->frames[count].lineno = lineno;
-  }
-
-  trace->num_frames = count;
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: fill_call_trace_at_safepoint return, thread: " INTPTR_FORMAT ", trace->num_frames = %d\n",
-		  thd, trace->num_frames);
-  }
-  return; 
-}
-
-void fill_call_trace_given_top(JavaThread* thd, JVMPI_CallTrace* trace, int depth, frame top_frame) {
-  frame walkframe;
-  methodOop method;
-  int bci;
-  int count;
-
-  count = 0;
-  assert(trace->frames != NULL, "trace->frames must be non-NULL");
-
-  walkframe = is_walkable_frame(thd, &top_frame, &method, &bci);
-  if (method != NULL) {
-    count++;
-    trace->num_frames = count;
-    trace->frames[0].method_id = method->jmethod_id();
-    if (!method->is_native()) {
-      trace->frames[0].lineno = method->line_number_from_bci(bci);
-    } else {
-      trace->frames[0].lineno = -3;
-    }
-  } 
-
-  // return if no walkable frame is found
-  if (walkframe.sp() == NULL) {
-    return;
-  }
-
-  // check has_last_Java_frame() after looking at the top frame
-  // which may be an interpreted Java frame.
-  if (!thd->has_last_Java_frame() && count == 0) {
-    trace->num_frames = 0;
-    return;
-  }
-
-  vframeStream st(thd, walkframe);
-  for (; !st.at_end() && count < depth; st.next(), count++) {
-    bci = st.bci();
-    method = st.method(); // The method is not stored GC safe
-
-    trace->frames[count].method_id = method->jmethod_id();
-    if (!method->is_native()) {
-      trace->frames[count].lineno = method->line_number_from_bci(bci);
-    } else {
-      trace->frames[count].lineno = -3;
-    }
-  }
-  trace->num_frames = count;
-  return;
-}
-
-JVMPI_ENTRY(void, jvmpi::get_call_trace(JVMPI_CallTrace *trace, jint depth))
-  JavaThread* thd;
-  ResourceMark rm;
-
-  trace->num_frames = 0;
-  if (!((trace->env_id) && (thd = JavaThread::thread_from_jni_environment(trace->env_id))))  {
-    return;
-  }
-
-  // ensure thread suspension completed for other threads
-  // Note: need to ensure hprof agent actually suspends threads
-  // May need to temporarily suspend thread for the caller
-  uint32_t debug_bits = 0;
-  if (thd != Thread::current()) {
-    if (!thd->wait_for_ext_suspend_completion(SuspendRetryCount,
-        SuspendRetryDelay, &debug_bits)) {
-      return;
-    }
-  }
-
-  switch (thd->thread_state()) {
-    // The thread is either in the VM or in native code so use information
-    // from the last Java frame.
-    case _thread_blocked:
-    case _thread_in_native:
-    case _thread_in_vm:    
-      if (thd->has_last_Java_frame()) {
-        fill_call_trace_at_safepoint(thd, trace, depth);
-      }
-      break;
-    case _thread_in_Java:  
-      { frame fr;
-        trace->num_frames = 0;
-        // profile_last_Java_frame sets up the frame 'fr' and returns true;
-        if (thd->profile_last_Java_frame(&fr)) {
-          fill_call_trace_given_top(thd, trace, depth, fr);
-        }
-      }
-      break;
-    default: break;
-  }
-JVMPI_END
-
-
-JVMPI_ENTRY(jlong, jvmpi::get_current_thread_cpu_time())
-  return os::current_thread_cpu_time();
-JVMPI_END
-
-
-JVMPI_RAW_ENTRY(JVMPI_RawMonitor, jvmpi::raw_monitor_create(char *lock_name))
-  RawMonitor * monitor = new RawMonitor(lock_name, PROF_RM_MAGIC);
-  return (JVMPI_RawMonitor)monitor;
-JVMPI_RAW_END
-
-
-JVMPI_RAW_ENTRY(void, jvmpi::raw_monitor_enter(JVMPI_RawMonitor lock_id))
-  RawMonitor *monitor = (RawMonitor *)lock_id;
-  if (!(PROF_RM_CHECK(monitor))) {
-      return;
-  }
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: raw_monitor_enter for thread id " INTPTR_FORMAT " lock_id = " INTPTR_FORMAT " ", THREAD, lock_id);
-  }
-  // JVMPI can't do proper transitions on RAW_ENTRY
-  // Because VM thread posting events can deadlock. When
-  // vmthread posting is fixed enable this code
-  if (THREAD && THREAD->is_Java_thread()) {
-#ifdef PROPER_TRANSITIONS
-    ThreadInVMfromUnknown __tiv;
-    {
-      ThreadBlockInVM __tbivm((JavaThread*)THREAD);
-      monitor->raw_enter(THREAD, true);
-    }
-#else
-
-    /* Transition to thread_blocked without entering vm state          */
-    /* This is really evil. Normally you can't undo _thread_blocked    */
-    /* transitions like this because it would cause us to miss a       */
-    /* safepoint but since the thread was already in _thread_in_native */
-    /* the thread is not leaving a safepoint safe state and it will    */
-    /* block when it tries to return from native. We can't safepoint   */
-    /* block in here because we could deadlock the vmthread. Blech.    */
-
-    JavaThread* jt = (JavaThread*) THREAD;
-    JavaThreadState state = jt->thread_state();
-    assert(state == _thread_in_native, "Must be _thread_in_native");
-    // frame should already be walkable since we are in native
-    assert(!jt->has_last_Java_frame() || jt->frame_anchor()->walkable(), "Must be walkable");
-    jt->set_thread_state(_thread_blocked);
-
-    monitor->raw_enter(THREAD, true);
-
-    // restore state, still at a safepoint safe state
-    jt->set_thread_state(state);
-#endif /* PROPER_TRANSITIONS */
-  } else {
-    monitor->raw_enter(THREAD, true);
-  }
-
-JVMPI_RAW_END
-
-
-JVMPI_RAW_ENTRY(void, jvmpi::raw_monitor_exit(JVMPI_RawMonitor lock_id))
-  RawMonitor *monitor = (RawMonitor *)lock_id;
-  if (!(PROF_RM_CHECK(monitor))) {
-      return;
-  }
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: raw_monitor_exit for thread id " INTPTR_FORMAT " lock_id = " INTPTR_FORMAT " ", THREAD, lock_id);
-  }
-  // JVMPI can't do proper transitions on RAW_ENTRY
-  // Because VM thread posting events can deadlock. When
-  // vmthread posting is fixed enable this code
-#ifdef PROPER_TRANSITIONS
-  if (THREAD && THREAD->is_Java_thread()) {
-    ThreadInVMfromUnknown __tiv;
-    monitor->raw_exit(THREAD, true);
-  } else {
-    monitor->raw_exit(THREAD, true);
-  }
-#else
-  // Doesn't block so we don't need to do anything special here
-  monitor->raw_exit(THREAD, true);
-#endif /* PROPER_TRANSITIONS */
-
-JVMPI_RAW_END
-
-
-JVMPI_RAW_ENTRY(void, jvmpi::raw_monitor_destroy(JVMPI_RawMonitor lock_id))
-  RawMonitor *monitor = (RawMonitor *)lock_id;
-  if (!(PROF_RM_CHECK(monitor))) {
-      return;
-  }
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: raw_monitor_destroy for thread id " INTPTR_FORMAT " lock_id = " INTPTR_FORMAT " ", THREAD, lock_id);
-  }
-  // JVMPI can't do proper transitions on RAW_ENTRY
-  // Because VM thread posting events can deadlock. When
-  // vmthread posting is fixed enable this code
-#ifdef PROPER_TRANSITIONS
-  if (THREAD && THREAD->is_Java_thread()) {
-    ThreadInVMfromUnknown __tiv;
-    monitor->raw_exit(THREAD, true);
-    monitor->raw_destroy();
-  } else {
-    monitor->raw_exit(THREAD, true);
-    monitor->raw_destroy();
-  }
-#else
-  // Doesn't block so we don't need to do anything special here
-  monitor->raw_exit(THREAD, true);
-  monitor->raw_destroy();
-#endif /* PROPER_TRANSITIONS */
-
-JVMPI_RAW_END
-
-
-JVMPI_RAW_ENTRY(void, jvmpi::raw_monitor_wait(JVMPI_RawMonitor lock_id, jlong ms))
-  RawMonitor *monitor = (RawMonitor *)lock_id;
-  if (!(PROF_RM_CHECK(monitor))) {
-      return;
-  }
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: raw_monitor_wait for thread id " INTPTR_FORMAT " lock_id = " INTPTR_FORMAT " ", THREAD, lock_id);
-  }
-  // JVMPI can't do proper transitions on RAW_ENTRY
-  // Because VM thread posting events can deadlock. When
-  // vmthread posting is fixed enable this code
-  if (THREAD && THREAD->is_Java_thread()) {
-#ifdef PROPER_TRANSITIONS
-    ThreadInVMfromUnknown __tiv;
-    {
-      ThreadBlockInVM __tbivm((JavaThread*) THREAD);
-      monitor->raw_wait(ms, true, THREAD);
-    }
-#else
-    /* Transition to thread_blocked without entering vm state          */
-    /* This is really evil. Normally you can't undo _thread_blocked    */
-    /* transitions like this because it would cause us to miss a       */
-    /* safepoint but since the thread was already in _thread_in_native */
-    /* the thread is not leaving a safepoint safe state and it will    */
-    /* block when it tries to return from native. We can't safepoint   */
-    /* block in here because we could deadlock the vmthread. Blech.    */
-
-    JavaThread* jt = (JavaThread*) THREAD;
-    JavaThreadState state = jt->thread_state();
-    assert(state == _thread_in_native, "Must be _thread_in_native");
-    // frame should already be walkable since we are in native
-    assert(!jt->has_last_Java_frame() || jt->frame_anchor()->walkable(), "Must be walkable");
-    jt->set_thread_state(_thread_blocked);
-
-    monitor->raw_wait(ms, true, THREAD);
-    // restore state, still at a safepoint safe state
-    jt->set_thread_state(state);
-
-#endif /* PROPER_TRANSITIONS */
-  } else {
-    monitor->raw_wait(ms, true, THREAD);
-  }
-
-JVMPI_RAW_END
-
-
-JVMPI_RAW_ENTRY(void, jvmpi::raw_monitor_notify_all(JVMPI_RawMonitor lock_id))
-  RawMonitor *monitor = (RawMonitor *)lock_id;
-  if (!(PROF_RM_CHECK(monitor))) {
-      return;
-  }
-  if (TraceJVMPI) {
-    tty->cr();
-    tty->print_cr("JVMPI: raw_monitor_notify_all for thread id " INTPTR_FORMAT " lock_id = " INTPTR_FORMAT " ", THREAD, lock_id);
-  }
-  // JVMPI can't do proper transitions on RAW_ENTRY
-  // Because VM thread posting events can deadlock. When
-  // vmthread posting is fixed enable this code
-#ifdef PROPER_TRANSITIONS
-  if (THREAD && THREAD->is_Java_thread()) {
-    ThreadInVMfromUnknown __tiv;
-    monitor->raw_notifyAll(THREAD);
-  } else {
-    monitor->raw_notifyAll(THREAD);
-  }
-#else
-  // Doesn't block so we don't need to do anything special here
-  monitor->raw_notifyAll(THREAD);
-#endif /* PROPER_TRANSITIONS */
-
-JVMPI_RAW_END
-
-// Use shared java_suspend.
-JVMPI_ENTRY(void, jvmpi::suspend_thread(JNIEnv *env))
-  if (env == NULL) return;
-  JavaThread *java_thread = JavaThread::thread_from_jni_environment(env);
-  if (java_thread == NULL) return;
-  // the thread has not yet run or has exited (not on threads list)
-  if (java_thread->threadObj() == NULL) return;
-  if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) return;
-
-  // don't allow hidden thread suspend request.
-  if (java_thread->is_hidden_from_external_view()) {
-    return;
-  }
-
-  // Don't allow self-suspension, hprof agent expects to keep
-  // running so as to process resumes of all threads.
-  if (Thread::current() == (Thread *)java_thread) {
-    return;
-  }
-
-  {
-    MutexLockerEx ml(java_thread->SR_lock(), Mutex::_no_safepoint_check_flag);
-    if (java_thread->is_external_suspend()) {
-      // Don't allow nested external suspend requests. We can't return
-      // an error from this interface so just ignore the problem.
-      return;
-    }
-    if (java_thread->is_exiting()) { // thread is in the process of exiting
-      return;
-    }
-    java_thread->set_external_suspend();
-  }
-
-  //
-  // If a thread in state _thread_in_native is not immediately
-  // suspended, then a blocked RawMonitorEnter() call may enter
-  // the RawMonitor even if RawMonitorExit() is called after
-  // SuspendThread() returns. java_suspend() will catch threads
-  // in the process of exiting and will ignore them.
-  //
-  java_thread->java_suspend();
-
-  // It would be nice to have the following assertion in all the time,
-  // but it is possible for a racing resume request to have resumed
-  // this thread right after we suspended it. Temporarily enable this
-  // assertion if you are chasing a different kind of bug.
-  //
-  // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
-  //   java_thread->is_being_ext_suspended(), "thread is not suspended");
-JVMPI_END
-
-// Use shared java_suspend.
-JVMPI_ENTRY(void, jvmpi::suspend_thread_list(jint reqCnt, JNIEnv **reqList, jint *results))
-
-  if (reqCnt <= 0 || reqList == NULL || results == NULL) {
-    // parameter problem so bail out
-    return;
-  }
-
-  int needSafepoint = 0;  // > 0 if we need a safepoint
-
-  for (int i = 0; i < reqCnt; i++) {
-    if (reqList[i] == NULL) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-    JavaThread *java_thread = JavaThread::thread_from_jni_environment(reqList[i]);
-    if (java_thread == NULL) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-    // the thread has not yet run or has exited (not on threads list)
-    if (java_thread->threadObj() == NULL) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-    if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-    // don't allow hidden thread suspend request.
-    if (java_thread->is_hidden_from_external_view()) {
-      results[i] = 0;  // indicate successful suspend
-      continue;
-    }
-
-    // Don't allow self-suspension, hprof agent expects to keep
-    // running so as to process resumes of all threads.
-    if (Thread::current() == (Thread *)java_thread) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-
-    {
-      MutexLockerEx ml(java_thread->SR_lock(), Mutex::_no_safepoint_check_flag);
-      if (java_thread->is_external_suspend()) {
-        // Don't allow nested external suspend requests. We can't return
-        // an error from this interface so just ignore the problem.
-        results[i] = 14; // same as JVMDI_ERROR_THREAD_SUSPENDED
-        continue;
-      }
-      if (java_thread->is_exiting()) { // thread is in the process of exiting
-        results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-        continue;
-      }
-      java_thread->set_external_suspend();
-    }
-
-    if (java_thread->thread_state() == _thread_in_native) {
-      // We need to try and suspend native threads here. Threads in
-      // other states will self-suspend on their next transition.
-      // java_suspend() will catch threads in the process of exiting
-      // and will ignore them.
-      java_thread->java_suspend();
-    } else {
-      needSafepoint++;
-    }
-
-    results[i] = 0;  // indicate successful suspend
-  }
-
-  if (needSafepoint > 0) {
-    VM_ForceSafepoint vfs;
-    VMThread::execute(&vfs);
-  }
-JVMPI_END
-
-// Use shared java_resume. Requires owning the Threads lock.
-JVMPI_ENTRY(void, jvmpi::resume_thread(JNIEnv *env))
-  JavaThread *java_thread;
-  if ((env) && (java_thread = JavaThread::thread_from_jni_environment(env))) {
-    MutexLocker ml(Threads_lock);
-
-    // don't allow hidden thread resume request.
-    if (java_thread->is_hidden_from_external_view()) {
-      return;
-    }
-
-    java_thread->java_resume();
-  }
-JVMPI_END
-
-// Use shared java_resume. Requires owning the Threads lock.
-JVMPI_ENTRY(void, jvmpi::resume_thread_list(jint reqCnt, JNIEnv **reqList, jint *results))
-
-  if (reqCnt <= 0 || reqList == NULL || results == NULL) {
-    // parameter problem so bail out
-    return;
-  }
-
-  for (int i = 0; i < reqCnt; i++) {
-    if (reqList[i] == NULL) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-    JavaThread *java_thread = JavaThread::thread_from_jni_environment(reqList[i]);
-    if (java_thread == NULL) {
-      results[i] = 10; // same as JVMDI_ERROR_INVALID_THREAD
-      continue;
-    }
-    // don't allow hidden thread resume request.
-    if (java_thread->is_hidden_from_external_view()) {
-      results[i] = 0;  // indicate successful resume
-      continue;
-    }
-
-    {
-      MutexLocker ml(Threads_lock);
-      java_thread->java_resume();
-    }
-
-    results[i] = 0;  // indicate successful resume
-  }
-JVMPI_END
-
-// 2.0: redesign to match jvmdi. handle errors and more states
-JVMPI_ENTRY(jint, jvmpi::get_thread_status(JNIEnv *env))
-  jint res = JVMPI_THREAD_RUNNABLE;
-  JavaThread *tp;
-  { MutexLocker mu(Threads_lock);
-    if ((env) && (tp = JavaThread::thread_from_jni_environment(env))) {
-      JavaThreadState state;
-      ThreadState t_state;
-      if ((state = tp->thread_state()) && (tp->osthread()) && (t_state = tp->osthread()->get_state())) {
-
-          if (state == _thread_blocked|| state == _thread_blocked_trans) {
-              switch (t_state) {
-                  case CONDVAR_WAIT: 
-                  case OBJECT_WAIT: 
-                      res = JVMPI_THREAD_CONDVAR_WAIT; 
-                      break;
-                  case MONITOR_WAIT: 
-                      res = JVMPI_THREAD_MONITOR_WAIT; 
-                      break;
-                  case SLEEPING:
-                  case ZOMBIE:
-                  case RUNNABLE    : // fall through
-                      res = JVMPI_THREAD_RUNNABLE;
-                      break;
-                  default:
-                      break;
-              }
-           }
-          if (tp->is_being_ext_suspended()) {
-              // internal suspend doesn't count for this flag
-              res = res | JVMPI_THREAD_SUSPENDED;
-          }
-          if (tp->osthread()->interrupted()) {
-              res = res | JVMPI_THREAD_INTERRUPTED;
-          }
-      }
-    }
-  } // release Threads_lock
-  return res;
-JVMPI_END
-
-
-// There is no provision in VM to check that; assume yes
-// Do NOT call thread_is_running - this calls thr_getstate
-// which only works if you have called thr_suspend.
-JVMPI_ENTRY(jboolean, jvmpi::thread_has_run(JNIEnv *env))
-  JavaThread* java_thread;
-  if ((env) && (java_thread = JavaThread::thread_from_jni_environment(env)))  {
-    return JNI_TRUE;
-  } else {
-    return JNI_FALSE;
-  }
-JVMPI_END
-
-
-JVMPI_ENTRY(void, jvmpi::run_gc())
-  Universe::heap()->collect(GCCause::_java_lang_system_gc);
-JVMPI_END
-
-
-JVMPI_ENTRY(void, jvmpi::profiler_exit(jint exit_code))
-  vm_exit(exit_code /*user_exit == true*/); 
-  ShouldNotReachHere();
-JVMPI_END
-
-
-static void jvmpi_daemon_thread_entry(JavaThread* thread, TRAPS) {
-  assert(thread->is_jvmpi_daemon_thread(), "wrong thread");
-  JVMPIDaemonThread* daemon_thread = (JVMPIDaemonThread*)thread;
-
-  // ThreadToNativeFromVM takes care of changing thread_state, so safepoint code knows that
-  // we have left the VM
-  { JavaThread* thread = (JavaThread*) THREAD;
-    ThreadToNativeFromVM ttn(thread);
-    HandleMark hm(thread);
-
-    daemon_thread->function()(NULL);
-  }
-}
-
-
-JVMPI_ENTRY(jint, jvmpi::create_system_thread(char *name, jint priority, JVMPIDaemonFunction f))
-  const int invalid_res = JNI_ERR;
-  klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK_(invalid_res));
-  instanceKlassHandle klass (THREAD, k);
-  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_(invalid_res));
-  Handle string = java_lang_String::create_from_str(name, CHECK_(invalid_res));    
-
-  // Initialize thread_oop to put it into the system threadGroup    
-  Handle thread_group (THREAD, Universe::system_thread_group());
-  JavaValue result(T_VOID);
-  JavaCalls::call_special(&result, thread_oop, 
-                         klass, 
-                         vmSymbolHandles::object_initializer_name(), 
-                         vmSymbolHandles::threadgroup_string_void_signature(), 
-                         thread_group, 
-                         string, 
-                         CHECK_(invalid_res));  
-  
-  { MutexLocker mu(Threads_lock);
-    JVMPIDaemonThread* daemon_thread = new JVMPIDaemonThread(&jvmpi_daemon_thread_entry, f);
-
-    // At this point it may be possible that no osthread was created for the
-    // JavaThread due to lack of memory.
-    if (daemon_thread == NULL || daemon_thread->osthread() == NULL) {
-      if (daemon_thread) delete daemon_thread;
-      return JNI_ERR;
-    }
-   
-    ThreadPriority thread_priority = NoPriority;
-    switch (priority) {
-      case JVMPI_MINIMUM_PRIORITY: thread_priority = MinPriority ; break;
-      case JVMPI_MAXIMUM_PRIORITY: thread_priority = MaxPriority ; break;
-      case JVMPI_NORMAL_PRIORITY : thread_priority = NormPriority; break;
-      default: ShouldNotReachHere();
-    }
-
-    java_lang_Thread::set_thread(thread_oop(), daemon_thread);      
-    java_lang_Thread::set_priority(thread_oop(), thread_priority);
-    java_lang_Thread::set_daemon(thread_oop());
-
-    daemon_thread->set_threadObj(thread_oop());
-    Threads::add(daemon_thread);  
-    Thread::start(daemon_thread);
-
-  } // Release Threads_lock before calling up to agent code
-  // post_thread_start_event called from "run"
-  
-  return JNI_OK;
-JVMPI_END
-
-
-JVMPI_ENTRY(jint, jvmpi::request_event(jint event_type, void *arg))
-  switch (event_type) {
-    case JVMPI_EVENT_OBJECT_ALLOC:
-      post_object_alloc_event((oop)arg, ((oop)arg)->size() * HeapWordSize,
-			      Universe::heap()->addr_to_arena_id(arg),
-			      JVMPI_REQUESTED_EVENT);
-      return JVMPI_SUCCESS;
-    case JVMPI_EVENT_THREAD_START:
-      post_thread_start_event(java_lang_Thread::thread((oop)arg), JVMPI_REQUESTED_EVENT);
-      return JVMPI_SUCCESS;
-    case JVMPI_EVENT_CLASS_LOAD:
-      post_class_load_event((oop)arg, JVMPI_REQUESTED_EVENT);
-      return JVMPI_SUCCESS;
-    case JVMPI_EVENT_OBJECT_DUMP:
-      post_object_dump_event((oop)arg, JVMPI_REQUESTED_EVENT);
-      return JVMPI_SUCCESS;
-    case JVMPI_EVENT_HEAP_DUMP: {
-      int heap_dump_level;
-
-      if (arg == NULL) {
-        heap_dump_level = JVMPI_DUMP_LEVEL_2;
-      } else {
-        heap_dump_level = ((JVMPI_HeapDumpArg*)arg)->heap_dump_level;
-      }
-
-      post_heap_dump_event_in_safepoint(heap_dump_level, JVMPI_REQUESTED_EVENT);
-      return JVMPI_SUCCESS;
-    }
-    case JVMPI_EVENT_MONITOR_DUMP:
-      post_monitor_dump_event_in_safepoint(JVMPI_REQUESTED_EVENT);
-      return JVMPI_SUCCESS;
-    default:
-      return JVMPI_NOT_AVAILABLE;
-  }
-JVMPI_END
-
-
-// Using JVMPI_RAW_ENTRY() to allow this API to be called from a
-// SIGPROF signal handler. ThreadInVMFromUnknown's use of a
-// HandleMarkCleaner will cleanup unexpected Handles when called
-// from a signal handler.
-JVMPI_RAW_ENTRY(void, jvmpi::set_thread_local_storage(JNIEnv *env, void *ptr))
-  if (env != NULL) {
-    JavaThread* jt = JavaThread::thread_from_jni_environment(env);
-    if (jt != NULL) {
-      jt->set_jvmpi_data(ptr);
-    }
-  }
-JVMPI_END
-
-
-// See set_thread_local_storage comment above.
-JVMPI_RAW_ENTRY(void*, jvmpi::get_thread_local_storage(JNIEnv *env))
-  if (env == NULL) return NULL;
-  JavaThread* jt = JavaThread::thread_from_jni_environment(env);
-  if (jt == NULL) return NULL;
-  return jt->jvmpi_data();
-JVMPI_END
-
-
-JVMPI_ENTRY(jobjectID, jvmpi::get_thread_object(JNIEnv *env))
-  if (env == NULL) return NULL;
-  return (jobjectID) JavaThread::thread_from_jni_environment(env)->threadObj();
-JVMPI_END
-
-
-JVMPI_ENTRY(jobjectID, jvmpi::get_method_class(jmethodID mid))
-  return (jobjectID) Klass::cast(JNIHandles::resolve_jmethod_id(mid)->method_holder())->java_mirror();
-JVMPI_END
-
-
-JVMPI_ENTRY(jobject, jvmpi::jobjectID_2_jobject(jobjectID jid))
-  assert(GC_locker::is_active(), "jobjectID_2_jobject may be called only with disabled GC");
-  Thread* thd = Thread::current();
-  assert(thd->is_Java_thread(), "call to jobjectID_2_jobject can only happen in a Java thread");
-
-  JavaThread* jthread = (JavaThread*)thd;
-
-  JNIEnv* env = jthread->jni_environment();
-  return JNIHandles::make_local(env, (oop)jid);
-JVMPI_END
-
-
-JVMPI_ENTRY(jobjectID, jvmpi::jobject_2_jobjectID(jobject jobj))
-  assert(GC_locker::is_active(), "jobject_2_jobjectID may be called only with disabled GC");
-  return (jobjectID)JNIHandles::resolve(jobj);
-JVMPI_END
-
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/prims/jvmpi.h	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,665 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)jvmpi.h	1.26 07/05/05 17:06:35 JVM"
-#endif
-/*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-#ifdef JVMPI_SUPPORT
-#ifndef _JAVASOFT_JVMPI_H_
-#define _JAVASOFT_JVMPI_H_
-
-#include "jni.h"
-
-#define JVMPI_VERSION_1   ((jint)0x10000001)  /* implied 0 for minor version */
-#define JVMPI_VERSION_1_1 ((jint)0x10000002)
-#define JVMPI_VERSION_1_2 ((jint)0x10000003)
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-  typedef void (*jvmpi_void_function_of_void)(void *);
-#ifdef __cplusplus
-}
-#endif
-
-/****************************************************************
- * Profiler interface data structures.
- ****************************************************************/
-/* identifier types. */
-struct _jobjectID;
-typedef struct _jobjectID * jobjectID;       /* type of object ids */ 
-
-/* raw monitors */
-struct _JVMPI_RawMonitor;
-typedef struct _JVMPI_RawMonitor * JVMPI_RawMonitor;
-
-/* call frame */
-typedef struct {
-    jint lineno;                      /* line number in the source file */
-    jmethodID method_id;              /* method executed in this frame */
-} JVMPI_CallFrame;
-
-/* call trace */
-typedef struct {
-    JNIEnv *env_id;                   /* Env where trace was recorded */
-    jint num_frames;                  /* number of frames in this trace */
-    JVMPI_CallFrame *frames;          /* frames */
-} JVMPI_CallTrace;
-
-/* method */
-typedef struct {
-    char *method_name;                /* name of method */
-    char *method_signature;           /* signature of method */
-    jint start_lineno;                /* -1 if native, abstract .. */
-    jint end_lineno;                  /* -1 if native, abstract .. */
-    jmethodID method_id;              /* id assigned to this method */
-} JVMPI_Method;
-
-/* Field */
-typedef struct {
-    char *field_name;                 /* name of field */
-    char *field_signature;            /* signature of field */
-} JVMPI_Field;
-
-/* line number info for a compiled method */
-typedef struct {
-    jint offset;                      /* offset from beginning of method */
-    jint lineno;                      /* lineno from beginning of src file */
-} JVMPI_Lineno;
-
-/* event */
-typedef struct {
-    jint event_type;                  /* event_type */
-    JNIEnv *env_id;                   /* env where this event occured */
-  
-    union {
-        struct {
-	    const char *class_name;   /* class name */
-	    char *source_name;        /* name of source file */
-	    jint num_interfaces;      /* number of interfaces implemented */
-  	    jint num_methods;         /* number of methods in the class */
-	    JVMPI_Method *methods;    /* methods */
-	    jint num_static_fields;   /* number of static fields */
-	    JVMPI_Field *statics;     /* static fields */
-	    jint num_instance_fields; /* number of instance fields */
-	    JVMPI_Field *instances;   /* instance fields */
-	    jobjectID class_id;       /* id of the class object */
-	} class_load;
-
-        struct {
-	    jobjectID class_id;       /* id of the class object */
-	} class_unload;
-
-        struct {
-	    unsigned char *class_data;        /* content of class file */
-	    jint class_data_len;              /* class file length */
-	    unsigned char *new_class_data;    /* instrumented class file */
-	    jint new_class_data_len;          /* new class file length */
-	    void * (*malloc_f)(unsigned int); /* memory allocation function */
-	} class_load_hook;
-
-        struct {
-            jint arena_id;
-	    jobjectID class_id;       /* id of object class */
-	    jint is_array;            /* JVMPI_NORMAL_OBJECT, ... */
-	    jint size;                /* size in number of bytes */
-	    jobjectID obj_id;         /* id assigned to this object */
-        } obj_alloc;
-
-        struct {
-	    jobjectID obj_id;         /* id of the object */
-	} obj_free;
-
-        struct {
-	    jint arena_id;            /* cur arena id */
-	    jobjectID obj_id;         /* cur object id */
-	    jint new_arena_id;        /* new arena id */
-	    jobjectID new_obj_id;     /* new object id */	  
-	} obj_move;
-
-        struct {
-	    jint arena_id;            /* id of arena */
-	    const char *arena_name;   /* name of arena */
-	} new_arena;
-
-        struct {
-	    jint arena_id;            /* id of arena */
-	} delete_arena;
-
-        struct {
-	    char *thread_name;        /* name of thread */
-	    char *group_name;         /* name of group */
-	    char *parent_name;        /* name of parent */
-	    jobjectID thread_id;      /* id of the thread object */
-	    JNIEnv *thread_env_id;
-        } thread_start;
-
-        struct {
-	    int dump_level;           /* level of the heap dump info */
-	    char *begin;              /* where all the root records begin,
-					 please see the heap dump buffer 
-				         format described below */
-	    char *end;                /* where the object records end. */
-	    jint num_traces;          /* number of thread traces, 
-				         0 if dump level = JVMPI_DUMP_LEVEL_0 */
-	    JVMPI_CallTrace *traces;  /* thread traces collected during 
-					 heap dump */ 
-	} heap_dump;
-
-        struct {
-	    jobjectID obj_id;         /* object id */
-	    jobject ref_id;           /* id assigned to the globalref */
-	} jni_globalref_alloc;
-      
-        struct {
-	    jobject ref_id;           /* id of the global ref */
-	} jni_globalref_free;
-
-        struct {
-	    jmethodID method_id;      /* method */
-	} method;
-
-        struct {
-	    jmethodID method_id;      /* id of method */
-	    jobjectID obj_id;         /* id of target object */
-	} method_entry2;
-
-        struct {
-	    jmethodID method_id;        /* id of compiled method */
-	    void *code_addr;            /* code start addr. in memory */
-	    jint code_size;             /* code size */
-	    jint lineno_table_size;     /* size of lineno table */
-	    JVMPI_Lineno *lineno_table; /* lineno info */
-	} compiled_method_load;
-        
-        struct {
-	    jmethodID method_id;        /* id of unloaded compiled method */
-	} compiled_method_unload;
-
-        struct {
-            jmethodID method_id;  /* id of the method the instruction belongs to */
-            jint      offset;	  /* instruction offset in the method's bytecode */
-            union {
-	        struct {
-	            jboolean is_true; /* whether true or false branch is taken  */
-	        } if_info;
-	        struct {
-	            jint key;     /* top stack value used as an index */ 
-	            jint low;     /* min value of the index           */	
-	            jint hi;      /* max value of the index           */	
-	        } tableswitch_info;
-	        struct {
-		    jint chosen_pair_index; /* actually chosen pair index (0-based)
-                                             * if chosen_pair_index == pairs_total then
-                                             * the 'default' branch is taken
-                                             */
-		    jint pairs_total;       /* total number of lookupswitch pairs */
-	        } lookupswitch_info;
-            } u;
-        } instruction;
-
-        struct {
-	    char *begin;                /* beginning of dump buffer, 
-					   see below for format */
-	    char *end;                  /* end of dump buffer */
-	    jint num_traces;            /* number of traces */
-	    JVMPI_CallTrace *traces;    /* traces of all threads */
-	    jint *threads_status;       /* status of all threads */
-	} monitor_dump;
-
-        struct {
-	    const char *name;           /* name of raw monitor */
-	    JVMPI_RawMonitor id;        /* id */
-	} raw_monitor;
-
-        struct {
-	    jobjectID object;           /* Java object */
-	} monitor;
-
-        struct {
-	    jobjectID object;           /* Java object */
-	    jlong timeout;              /* timeout period */
-	} monitor_wait;
-
-        struct {
-	    jlong used_objects;
-	    jlong used_object_space;
-	    jlong total_object_space;
-	} gc_info;
-
-        struct {
-	    jint data_len;
-	    char *data;
-	} object_dump;
-
-    } u;
-} JVMPI_Event;
-
-/* interface functions */
-typedef struct {
-    jint version;   /* JVMPI version */
-    
-    /* ------interface implemented by the profiler------ */
-
-    /**
-     * Function called by the JVM to notify an event. 
-     */
-    void (*NotifyEvent)(JVMPI_Event *event);
-  
-    /* ------interface implemented by the JVM------ */
-    
-    /**
-     * Function called by the profiler to enable/disable/send notification 
-     * for a particular event type.  
-     * 
-     * event_type - event_type
-     * arg - event specific arg
-     *
-     * return JVMPI_NOT_AVAILABLE, JVMPI_SUCCESS or JVMPI_FAIL
-     */
-    jint (*EnableEvent)(jint event_type, void *arg);
-    jint (*DisableEvent)(jint event_type, void *arg);
-    jint (*RequestEvent)(jint event_type, void *arg);
-  
-    /**
-     * Function called by the profiler to get a stack
-     * trace from the JVM.
-     *
-     * trace - trace data structure to be filled
-     * depth - maximum depth of the trace.
-     */
-    void (*GetCallTrace)(JVMPI_CallTrace *trace, jint depth);
-
-    /**
-     * Function called by profiler when it wants to exit/stop.
-     */
-    void (*ProfilerExit)(jint);
-
-    /**
-     * Utility functions provided by the JVM.
-     */
-    JVMPI_RawMonitor (*RawMonitorCreate)(char *lock_name);
-    void (*RawMonitorEnter)(JVMPI_RawMonitor lock_id);
-    void (*RawMonitorExit)(JVMPI_RawMonitor lock_id);
-    void (*RawMonitorWait)(JVMPI_RawMonitor lock_id, jlong ms);
-    void (*RawMonitorNotifyAll)(JVMPI_RawMonitor lock_id);
-    void (*RawMonitorDestroy)(JVMPI_RawMonitor lock_id);
-
-    /**
-     * Function called by the profiler to get the current thread's CPU time.
-     *
-     * return time in nanoseconds;
-     */
-    jlong (*GetCurrentThreadCpuTime)(void);
-
-    void (*SuspendThread)(JNIEnv *env);
-    void (*ResumeThread)(JNIEnv *env);
-    jint (*GetThreadStatus)(JNIEnv *env);
-    jboolean (*ThreadHasRun)(JNIEnv *env);
-
-    /* This function can be called safely only after JVMPI_EVENT_VM_INIT_DONE
-       notification by the JVM. */
-    jint (*CreateSystemThread)(char *name, jint priority, jvmpi_void_function_of_void f);
-
-    /* thread local storage access functions to avoid locking in time 
-       critical functions */
-    void (*SetThreadLocalStorage)(JNIEnv *env_id, void *ptr);
-    void * (*GetThreadLocalStorage)(JNIEnv *env_id);
-
-    /* control GC */
-    void (*DisableGC)(void);
-    void (*EnableGC)(void);
-    void (*RunGC)(void);
-
-    jobjectID (*GetThreadObject)(JNIEnv *env);
-    jobjectID (*GetMethodClass)(jmethodID mid);
-
-    /* JNI <-> jobject conversions */
-    jobject   (*jobjectID2jobject)(jobjectID jid);
-    jobjectID (*jobject2jobjectID)(jobject jobj);
-
-    void (*SuspendThreadList)
-      (jint reqCount, JNIEnv **reqList, jint *results);
-    void (*ResumeThreadList)
-      (jint reqCount, JNIEnv **reqList, jint *results);
-} JVMPI_Interface;
-
-/* type of argument passed to RequestEvent for heap dumps */
-typedef struct {
-    jint heap_dump_level;
-} JVMPI_HeapDumpArg;
-
-/**********************************************************************
- * Constants and formats used in JVM Profiler Interface.
- **********************************************************************/
-/*
- * Event type constants.
- */
-
-#define JVMPI_EVENT_METHOD_ENTRY                  ((jint) 1) 
-#define JVMPI_EVENT_METHOD_ENTRY2                 ((jint) 2) 
-#define JVMPI_EVENT_METHOD_EXIT                   ((jint) 3) 
-
-#define JVMPI_EVENT_OBJECT_ALLOC                  ((jint) 4) 
-#define JVMPI_EVENT_OBJECT_FREE                   ((jint) 5) 
-#define JVMPI_EVENT_OBJECT_MOVE                   ((jint) 6) 
-
-#define JVMPI_EVENT_COMPILED_METHOD_LOAD          ((jint) 7) 
-#define JVMPI_EVENT_COMPILED_METHOD_UNLOAD        ((jint) 8) 
-
-#define JVMPI_EVENT_INSTRUCTION_START             ((jint) 9)
-
-#define JVMPI_EVENT_THREAD_START                  ((jint)33) 
-#define JVMPI_EVENT_THREAD_END                    ((jint)34) 
-
-#define JVMPI_EVENT_CLASS_LOAD_HOOK               ((jint)35) 
-
-#define JVMPI_EVENT_HEAP_DUMP                     ((jint)37) 
-#define JVMPI_EVENT_JNI_GLOBALREF_ALLOC           ((jint)38) 
-#define JVMPI_EVENT_JNI_GLOBALREF_FREE            ((jint)39) 
-#define JVMPI_EVENT_JNI_WEAK_GLOBALREF_ALLOC      ((jint)40) 
-#define JVMPI_EVENT_JNI_WEAK_GLOBALREF_FREE       ((jint)41) 
-#define JVMPI_EVENT_CLASS_LOAD                    ((jint)42) 
-#define JVMPI_EVENT_CLASS_UNLOAD                  ((jint)43) 
-#define JVMPI_EVENT_DATA_DUMP_REQUEST             ((jint)44) 
-#define JVMPI_EVENT_DATA_RESET_REQUEST            ((jint)45) 
-
-#define JVMPI_EVENT_JVM_INIT_DONE                 ((jint)46) 
-#define JVMPI_EVENT_JVM_SHUT_DOWN                 ((jint)47) 
-
-#define JVMPI_EVENT_ARENA_NEW                     ((jint)48)
-#define JVMPI_EVENT_ARENA_DELETE                  ((jint)49)
-
-#define JVMPI_EVENT_OBJECT_DUMP                   ((jint)50)
-
-#define JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTER   ((jint)51)
-#define JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTERED ((jint)52)
-#define JVMPI_EVENT_RAW_MONITOR_CONTENDED_EXIT    ((jint)53)
-#define JVMPI_EVENT_MONITOR_CONTENDED_ENTER       ((jint)54)
-#define JVMPI_EVENT_MONITOR_CONTENDED_ENTERED     ((jint)55)
-#define JVMPI_EVENT_MONITOR_CONTENDED_EXIT        ((jint)56)
-#define JVMPI_EVENT_MONITOR_WAIT                  ((jint)57)
-#define JVMPI_EVENT_MONITOR_WAITED                ((jint)58)
-#define JVMPI_EVENT_MONITOR_DUMP                  ((jint)59)
-
-#define JVMPI_EVENT_GC_START                      ((jint)60)
-#define JVMPI_EVENT_GC_FINISH                     ((jint)61)
-
-#define JVMPI_MAX_EVENT_TYPE_VAL                  ((jint)61)
-
-/* old definitions, to be removed */
-#define JVMPI_EVENT_LOAD_COMPILED_METHOD          ((jint) 7) 
-#define JVMPI_EVENT_UNLOAD_COMPILED_METHOD        ((jint) 8) 
-#define JVMPI_EVENT_NEW_ARENA                     ((jint)48)
-#define JVMPI_EVENT_DELETE_ARENA                  ((jint)49)
-#define JVMPI_EVENT_DUMP_DATA_REQUEST             ((jint)44) 
-#define JVMPI_EVENT_RESET_DATA_REQUEST            ((jint)45) 
-#define JVMPI_EVENT_OBJ_ALLOC                     ((jint) 4) 
-#define JVMPI_EVENT_OBJ_FREE                      ((jint) 5) 
-#define JVMPI_EVENT_OBJ_MOVE                      ((jint) 6) 
-
-#define JVMPI_REQUESTED_EVENT                     ((jint)0x10000000)
-
-
- 
-/* 
- * enabling/disabling event notification.
- */
-/* results */
-#define JVMPI_SUCCESS                    ((jint)0)
-#define JVMPI_NOT_AVAILABLE              ((jint)1)
-#define JVMPI_FAIL                       ((jint)-1)
-
-/*
- * Thread status
- */
-enum {
-    JVMPI_THREAD_RUNNABLE = 1,
-    JVMPI_THREAD_MONITOR_WAIT,
-    JVMPI_THREAD_CONDVAR_WAIT
-};
-
-#define JVMPI_THREAD_SUSPENDED      0x8000
-#define JVMPI_THREAD_INTERRUPTED    0x4000
-
-/*
- * Thread priority
- */
-#define JVMPI_MINIMUM_PRIORITY	    1
-#define JVMPI_MAXIMUM_PRIORITY	    10
-#define JVMPI_NORMAL_PRIORITY	    5
-
-/*
- * Object type constants.
- */
-#define JVMPI_NORMAL_OBJECT	    ((jint)0)
-#define JVMPI_CLASS		    ((jint)2)
-#define JVMPI_BOOLEAN	            ((jint)4)
-#define JVMPI_CHAR                  ((jint)5)
-#define JVMPI_FLOAT                 ((jint)6)
-#define JVMPI_DOUBLE                ((jint)7)
-#define JVMPI_BYTE                  ((jint)8)
-#define JVMPI_SHORT                 ((jint)9)
-#define JVMPI_INT                   ((jint)10)
-#define JVMPI_LONG                  ((jint)11)    
-
-/*
- * Monitor dump constants.
- */
-
-#define JVMPI_MONITOR_JAVA          0x01
-#define JVMPI_MONITOR_RAW           0x02
-
-/*
- * Heap dump constants.
- */
-#define JVMPI_GC_ROOT_UNKNOWN       0xff
-#define JVMPI_GC_ROOT_JNI_GLOBAL    0x01
-#define JVMPI_GC_ROOT_JNI_LOCAL     0x02
-#define JVMPI_GC_ROOT_JAVA_FRAME    0x03
-#define JVMPI_GC_ROOT_NATIVE_STACK  0x04
-#define JVMPI_GC_ROOT_STICKY_CLASS  0x05
-#define JVMPI_GC_ROOT_THREAD_BLOCK  0x06
-#define JVMPI_GC_ROOT_MONITOR_USED  0x07
-#define JVMPI_GC_ROOT_THREAD_OBJ    0x08
-
-#define JVMPI_GC_CLASS_DUMP         0x20
-#define JVMPI_GC_INSTANCE_DUMP      0x21 
-#define JVMPI_GC_OBJ_ARRAY_DUMP     0x22
-#define JVMPI_GC_PRIM_ARRAY_DUMP    0x23
-
-/*
- * Dump levels
- */
-#define JVMPI_DUMP_LEVEL_0    ((jint)0)
-#define JVMPI_DUMP_LEVEL_1    ((jint)1)
-#define JVMPI_DUMP_LEVEL_2    ((jint)2)
-
-/* Types used in dumps -
- *
- * u1: 1 byte
- * u2: 2 bytes
- * u4: 4 bytes
- * u8: 8 bytes
- *
- * ty: u1 where:
- *     JVMPI_CLASS:   object
- *     JVMPI_BOOLEAN: boolean
- *     JVMPI_CHAR:    char
- *     JVMPI_FLOAT:   float
- *     JVMPI_DOUBLE:  double
- *     JVMPI_BYTE:    byte
- *     JVMPI_SHORT:   short
- *     JVMPI_INT:     int
- *     JVMPI_LONG:    long
- *
- * vl: values, exact type depends on the type of the value:
- *     JVMPI_BOOLEAN & JVMPI_BYTE:   u1
- *     JVMPI_SHORT & JVMPI_CHAR:     u2
- *     JVMPI_INT & JVMPI_FLOAT:      u4
- *     JVMPI_LONG & JVMPI_DOUBLE:    u8
- *     JVMPI_CLASS:                  jobjectID
- */
-
-/* Format of the monitor dump buffer:
- *
- *               u1                          monitor type
- *
- *               JVMPI_MONITOR_JAVA          Java monitor
- *
- *                          jobjectID        object
- *                          JNIEnv *         owner thread
- *                          u4               entry count
- *                          u4               # of threads waiting to enter
- *                          [JNIEnv *]*      threads waiting to enter
- *                          u4               # of threads waiting to be notified
- *                          [JNIEnv *]*      threads waiting to be notified
- *
- *               JVMPI_MONITOR_RAW           raw monitor
- *
- *                          char *           name
- *                          JVMPI_RawMonitor raw monitor
- *                          JNIEnv *         owner thread
- *                          u4               entry count
- *                          u4               # of threads waiting to enter
- *                          [JNIEnv *]*      threads waiting to enter
- *                          u4               # of threads waiting to be notified
- *                          [JNIEnv *]*      threads waiting to be notified
- */
-
-/* Format of the heap dump buffer depends on the dump level 
- * specified in the JVMPI_HeapDumpArg passed to RequestEvent as arg. 
- * The default is JVMPI_DUMP_LEVEL_2.
- *
- * JVMPI_DUMP_LEVEL_0:
- * 
- *               u1                          object type (JVMPI_CLASS ...)
- *               jobjectID                   object
- *
- * JVMPI_DUMP_LEVEL_1 and JVMPI_DUMP_LEVEL_2 use the following format:  
- * In the case of JVMPI_DUMP_LEVEL_1 the values of primitive fields in object 
- * instance dumps , the values of primitive statics in class dumps and the 
- * values of primitive arrays are excluded.  JVMPI_DUMP_LEVEL_2 includes the
- * primitive values.
- *
- *               u1                          record type
- *
- *               JVMPI_GC_ROOT_UNKNOWN       unknown root
- *
- *                          jobjectID        object
- *
- *               JVMPI_GC_ROOT_JNI_GLOBAL    JNI global ref root
- *
- *                          jobjectID        object
- *                          jobject          JNI global reference
- *
- *               JVMPI_GC_ROOT_JNI_LOCAL     JNI local ref
- *
- *                          jobjectID        object
- *                          JNIEnv *         thread
- *                          u4               frame # in stack trace (-1 for empty)
- *
- *               JVMPI_GC_ROOT_JAVA_FRAME    Java stack frame
- *
- *                          jobjectID        object
- *                          JNIEnv *         thread
- *                          u4               frame # in stack trace (-1 for empty)
- *
- *               JVMPI_GC_ROOT_NATIVE_STACK  Native stack
- *
- *                          jobjectID        object
- *                          JNIEnv *         thread
- *
- *               JVMPI_GC_ROOT_STICKY_CLASS  System class
- *
- *                          jobjectID        class object
- *
- *               JVMPI_GC_ROOT_THREAD_BLOCK  Reference from thread block
- *
- *                          jobjectID        thread object
- *                          JNIEnv *         thread
- *
- *               JVMPI_GC_ROOT_MONITOR_USED  Busy monitor
- *
- *                          jobjectID        object
- *
- *               JVMPI_GC_CLASS_DUMP         dump of a class object
- *
- *                          jobjectID        class
- *                          jobjectID        super
- *                          jobjectID        class loader
- *                          jobjectID        signers
- *                          jobjectID        protection domain
- *                          jobjectID        class name
- *                          void *           reserved
- *
- *                          u4               instance size (in bytes)
- *
- *                          [jobjectID]*     interfaces
- *
- *                          u2               size of constant pool
- *                          [u2,             constant pool index,
- *                           ty,             type, 
- *                           vl]*            value
- *
- *                          [vl]*            static field values
- *
- *               JVMPI_GC_INSTANCE_DUMP      dump of a normal object
- *
- *                          jobjectID        object
- *                          jobjectID        class
- *                          u4               number of bytes that follow
- *                          [vl]*            instance field values (class, followed
- *                                           by super, super's super ...)
- *
- *               JVMPI_GC_OBJ_ARRAY_DUMP     dump of an object array
- *
- *                          jobjectID        array object
- *                          u4               number of elements
- *                          jobjectID        element class
- *                          [jobjectID]*     elements
- *
- *               JVMPI_GC_PRIM_ARRAY_DUMP    dump of a primitive array
- *
- *                          jobjectID        array object
- *                          u4               number of elements
- *                          ty               element type
- *                          [vl]*            elements
- *
- */
-
-/* Format of the dump received in JVMPI_EVENT_OBJECT_DUMP:
- * All the records have JVMPI_DUMP_LEVEL_2 information.
- *
- *               u1                          record type
- *
- *                     followed by a:
- *
- *                          JVMPI_GC_CLASS_DUMP,
- *                          JVMPI_GC_INSTANCE_DUMP,
- *                          JVMPI_GC_OBJ_ARRAY_DUMP, or
- *                          JVMPI_GC_PRIM_ARRAY_DUMP record.
- */
-
-#endif /* !_JAVASOFT_JVMPI_H_ */
-#endif /* JVMPI_SUPPORT */
--- a/hotspot/src/share/vm/prims/jvmpi.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,190 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)jvmpi.hpp	1.50 07/05/05 17:06:34 JVM"
-#endif
-/*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-#define JVMPI_EVENT_DISABLED        0
-#define JVMPI_EVENT_NOT_SUPPORTED  ((unsigned int)-1)
-#define JVMPI_EVENT_ENABLED        ((unsigned int)-2)
-
-#define JVMPI_PROFILING_OFF        0x00000000
-#define JVMPI_PROFILING_ON         0x80000000
-
-#define JVMPI_INVALID_CLASS ((oop)(-1))
-
-typedef struct {
-    methodOop method;               /* method being compiled */
-    void *code_addr;                /* virtual address of the the method */
-    jint code_size;                 /* size of compiled method in memory */
-    jint lineno_table_len;          /* number of lineno table entries */
-    JVMPI_Lineno *lineno_table;     /* pointer to beginning of line table */
-} compiled_method_t;
-
-
-class jvmpi : public AllStatic {
- private:
-  // JVMPI interface data structure
-  static JVMPI_Interface jvmpi_interface;
-  static bool slow_allocation;
-
-  static void reset_jvmpi_allocation();
-
-  // To track if notification for a particular event type is enabled/disabled.
-  static unsigned int _event_flags_array[JVMPI_MAX_EVENT_TYPE_VAL+1];
-  static unsigned int _event_flags;
-
-  // initialization
-  static void initialize(int version);
-
-  // enable/disable event notification
-  static inline void enable_event(jint event_type);
-  static inline void disable_event(jint event_type);
-
-  static void post_event(JVMPI_Event* event);
-
-  static void post_event_common(JVMPI_Event* event);
-
-  static void post_event_vm_mode(JVMPI_Event* event, JavaThread* calling_thread);
-
-  // C heap memory allocation/free
-  static inline void* calloc(size_t size);
-  static inline void free(void* ptr);
-
-  // functions exported through the JVMPI
-  static void get_call_trace(JVMPI_CallTrace *trace, jint depth);
-  static jlong get_current_thread_cpu_time();
-  static JVMPI_RawMonitor raw_monitor_create(char *lock_name);
-  static void raw_monitor_enter(JVMPI_RawMonitor lock_id);
-  static void raw_monitor_exit(JVMPI_RawMonitor lock_id);
-  static void raw_monitor_destroy(JVMPI_RawMonitor lock_id);
-  static void raw_monitor_wait(JVMPI_RawMonitor lock_id, jlong ms);
-  static void raw_monitor_notify_all(JVMPI_RawMonitor lock_id);
-  static void suspend_thread(JNIEnv *env);
-  static void suspend_thread_list(jint reqCnt, JNIEnv **reqList, jint *results);
-  static void resume_thread(JNIEnv *env);
-  static void resume_thread_list(jint reqCnt, JNIEnv **reqList, jint *results);
-  static jint get_thread_status(JNIEnv *env);
-  static jboolean thread_has_run(JNIEnv *env);
-  static void run_gc();
-  static void profiler_exit(jint exit_code);
-  static jint create_system_thread(char *name, jint priority, jvmpi_void_function_of_void f);
-  static jint enable_event(jint event_type, void *arg);
-  static jint disable_event(jint event_type, void *arg);
-  static jint request_event(jint event_type, void *arg);
-  static void set_thread_local_storage(JNIEnv *env, void *ptr);
-  static void* get_thread_local_storage(JNIEnv *env);
-  static void disable_gc();
-  static void enable_gc();
-  static jobjectID get_thread_object(JNIEnv *env);
-  static jobjectID get_method_class(jmethodID mid);
-  static jobject   jobjectID_2_jobject(jobjectID);
-  static jobjectID jobject_2_jobjectID(jobject);
-  
- public:
-  // called from JNI to get the JVMPI interface function table
-  static JVMPI_Interface* GetInterface_1(int version);
-
-  // called before VM shutdown
-  static void disengage();
-
-  // test if jvmpi is enabled
-  static inline bool enabled();
-  
-  // per event tests
-  static inline bool is_event_enabled(jint event_type);
-  static inline bool is_event_supported(jint event_type);
-  
-  // support for (interpreter) code generation
-  static inline unsigned int* event_flags_array_at_addr(jint event_type);
-
-  // functions called by other parts of the VM to notify events
-  static void post_vm_initialization_events();
-  static void post_vm_initialized_event();
-  static void post_vm_death_event      ();
-
-  static void post_instruction_start_event(const frame& f);
-
-  static void post_thread_start_event  (JavaThread* thread, jint flag);
-  static void post_thread_start_event  (JavaThread* thread);
-  static void post_thread_end_event    (JavaThread* thread);
-
-  static void fillin_array_class_load_event  (oop k, JVMPI_Event *eventp);
-  static void fillin_class_load_event  (oop k, JVMPI_Event *eventp, bool fillin_jni_ids);
-  static void post_class_load_event    (oop k, jint flag);
-  static void post_class_load_event    (oop k);
-  // ptr to a function that takes an unsigned int param and returns a void *
-  typedef void * (*jvmpi_alloc_func_t)(unsigned int bytecnt);
-  static void post_class_load_hook_event(unsigned char **ptrP,
-    unsigned char **end_ptrP, jvmpi_alloc_func_t malloc_f);
-  static void *jvmpi_alloc(unsigned int bytecnt);
-  static void post_class_unload_events();
-  static void save_class_unload_event_info(oop k);
-
-  static void post_dump_event();
-
-  static void post_new_globalref_event(jobject ref, oop obj, bool post_jvmpi_event);
-  static void post_delete_globalref_event(jobject ref, bool post_jvmpi_event);
-  static void post_new_weakref_event(jobject ref, oop obj);
-  static void post_delete_weakref_event(jobject ref);
-
-  static void post_arena_new_event(int arena_id, const char* arena_name);
-  static void post_arena_delete_event(int arena_id);
-  static void post_object_alloc_event(oop obj, size_t bytesize, jint arena_id, jint flag);
-  static void post_object_free_event(oop obj);
-  static void post_object_move_event(oop oldobj, int old_arena, oop newobj, int new_arena);
-
-  static void post_method_entry2_event(methodOop m, oop receiver);
-  static void post_method_entry_event(methodOop m);
-  static void post_method_exit_event(methodOop m);
-
-  static void post_compiled_method_load_event(compiled_method_t *compiled_method_info);
-  static void post_compiled_method_unload_event(methodOop method);
-
-  static void post_monitor_contended_enter_event(void *mid);
-  static void post_monitor_contended_entered_event(void *mid);
-  static void post_monitor_contended_exit_event(void *mid);
-
-  static void post_monitor_wait_event(oop obj, jlong millis);
-  static void post_monitor_waited_event(oop obj, jlong millis);
-
-  static void post_raw_monitor_contended_enter_event(RawMonitor* o);
-  static void post_raw_monitor_contended_entered_event(RawMonitor* o);
-  static void post_raw_monitor_contended_exit_event(RawMonitor* o);
-
-  static void post_gc_start_event();
-  static void post_gc_finish_event(jlong used_obj_space, jlong total_obj_space);
-
-  static void post_trace_instr_event(unsigned char *pc, unsigned char opcode);
-  static void post_trace_if_event(unsigned char *pc, int is_true);
-  static void post_trace_tableswitch_event(unsigned char *pc, int key, int low, int hi);
-  static void post_trace_lookupswitch_event(unsigned char *pc,
-                                            int chosen_pair_index,
-                                            int pairs_total);
-
-  static void post_object_dump_event(oop obj, int flag);
-  static void post_heap_dump_event_in_safepoint(int level, int flag);
-  static void post_monitor_dump_event_in_safepoint(int flag);
-};
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/prims/jvmpi.inline.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)jvmpi.inline.hpp	1.20 07/05/05 17:06:35 JVM"
-#endif
-/*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-inline bool jvmpi::is_event_enabled(jint event_type) {
-  return (event_type >= 31
-	  ? (enabled() && (_event_flags_array[event_type] == JVMPI_EVENT_ENABLED)) 
-	  : (_event_flags & (1 << event_type)) != 0);
-}
-
-
-inline void* jvmpi::calloc(size_t size) {
-  void* p = os::malloc(size);
-  if (p == NULL) {
-    vm_exit_out_of_memory(size, "jvmpi::malloc");
-  }
-  memset(p, 0, size);
-  return p;
-}
-
-
-inline void jvmpi::free(void* ptr) {
-  os::free(ptr);
-}
-
-
-inline void jvmpi::enable_event(jint event_type) {
-  if (event_type < 31) {
-    _event_flags |= 1 << event_type;
-  }
-  _event_flags_array[event_type] = JVMPI_EVENT_ENABLED;
-}
-
-
-inline void jvmpi::disable_event(jint event_type) {
-  if (event_type < 31) {
-    _event_flags &= ~(1 << event_type);
-  }
-  _event_flags_array[event_type] = JVMPI_EVENT_DISABLED;
-}
-
-
-inline bool jvmpi::enabled() {
-  return !!(_event_flags & JVMPI_PROFILING_ON);
-}
-
-
-inline bool jvmpi::is_event_supported(jint event_type) {
-  return ((event_type <= JVMPI_MAX_EVENT_TYPE_VAL) &&
-	  (_event_flags_array[event_type] != JVMPI_EVENT_NOT_SUPPORTED));
-}
-
-
-inline unsigned int* jvmpi::event_flags_array_at_addr(jint event_type) {
-  return &_event_flags_array[event_type];
-}
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jvmtiEnv.cpp	1.160 07/05/05 17:06:38 JVM"
+#pragma ident "@(#)jvmtiEnv.cpp	1.161 07/05/17 16:03:46 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1266,7 +1266,10 @@
 // value_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::GetLocalObject(JavaThread* java_thread, jint depth, jint slot, jobject* value_ptr) {
-  JavaThread* current_thread = JavaThread::current(); 
+  JavaThread* current_thread = JavaThread::current();
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm(current_thread);
 
   VM_GetOrSetLocal op(java_thread, current_thread, depth, slot);
   VMThread::execute(&op);
@@ -1274,7 +1277,7 @@
   if (err != JVMTI_ERROR_NONE) {
     return err;
   } else {
-    *value_ptr = op.jobj_value();
+    *value_ptr = op.value().l;
     return JVMTI_ERROR_NONE;
   }
 } /* end GetLocalObject */
@@ -1287,6 +1290,10 @@
 // value_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::GetLocalInt(JavaThread* java_thread, jint depth, jint slot, jint* value_ptr) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
+
   VM_GetOrSetLocal op(java_thread, depth, slot, T_INT);
   VMThread::execute(&op);
   *value_ptr = op.value().i;
@@ -1301,6 +1308,10 @@
 // value_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::GetLocalLong(JavaThread* java_thread, jint depth, jint slot, jlong* value_ptr) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
+
   VM_GetOrSetLocal op(java_thread, depth, slot, T_LONG);
   VMThread::execute(&op);
   *value_ptr = op.value().j;
@@ -1315,6 +1326,10 @@
 // value_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::GetLocalFloat(JavaThread* java_thread, jint depth, jint slot, jfloat* value_ptr) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
+
   VM_GetOrSetLocal op(java_thread, depth, slot, T_FLOAT);
   VMThread::execute(&op);
   *value_ptr = op.value().f;
@@ -1329,6 +1344,10 @@
 // value_ptr - pre-checked for NULL
 jvmtiError
 JvmtiEnv::GetLocalDouble(JavaThread* java_thread, jint depth, jint slot, jdouble* value_ptr) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
+
   VM_GetOrSetLocal op(java_thread, depth, slot, T_DOUBLE);
   VMThread::execute(&op);
   *value_ptr = op.value().d;
@@ -1342,10 +1361,12 @@
 // depth - pre-checked as non-negative
 jvmtiError
 JvmtiEnv::SetLocalObject(JavaThread* java_thread, jint depth, jint slot, jobject value) {
-  JavaThread* current_thread  = JavaThread::current();
-  HandleMark hm(current_thread);
-  Handle object_handle = Handle(current_thread, (oop)JNIHandles::resolve(value) );
-  VM_GetOrSetLocal op(java_thread, depth, slot, &object_handle);
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
+  jvalue val;
+  val.l = value;
+  VM_GetOrSetLocal op(java_thread, depth, slot, T_OBJECT, val);
   VMThread::execute(&op);
   return op.result();
 } /* end SetLocalObject */
@@ -1357,6 +1378,9 @@
 // depth - pre-checked as non-negative
 jvmtiError
 JvmtiEnv::SetLocalInt(JavaThread* java_thread, jint depth, jint slot, jint value) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
   jvalue val;
   val.i = value;
   VM_GetOrSetLocal op(java_thread, depth, slot, T_INT, val);
@@ -1371,6 +1395,9 @@
 // depth - pre-checked as non-negative
 jvmtiError
 JvmtiEnv::SetLocalLong(JavaThread* java_thread, jint depth, jint slot, jlong value) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
   jvalue val;
   val.j = value;
   VM_GetOrSetLocal op(java_thread, depth, slot, T_LONG, val);
@@ -1385,6 +1412,9 @@
 // depth - pre-checked as non-negative
 jvmtiError
 JvmtiEnv::SetLocalFloat(JavaThread* java_thread, jint depth, jint slot, jfloat value) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
   jvalue val;
   val.f = value;
   VM_GetOrSetLocal op(java_thread, depth, slot, T_FLOAT, val);
@@ -1399,6 +1429,9 @@
 // depth - pre-checked as non-negative
 jvmtiError
 JvmtiEnv::SetLocalDouble(JavaThread* java_thread, jint depth, jint slot, jdouble value) {
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm;
   jvalue val;
   val.d = value;
   VM_GetOrSetLocal op(java_thread, depth, slot, T_DOUBLE, val);
@@ -2565,11 +2598,7 @@
       int r;
       intptr_t recursion = rmonitor->recursions();
       for (intptr_t i=0; i <= recursion; i++) {
-#ifdef JVMPI_SUPPORT
-        r = rmonitor->raw_exit(thread, false);
-#else // !JVMPI_SUPPORT
         r = rmonitor->raw_exit(thread);
-#endif // JVMPI_SUPPORT
         assert(r == ObjectMonitor::OM_OK, "raw_exit should have worked");
         if (r != ObjectMonitor::OM_OK) {  // robustness
           return JVMTI_ERROR_INTERNAL;
@@ -2616,11 +2645,7 @@
       ThreadInVMfromUnknown __tiv;
       {
         ThreadBlockInVM __tbivm(current_thread);
-#ifdef JVMPI_SUPPORT
-        r = rmonitor->raw_enter(current_thread, false);
-#else // !JVMPI_SUPPORT
         r = rmonitor->raw_enter(current_thread);
-#endif // JVMPI_SUPPORT
       }
 #else
       /* Transition to thread_blocked without entering vm state          */
@@ -2638,11 +2663,7 @@
 	     current_thread->frame_anchor()->walkable(), "Must be walkable");
       current_thread->set_thread_state(_thread_blocked);
 
-#ifdef JVMPI_SUPPORT
-      r = rmonitor->raw_enter(current_thread, false);
-#else // !JVMPI_SUPPORT
       r = rmonitor->raw_enter(current_thread);
-#endif // JVMPI_SUPPORT
       // restore state, still at a safepoint safe state
       current_thread->set_thread_state(state);
 
@@ -2650,11 +2671,7 @@
       assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
     } else {
       if (thread->is_VM_thread() || thread->is_ConcurrentGC_thread()) {
-#ifdef JVMPI_SUPPORT
-	r = rmonitor->raw_enter(thread, false);
-#else // !JVMPI_SUPPORT
 	r = rmonitor->raw_enter(thread);
-#endif // JVMPI_SUPPORT
       } else {
 	ShouldNotReachHere();
       }
@@ -2689,18 +2706,10 @@
       // Not really unknown but ThreadInVMfromNative does more than we want
       ThreadInVMfromUnknown __tiv;
 #endif /* PROPER_TRANSITIONS */
-#ifdef JVMPI_SUPPORT
-      r = rmonitor->raw_exit(current_thread, false);
-#else // !JVMPI_SUPPORT
       r = rmonitor->raw_exit(current_thread);
-#endif // JVMPI_SUPPORT
     } else {
       if (thread->is_VM_thread() || thread->is_ConcurrentGC_thread()) {
-#ifdef JVMPI_SUPPORT
-	r = rmonitor->raw_exit(thread, false);
-#else // !JVMPI_SUPPORT
 	r = rmonitor->raw_exit(thread);
-#endif // JVMPI_SUPPORT
       } else {
 	ShouldNotReachHere();
       }
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jvmtiEnvBase.cpp	1.88 07/05/05 17:06:37 JVM"
+#pragma ident "@(#)jvmtiEnvBase.cpp	1.89 07/05/17 16:04:59 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -192,6 +192,10 @@
 JvmtiEnvBase::periodic_clean_up() {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
 
+  // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So 
+  // clean up JvmtiThreadState before deleting JvmtiEnv pointer.
+  JvmtiThreadState::periodic_clean_up();
+
   // Unlink all invalid environments from the list of environments
   // and deallocate them
   JvmtiEnvIterator it; 
@@ -245,7 +249,6 @@
              !is_inside_dying_thread_env_iteration()) {
       _needs_clean_up = false;
       JvmtiEnvBase::periodic_clean_up();
-      JvmtiThreadState::periodic_clean_up();
     }
   }
 }
@@ -1244,29 +1247,32 @@
 // Verifies that the top frame is a java frame in an expected state.
 // Deoptimizes frame if needed.
 // Checks that the frame method signature matches the return type (tos).
+// HandleMark must be defined in the caller only.
+// It is to keep a ret_ob_h handle alive after return to the caller.
 jvmtiError
-JvmtiEnvBase::check_top_frame(JavaThread* java_thread, jvalue value,
-                              TosState tos, oop* ret_oop) {
-  JavaThread* current_thread  = JavaThread::current();
+JvmtiEnvBase::check_top_frame(JavaThread* current_thread, JavaThread* java_thread,
+                              jvalue value, TosState tos, Handle* ret_ob_h) {
   ResourceMark rm(current_thread);
 
   vframe *vf = vframeFor(java_thread, 0);
-  if (vf == NULL) {
-    return JVMTI_ERROR_NO_MORE_FRAMES;
-  }
+  NULL_CHECK(vf, JVMTI_ERROR_NO_MORE_FRAMES);
+
   javaVFrame *jvf = (javaVFrame*) vf;
   if (!vf->is_java_frame() || jvf->method()->is_native()) {
     return JVMTI_ERROR_OPAQUE_FRAME;
   }
 
   // If the frame is a compiled one, need to deoptimize it.
-  if (!jvf->is_interpreted_frame()) {
-    VM_DeoptimizeFrame op(java_thread, jvf->fr().id());
-    VMThread::execute(&op);
+  if (vf->is_compiled_frame()) {
+    if (!vf->fr().can_be_deoptimized()) {
+      return JVMTI_ERROR_OPAQUE_FRAME;
+    }
+    VM_DeoptimizeFrame deopt(java_thread, jvf->fr().id());
+    VMThread::execute(&deopt);
   }
 
   // Get information about method return type
-  symbolHandle signature(current_thread,  jvf->method()->signature());
+  symbolHandle signature(current_thread, jvf->method()->signature());
 
   ResultTypeFinder rtf(signature);
   TosState fr_tos = as_TosState(rtf.type());
@@ -1275,18 +1281,22 @@
       return JVMTI_ERROR_TYPE_MISMATCH;
     }
   }
-  jobject obj = value.l;
-  if (tos == atos) {
-   // Check that it is a valid jni handle
-    *ret_oop = JNIHandles::resolve_external_guard(obj);
-    if (obj != NULL && *ret_oop == NULL) { // NULL reference is allowed
-      return JVMTI_ERROR_INVALID_OBJECT;
+
+  // Check that the jobject class matches the return type signature.
+  jobject jobj = value.l;
+  if (tos == atos && jobj != NULL) { // NULL reference is allowed
+    Handle ob_h = Handle(current_thread, JNIHandles::resolve_external_guard(jobj));
+    NULL_CHECK(ob_h, JVMTI_ERROR_INVALID_OBJECT);
+    KlassHandle ob_kh = KlassHandle(current_thread, ob_h()->klass());
+    NULL_CHECK(ob_kh, JVMTI_ERROR_INVALID_OBJECT);
+
+    // Method return type signature.
+    char* ty_sign = 1 + strchr(signature->as_C_string(), ')');
+
+    if (!VM_GetOrSetLocal::is_assignable(ty_sign, Klass::cast(ob_kh()), current_thread)) {
+      return JVMTI_ERROR_TYPE_MISMATCH;
     }
-    // TBD: Check that the jobject class matches the return type signature.
-    //      Something like the following is needed.
-    // if (!is_assignable_to(obj, signature)) {
-    //   return JVMTI_ERROR_TYPE_MISMATCH;
-    // }
+    *ret_ob_h = ob_h;
   }
   return JVMTI_ERROR_NONE;
 } /* end check_top_frame */
@@ -1304,8 +1314,8 @@
 
 jvmtiError
 JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState tos) {
-  JavaThread* current_thread  = JavaThread::current();
-  HandleMark hm(current_thread);
+  JavaThread* current_thread = JavaThread::current();
+  HandleMark   hm(current_thread);
   uint32_t debug_bits = 0;
 
   // Check if java_thread is fully suspended
@@ -1335,11 +1345,13 @@
       return JVMTI_ERROR_OPAQUE_FRAME;
     }
   }
-  oop ret_oop = NULL;
-  jvmtiError err = check_top_frame(java_thread, value, tos, &ret_oop);
+  Handle ret_ob_h = Handle();
+  jvmtiError err = check_top_frame(current_thread, java_thread, value, tos, &ret_ob_h);
   if (err != JVMTI_ERROR_NONE) {
     return err;
   }
+  assert(tos != atos || value.l == NULL || ret_ob_h() != NULL,
+         "return object oop must not be NULL if jobject is not NULL");
 
   // Update the thread state to reflect that the top frame must be
   // forced to return.
@@ -1348,7 +1360,7 @@
   // (see call_VM_base() in assembler_<cpu>.cpp).
 
   state->set_earlyret_pending();
-  state->set_earlyret_oop(ret_oop);
+  state->set_earlyret_oop(ret_ob_h());
   state->set_earlyret_value(value, tos);
 
   // Set pending step flag for this early return.
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)jvmtiEnvBase.hpp	1.66 07/05/05 17:06:36 JVM"
+#pragma ident "@(#)jvmtiEnvBase.hpp	1.67 07/05/17 16:05:01 JVM"
 #endif
 /*
  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -281,10 +281,9 @@
                                                          jobject *monitor_ptr);
   jvmtiError get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
                           GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list);
-  jvmtiError check_top_frame(JavaThread* java_thread, jvalue value,
-                                           TosState tos, oop* ret_oop);
-  jvmtiError force_early_return(JavaThread* java_thread,
-                                           jvalue value, TosState tos);
+  jvmtiError check_top_frame(JavaThread* current_thread, JavaThread* java_thread,
+                             jvalue value, TosState tos, Handle* ret_ob_h);
+  jvmtiError force_early_return(JavaThread* java_thread, jvalue value, TosState tos);
 };
 
 // This class is the only safe means of iterating through environments.
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jvmtiImpl.cpp	1.62 07/05/05 17:06:39 JVM"
+#pragma ident "@(#)jvmtiImpl.cpp	1.63 07/05/17 16:05:04 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -40,11 +40,7 @@
     ThreadBlockInVM __tbivm(current_java_thread);
     for(int i=0; i< count(); i++) {
       JvmtiRawMonitor *rmonitor = monitors()->at(i);
-#ifdef JVMPI_SUPPORT
-      int r = rmonitor->raw_enter(current_java_thread, false);
-#else // !JVMPI_SUPPORT
       int r = rmonitor->raw_enter(current_java_thread);
-#endif // JVMPI_SUPPORT
       assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
     }
   }
@@ -860,13 +856,13 @@
   , _depth(depth)
   , _index(index)
   , _type(type)
-  , _jobj_value(NULL)
   , _set(false)
+  , _jvf(NULL)
   , _result(JVMTI_ERROR_NONE)
 {  
 }
 
-// Constructor for non-object setter
+// Constructor for object or non-object setter
 VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type, jvalue value)
   : _thread(thread)
   , _calling_thread(NULL)
@@ -874,8 +870,8 @@
   , _index(index)
   , _type(type)
   , _value(value)
-  , _jobj_value(NULL)
   , _set(true)
+  , _jvf(NULL)
   , _result(JVMTI_ERROR_NONE)
 {
 }
@@ -887,28 +883,17 @@
   , _depth(depth)
   , _index(index)
   , _type(T_OBJECT)
-  , _jobj_value(NULL)
   , _set(false)
-  , _result(JVMTI_ERROR_NONE)
-{
-}
-
-// Constructor for object setter
-VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, Handle* value)
-  : _thread(thread)
-  , _calling_thread(NULL)
-  , _depth(depth)
-  , _index(index)
-  , _type(T_OBJECT)
-  , _obj(value)
-  , _jobj_value(NULL)
-  , _set(true)
+  , _jvf(NULL)
   , _result(JVMTI_ERROR_NONE)
 {
 }
 
 
 vframe *VM_GetOrSetLocal::get_vframe() {
+  if (!_thread->has_last_Java_frame()) {
+    return NULL;
+  }
   RegisterMap reg_map(_thread);
   vframe *vf = _thread->last_java_vframe(&reg_map);
   int d = 0;
@@ -921,24 +906,56 @@
 
 javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
   vframe* vf = get_vframe();
-
   if (vf == NULL) {
     _result = JVMTI_ERROR_NO_MORE_FRAMES;
     return NULL;
   }
-  if (!vf->is_java_frame()) {
-    _result = JVMTI_ERROR_OPAQUE_FRAME;
-    return NULL;
-  }
   javaVFrame *jvf = (javaVFrame*)vf;
 
-  if (jvf->method()->is_native()) {
+  if (!vf->is_java_frame() || jvf->method()->is_native()) {
     _result = JVMTI_ERROR_OPAQUE_FRAME;
     return NULL;
   }
   return jvf;
 }
 
+// Check that the klass is assignable to a type with the given signature.
+// Another solution could be to use the function Klass::is_subtype_of(type).
+// But the type class can be forced to load/initialize eagerly in such a case.
+// This may cause unexpected consequences like CFLH or class-init JVMTI events.
+// It is better to avoid such a behavior.
+bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
+  assert(ty_sign != NULL, "type signature must not be NULL");
+  assert(thread != NULL, "thread must not be NULL");
+  assert(klass != NULL, "klass must not be NULL");
+
+  int len = (int) strlen(ty_sign);
+  if (ty_sign[0] == 'L' && ty_sign[len-1] == ';') { // Need pure class/interface name
+    ty_sign++;
+    len -= 2;
+  }
+  symbolHandle ty_sym = oopFactory::new_symbol_handle(ty_sign, len, thread);
+  if (klass->name() == ty_sym()) {
+    return true;
+  }
+  // Compare primary supers
+  int super_depth = klass->super_depth();
+  int idx;
+  for (idx = 0; idx < super_depth; idx++) {
+    if (Klass::cast(klass->primary_super_of_depth(idx))->name() == ty_sym()) {
+      return true;
+    }
+  }
+  // Compare secondary supers
+  objArrayOop sec_supers = klass->secondary_supers(); 
+  for (idx = 0; idx < sec_supers->length(); idx++) {
+    if (Klass::cast((klassOop) sec_supers->obj_at(idx))->name() == ty_sym()) {
+      return true;
+    }
+  }
+  return false;
+}
+
 // Checks error conditions:
 //   JVMTI_ERROR_INVALID_SLOT
 //   JVMTI_ERROR_TYPE_MISMATCH
@@ -946,7 +963,6 @@
   
 bool VM_GetOrSetLocal::check_slot_type(javaVFrame* jvf) {
   methodOop method_oop = jvf->method();
-
   if (!method_oop->has_localvariable_table()) {
     // Just to check index boundaries
     jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
@@ -971,7 +987,6 @@
 
     // Here we assume that locations of LVT entries
     // with the same slot number cannot be overlapped
-
     if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
       signature_idx = (int) table[i].descriptor_cp_index;
       break;
@@ -981,8 +996,8 @@
     _result = JVMTI_ERROR_INVALID_SLOT;
     return false;	// Incorrect slot index
   }
-  constantPoolOop constants = method_oop->constants();
-  const char *signature = (const char *)constants->symbol_at(signature_idx)->as_utf8();
+  symbolOop   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
+  const char* signature = (const char *) sign_sym->as_utf8();
   BasicType slot_type = char2type(signature[0]);
 
   switch (slot_type) {
@@ -1000,6 +1015,23 @@
     _result = JVMTI_ERROR_TYPE_MISMATCH;
     return false;
   }
+
+  jobject jobj = _value.l;
+  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
+    // Check that the jobject class matches the return type signature.
+    JavaThread* cur_thread = JavaThread::current();
+    HandleMark hm(cur_thread);
+
+    Handle obj = Handle(cur_thread, JNIHandles::resolve_external_guard(jobj));
+    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
+    KlassHandle ob_kh = KlassHandle(cur_thread, obj->klass());
+    NULL_CHECK(ob_kh, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
+
+    if (!is_assignable(signature, Klass::cast(ob_kh()), cur_thread)) {
+      _result = JVMTI_ERROR_TYPE_MISMATCH;
+      return false;
+    }
+  }
   return true;
 }
 
@@ -1008,32 +1040,25 @@
 }
 
 bool VM_GetOrSetLocal::doit_prologue() { 
-  if (!_thread->has_last_Java_frame()) {
-    _result = JVMTI_ERROR_NO_MORE_FRAMES;
+  _jvf = get_java_vframe();
+  NULL_CHECK(_jvf, false);
+
+  if (!check_slot_type(_jvf)) {
     return false;
   }
   return true;
 }
 
 void VM_GetOrSetLocal::doit() {
-  javaVFrame* jvf = get_java_vframe();
-
-  if (jvf == NULL) {
-    return;
-  }
-  if (!check_slot_type(jvf)) {
-    return;
-  }
-
   if (_set) {
     // Force deoptimization of frame if compiled because it's
     // possible the compiler emitted some locals as constant values,
     // meaning they are not mutable.
-    if (can_be_deoptimized(jvf)) {
+    if (can_be_deoptimized(_jvf)) {
 
       // Schedule deoptimization so that eventually the local
       // update will be written to an interpreter frame.
-      VM_DeoptimizeFrame deopt(jvf->thread(), jvf->fr().id());
+      VM_DeoptimizeFrame deopt(_jvf->thread(), _jvf->fr().id());
       VMThread::execute(&deopt);
 
       // Now store a new value for the local which will be applied
@@ -1048,32 +1073,36 @@
       // happens. The oop stored in the deferred local will be
       // gc'd on its own.
       if (_type == T_OBJECT) {
-	_value.l = (jobject) (*_obj)();
+        _value.l = (jobject) (JNIHandles::resolve_external_guard(_value.l));
       }
       // Re-read the vframe so we can see that it is deoptimized
       // [ Only need because of assert in update_local() ]
-      jvf = get_java_vframe();
-      ((compiledVFrame*)jvf)->update_local(_type, _index, _value);
-
+      _jvf = get_java_vframe();
+      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
       return;
     }
-    StackValueCollection *locals = jvf->locals();
+    StackValueCollection *locals = _jvf->locals();
+    HandleMark hm;
 
     switch (_type) {
     case T_INT:    locals->set_int_at   (_index, _value.i); break;
     case T_LONG:   locals->set_long_at  (_index, _value.j); break;
     case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
     case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
-    case T_OBJECT: locals->set_obj_at   (_index, *_obj);    break;
+    case T_OBJECT: {
+      Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
+      locals->set_obj_at (_index, ob_h);
+      break;
+    }
     default: ShouldNotReachHere();
     }
-    jvf->set_locals(locals);
+    _jvf->set_locals(locals);
   } else {
-    StackValueCollection *locals = jvf->locals();
+    StackValueCollection *locals = _jvf->locals();
 
     if (locals->at(_index)->type() == T_CONFLICT) {
       memset(&_value, 0, sizeof(_value));
-      _jobj_value = NULL;
+      _value.l = NULL;
       return;
     }
 
@@ -1086,7 +1115,7 @@
       // Wrap the oop to be returned in a local JNI handle since
       // oops_do() no longer applies after doit() is finished.
       oop obj = locals->obj_at(_index)();
-      _jobj_value = JNIHandles::make_local(_calling_thread, obj);
+      _value.l = JNIHandles::make_local(_calling_thread, obj);
       break;
     }
     default: ShouldNotReachHere();
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)jvmtiImpl.hpp	1.100 07/05/05 17:06:39 JVM"
+#pragma ident "@(#)jvmtiImpl.hpp	1.101 07/05/17 16:05:07 JVM"
 #endif
 /*
  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -420,8 +420,7 @@
   jint        _index;
   BasicType   _type;
   jvalue      _value;
-  Handle*     _obj;        // For setting objects
-  jobject     _jobj_value; // For getting objects
+  javaVFrame* _jvf;
   bool        _set;
 
   jvmtiError  _result;
@@ -434,18 +433,14 @@
   // Constructor for non-object getter
   VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type);
 
-  // Constructor for non-object setter
+  // Constructor for object or non-object setter
   VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value);
 
   // Constructor for object getter
   VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth,
                    int index);
 
-  // Constructor for object setter
-  VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, Handle* value);
-
   jvalue value()       { return _value; }
-  jobject jobj_value() { return _jobj_value; }
   jvmtiError result()  { return _result; }
 
   bool doit_prologue();
@@ -453,6 +448,8 @@
   bool allow_nested_vm_operations() const;
   const char* name() const                       { return "get/set locals"; }
 
+  // Check that the klass is assignable to a type with the given signature.
+  static bool is_assignable(const char* ty_sign, Klass* klass, Thread* thread);
 };
 
 
--- a/hotspot/src/share/vm/prims/rawMonitor.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,113 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)rawMonitor.cpp	1.11 07/05/05 17:06:41 JVM"
-#endif
-/*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_rawMonitor.cpp.incl"
-#ifdef JVMPI_SUPPORT
-// Platform independent support for RawMonitors for JVMPI
-
-//
-// class RawMonitor
-//
-
-RawMonitor::RawMonitor(const char *name, const int magic) {
-  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
-  _magic = magic;
-  _rmnext = NULL;
-  _rmprev = NULL;
-}
-
-RawMonitor::~RawMonitor() {
-  if (_rmprev != NULL || _rmnext != NULL) {
-     // monitor is in the list;
-     remove_from_locked_list();
-  }
-}
-
-// Note: destroy can be called while monitor is still busy.
-int RawMonitor::raw_destroy() {
-  if (_owner != NULL) {
-     return OM_ILLEGAL_MONITOR_STATE; 
-  }
-  _magic = 0;
-  if (_name) {
-      FreeHeap(_name);
-  }
-  if (_rmprev != NULL || _rmnext != NULL) {
-    // monitor is in the list
-    remove_from_locked_list();
-  }
-  delete this;
-  return OM_OK;
-}
-
-
-// Support for linked list of RawMonitors for JVMPI dumps
-// The last element in list has _rmnext == NULL
-// The first element in list has _rmprev == NULL;
-
-// No need to grab Threads_lock as we are locking in the changed thread
-
-// Call this when RawMonitor is locked by _owner
-void RawMonitor::add_to_locked_list() {
-  Thread* thr = (Thread *)_owner;
-  RawMonitor* first = thr->rawmonitor_list();
-  assert (_rmprev == NULL && _rmnext == NULL, "sanity check");
-  if (first != NULL) {
-    _rmnext = first;
-    first->_rmprev = this;
-  }
-  thr->set_rawmonitor_list(this);
-}
-
-
-void RawMonitor::remove_from_locked_list() {
-  Thread* thr = (Thread *)_owner;
-  if ((thr == NULL) || (_rmprev == NULL && _rmnext == NULL && this != thr->rawmonitor_list())) {
-    // This monitor was never added to the list
-    return;
-  }
-
-  if (_rmprev == NULL) {
-    // removing the head of list
-    assert(thr->rawmonitor_list() == this, "must be the first element");
-    thr->set_rawmonitor_list(_rmnext);
-    if (_rmnext != NULL) {
-      _rmnext->_rmprev = NULL;
-    }
-  } else {
-    assert(thr->rawmonitor_list() != this, "sanity check");
-    assert(_rmprev != NULL && _rmprev->_rmnext == this, "incorrect list structure");
-    assert(_rmnext == NULL || _rmnext->_rmprev == this, "incorrect list structure");
-    _rmprev->_rmnext = _rmnext;
-    if (_rmnext != NULL) {
-      _rmnext->_rmprev = _rmprev;
-    }
-  }
-  _rmnext = NULL; _rmprev = NULL; // removed
-}
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/prims/rawMonitor.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)rawMonitor.hpp	1.13 07/05/05 17:06:40 JVM"
-#endif
-/*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-//
-//
-// class RawMonitor
-// Used by all JVMPI RawMonitor methods: 
-//   (CreateRawMonitor, EnterRawMonitor, etc.)
-//
-// Wrapper for ObjectMonitor class that saves the Monitor's name
-// and links thread's owned raw monitors
-
-class RawMonitor: public ObjectMonitor {
-private:
-  int           _magic;
-  char *        _name;
-  // maintaining list of locked raw monitors
-  RawMonitor* _rmnext;    
-  RawMonitor* _rmprev;
-
-public:
-  RawMonitor(const char *name, const int magic);
-  ~RawMonitor();                             
-  int       magic()                         { return _magic;  }
-  void      print(outputStream& out)        { out.print(_name); }
-  RawMonitor* next_raw() const              { return _rmnext; }
-  const char *name() const                  { return _name; }
-  void add_to_locked_list();
-  void remove_from_locked_list();
-  int       raw_destroy();
-};
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)unsafe.cpp	1.63 07/05/05 17:06:35 JVM"
+#pragma ident "@(#)unsafe.cpp	1.64 07/05/17 16:05:09 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -488,23 +488,18 @@
     THROW(vmSymbols::java_lang_IllegalArgumentException());
   }
   char* p = (char*) addr_from_java(addr);
-  while ((uintptr_t)p % HeapWordSize && sz > 0) {
-    *p++ = (char) value;
-    sz--;
-  }
-  juint value_word = (juint)value & 0xFF;
-  if (value_word != 0) {
-    value_word |= (value_word << 8);
-    value_word |= (value_word << 16);
+  Copy::fill_to_memory_atomic(p, sz, value);
+UNSAFE_END
+
+UNSAFE_ENTRY(void, Unsafe_SetMemory2(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong size, jbyte value))
+  UnsafeWrapper("Unsafe_SetMemory");
+  size_t sz = (size_t)size;
+  if (sz != (julong)size || size < 0) {
+    THROW(vmSymbols::java_lang_IllegalArgumentException());
   }
-  size_t nw = sz / HeapWordSize;
-  Copy::fill_to_words((HeapWord*)p, nw, value_word);
-  sz -= nw * HeapWordSize;
-  p  += nw * HeapWordSize;
-  while (sz > 0) {
-    *p++ = (char) value;
-    sz--;
-  }
+  oop base = JNIHandles::resolve(obj);
+  void* p = index_oop_from_field_offset_long(base, offset);
+  Copy::fill_to_memory_atomic(p, sz, value);
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_CopyMemory(JNIEnv *env, jobject unsafe, jlong srcAddr, jlong dstAddr, jlong size))
@@ -516,12 +511,31 @@
   if (sz != (julong)size || size < 0) {
     THROW(vmSymbols::java_lang_IllegalArgumentException());
   }
-  if (sz == 0) {
+  void* src = addr_from_java(srcAddr);
+  void* dst = addr_from_java(dstAddr);
+  Copy::conjoint_memory_atomic(src, dst, sz);
+UNSAFE_END
+
+UNSAFE_ENTRY(void, Unsafe_CopyMemory2(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size))
+  UnsafeWrapper("Unsafe_CopyMemory");
+  if (size == 0) {
     return;
   }
-  void* src = addr_from_java(srcAddr);
-  void* dst = addr_from_java(dstAddr);
-  Copy::conjoint_bytes(src, dst, sz);
+  size_t sz = (size_t)size;
+  if (sz != (julong)size || size < 0) {
+    THROW(vmSymbols::java_lang_IllegalArgumentException());
+  }
+  oop srcp = JNIHandles::resolve(srcObj);
+  oop dstp = JNIHandles::resolve(dstObj);
+  if (dstp != NULL && !dstp->is_typeArray()) {
+    // NYI:  This works only for non-oop arrays at present.
+    // Generalizing it would be reasonable, but requires card marking.
+    // Also, autoboxing a Long from 0L in copyMemory(x,y, 0L,z, n) would be bad.
+    THROW(vmSymbols::java_lang_IllegalArgumentException());
+  }
+  void* src = index_oop_from_field_offset_long(srcp, srcOffset);
+  void* dst = index_oop_from_field_offset_long(dstp, dstOffset);
+  Copy::conjoint_memory_atomic(src, dst, sz);
 UNSAFE_END
 
 
@@ -1017,8 +1031,8 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-    {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-    {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
+//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
+//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"fieldOffset",        CC"("FLD")I",               FN_PTR(Unsafe_FieldOffset)}, //deprecated
@@ -1065,8 +1079,8 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-    {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-    {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
+//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
+//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1118,8 +1132,8 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-    {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-    {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
+//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
+//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1176,8 +1190,8 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-    {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-    {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
+//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
+//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1214,6 +1228,28 @@
 
 };
 
+JNINativeMethod loadavg_method[] = {
+    {CC"getLoadAverage",            CC"([DI)I",                 FN_PTR(Unsafe_Loadavg)}
+};
+
+JNINativeMethod prefetch_methods[] = {
+    {CC"prefetchRead",       CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
+    {CC"prefetchWrite",      CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)},
+    {CC"prefetchReadStatic", CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
+    {CC"prefetchWriteStatic",CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
+};
+
+JNINativeMethod memcopy_methods[] = {
+    {CC"copyMemory",         CC"("OBJ"J"OBJ"JJ)V",       FN_PTR(Unsafe_CopyMemory2)},
+    {CC"setMemory",          CC"("OBJ"JJB)V",            FN_PTR(Unsafe_SetMemory2)}
+};
+
+JNINativeMethod memcopy_methods_15[] = {
+    {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
+    {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)}
+};
+
+
 #undef CC
 #undef FN_PTR
 
@@ -1242,10 +1278,6 @@
   {
     ThreadToNativeFromVM ttnfv(thread);
     {
-      JNINativeMethod loadavg_method[] = {
-//      {CC"getLoadAverage",            CC"([DI)I",                 FN_PTR(Unsafe_Loadavg)}
-        {(char*) "getLoadAverage", (char*) "([DI)I", CAST_FROM_FN_PTR(void*, &Unsafe_Loadavg)}
-      };
       env->RegisterNatives(unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
       if (env->ExceptionOccurred()) {
         if (PrintMiscellaneous && (Verbose || WizardMode)) {
@@ -1255,16 +1287,6 @@
       }
     }
     {
-      JNINativeMethod prefetch_methods[] = {
-//      {CC"prefetchRead",       CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
-//      {CC"prefetchWrite",      CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)},
-//      {CC"prefetchReadStatic", CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
-//      {CC"prefetchWriteStatic",CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
-        {(char*) "prefetchRead",        (char*) "(Ljava/lang/Object;J)V", CAST_FROM_FN_PTR(void*, &Unsafe_PrefetchRead)},
-        {(char*) "prefetchWrite",       (char*) "(Ljava/lang/Object;J)V", CAST_FROM_FN_PTR(void*, &Unsafe_PrefetchWrite)},
-        {(char*) "prefetchReadStatic",  (char*) "(Ljava/lang/Object;J)V", CAST_FROM_FN_PTR(void*, &Unsafe_PrefetchRead)},
-        {(char*) "prefetchWriteStatic", (char*) "(Ljava/lang/Object;J)V", CAST_FROM_FN_PTR(void*, &Unsafe_PrefetchWrite)}
-      };
       env->RegisterNatives(unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
       if (env->ExceptionOccurred()) {
         if (PrintMiscellaneous && (Verbose || WizardMode)) {
@@ -1273,6 +1295,22 @@
         env->ExceptionClear();
       }
     }
+    {
+      env->RegisterNatives(unsafecls, memcopy_methods, sizeof(memcopy_methods)/sizeof(JNINativeMethod));
+      if (env->ExceptionOccurred()) {
+        if (PrintMiscellaneous && (Verbose || WizardMode)) {
+          tty->print_cr("Warning:  SDK 1.7 Unsafe.copyMemory not found.");
+        }
+        env->ExceptionClear();
+        env->RegisterNatives(unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
+        if (env->ExceptionOccurred()) {
+          if (PrintMiscellaneous && (Verbose || WizardMode)) {
+            tty->print_cr("Warning:  SDK 1.5 Unsafe.copyMemory not found.");
+          }
+          env->ExceptionClear();
+        }
+      }
+    }
     int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
     if (env->ExceptionOccurred()) {
       if (PrintMiscellaneous && (Verbose || WizardMode)) {
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)arguments.cpp	1.327 07/05/05 17:06:43 JVM"
+#pragma ident "@(#)arguments.cpp	1.328 07/05/17 16:05:13 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -41,6 +41,7 @@
 bool   Arguments::_has_profile                  = false;
 bool   Arguments::_has_alloc_profile            = false;
 uintx  Arguments::_initial_heap_size            = 0;
+uintx  Arguments::_min_heap_size                = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
 const char*  Arguments::_java_vendor_url_bug    = DEFAULT_VENDOR_URL_BUG;
@@ -1043,6 +1044,8 @@
     size_t prev_initial_size = initial_heap_size();
     if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) {
       set_initial_heap_size(min_new+OldSize);
+      // Currently minimum size and the initial heap sizes are the same.
+      set_min_heap_size(initial_heap_size());
       if (PrintGCDetails && Verbose) {
         warning("Initial heap size increased to " SIZE_FORMAT " M from "
                 SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
@@ -1133,21 +1136,13 @@
         !UseParNewGC &&
         !DumpSharedSpaces &&
         FLAG_IS_DEFAULT(UseParallelGC)) {
-#ifdef JVMPI_SUPPORT
-      // Except if someone uses -Xrun to load JVMPI or hprof.
-      // This might be too conservative.
-      if (!init_libraries_at_startup()) {
-#endif // JVMPI_SUPPORT
-	if (should_auto_select_low_pause_collector()) {
-          FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
-          set_cms_and_parnew_gc_flags();
-	} else {
-          FLAG_SET_ERGO(bool, UseParallelGC, true);
-	}
-        no_shared_spaces();
-#ifdef JVMPI_SUPPORT
+      if (should_auto_select_low_pause_collector()) {
+        FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
+        set_cms_and_parnew_gc_flags();
+      } else {
+        FLAG_SET_ERGO(bool, UseParallelGC, true);
       }
-#endif // JVMPI_SUPPORT
+      no_shared_spaces();
     }
 
     // This is here because the parallel collector could
@@ -1170,17 +1165,16 @@
       const uint64_t reasonable_fraction = 
 	os::physical_memory() / DefaultMaxRAMFraction;
       const uint64_t maximum_size = (uint64_t) DefaultMaxRAM;
-      size_t reasonable_size = 
+      size_t reasonable_max = 
 	(size_t) os::allocatable_physical_memory(reasonable_fraction);
-      if (reasonable_size > maximum_size) {
-	reasonable_size = maximum_size;
+      if (reasonable_max > maximum_size) {
+	reasonable_max = maximum_size;
       }
       if (PrintGCDetails && Verbose) {
 	// Cannot use gclog_or_tty yet.
 	tty->print_cr("  Max heap size for server class platform "
-		      SIZE_FORMAT, reasonable_size);	
+		      SIZE_FORMAT, reasonable_max);	
       }
-      FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_size);
       // If the initial_heap_size has not been set with -Xms,
       // then set it as fraction of size of physical memory
       // respecting the maximum and minimum sizes of the heap.  
@@ -1190,14 +1184,23 @@
 	const size_t reasonable_initial = 
 	  (size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
 	const size_t minimum_size = NewSize + OldSize;
-	set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_size),
+	set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
 				  minimum_size));
+        // Currently the minimum size and the initial heap sizes are the same.
+	set_min_heap_size(initial_heap_size());
 	if (PrintGCDetails && Verbose) {
 	  // Cannot use gclog_or_tty yet.
 	  tty->print_cr("  Initial heap size for server class platform "
 			SIZE_FORMAT, initial_heap_size());	
 	}
+      } else {
+	// An minimum size was specified on the command line.  Be sure
+	// that the maximum size is consistent.
+	if (initial_heap_size() > reasonable_max) {
+	  reasonable_max = initial_heap_size();
+	}
       }
+      FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max);
     }
 
     // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
@@ -1486,9 +1489,11 @@
   }
 
   // Note: only executed in non-PRODUCT mode
-  if (!UseAsyncConcMarkSweepGC && ExplicitGCInvokesConcurrent) {
+  if (!UseAsyncConcMarkSweepGC &&
+      (ExplicitGCInvokesConcurrent ||
+       ExplicitGCInvokesConcurrentAndUnloadsClasses)) {
     jio_fprintf(defaultStream::error_stream(),
-                "error: +ExplictGCInvokesConcurrent conflicts"
+                "error: +ExplictGCInvokesConcurrent[AndUnloadsClasses] conflicts"
                 " with -UseAsyncConcMarkSweepGC");
     status = false;
   }
@@ -1732,6 +1737,8 @@
         return JNI_EINVAL;
       }
       set_initial_heap_size((size_t) long_initial_heap_size);
+      // Currently the minimum size and the initial heap sizes are the same.
+      set_min_heap_size(initial_heap_size());
     // -Xmx
     } else if (match_option(option, "-Xmx", &tail)) {
       jlong long_max_heap_size = 0;
@@ -1980,6 +1987,8 @@
       if (FLAG_IS_DEFAULT(MaxHeapSize)) {
          FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
          set_initial_heap_size(MaxHeapSize);
+         // Currently the minimum size and the initial heap sizes are the same.
+         set_min_heap_size(initial_heap_size());
       }
       if (FLAG_IS_DEFAULT(NewSize)) {
          // Make the young generation 3/8ths of the total heap.
@@ -2047,6 +2056,11 @@
       // The last option must always win.
       FLAG_SET_CMDLINE(bool, NeverTenure, false);
       FLAG_SET_CMDLINE(bool, AlwaysTenure, true);
+    } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled", &tail) ||
+               match_option(option, "-XX:-CMSPermGenSweepingEnabled", &tail)) {
+      jio_fprintf(defaultStream::error_stream(),
+        "Please use CMSClassUnloadingEnabled in place of "
+        "CMSPermGenSweepingEnabled in the future\n");
     } else if (match_option(option, "-XX:+UseGCTimeLimit", &tail)) {    
       FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true);
       jio_fprintf(defaultStream::error_stream(),
@@ -2218,9 +2232,12 @@
     SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false));
     SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false));
   }
+#else
+  if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
+    FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
+  }
 #endif
 
-
   if (!check_vm_args_consistency()) {
     return JNI_ERR;
   }
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)arguments.hpp	1.101 07/05/05 17:06:42 JVM"
+#pragma ident "@(#)arguments.hpp	1.102 07/05/17 16:05:17 JVM"
 #endif
 /*
  * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -258,6 +258,7 @@
   static bool   _has_alloc_profile;
   static const char*  _gc_log_filename;
   static uintx  _initial_heap_size;
+  static uintx  _min_heap_size;
 
   // -Xrun arguments
   static AgentLibraryList _libraryList;
@@ -429,6 +430,8 @@
   // -Xms , -Xmx
   static uintx initial_heap_size()          { return _initial_heap_size; }
   static void  set_initial_heap_size(uintx v) { _initial_heap_size = v;  }
+  static uintx min_heap_size()              { return _min_heap_size; }
+  static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
   // -Xrun
   static AgentLibrary* libraries()          { return _libraryList.first(); }
--- a/hotspot/src/share/vm/runtime/biasedLocking.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)biasedLocking.cpp	1.13 07/05/05 17:06:43 JVM"
+#pragma ident "@(#)biasedLocking.cpp	1.14 07/05/17 16:05:19 JVM"
 #endif
 
 /*
@@ -30,12 +30,7 @@
 # include "incls/_biasedLocking.cpp.incl"
 
 static bool _biased_locking_enabled = false;
-static int _biased_lock_entry_count = 0;
-static int _anonymously_biased_lock_entry_count = 0;
-static int _rebiased_lock_entry_count = 0;
-static int _revoked_lock_entry_count = 0;
-static int _fast_path_entry_count = 0;
-static int _slow_path_entry_count = 0;
+BiasedLockingCounters BiasedLocking::_counters;
 
 static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
@@ -731,18 +726,34 @@
 }
 
 
-int* BiasedLocking::biased_lock_entry_count_addr()             { return &_biased_lock_entry_count; }
-int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
-int* BiasedLocking::rebiased_lock_entry_count_addr()           { return &_rebiased_lock_entry_count; }
-int* BiasedLocking::revoked_lock_entry_count_addr()            { return &_revoked_lock_entry_count; }
-int* BiasedLocking::fast_path_entry_count_addr()               { return &_fast_path_entry_count; }
-int* BiasedLocking::slow_path_entry_count_addr()               { return &_slow_path_entry_count; }
+int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
+int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
+int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
+int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
+int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
+int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
+int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
+
+
+// BiasedLockingCounters
 
-void BiasedLocking::print_counters() {
+int BiasedLockingCounters::slow_path_entry_count() {
+  if (_slow_path_entry_count != 0) {
+    return _slow_path_entry_count;
+  }
+  int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
+            _rebiased_lock_entry_count + _revoked_lock_entry_count +
+            _fast_path_entry_count;
+
+  return _total_entry_count - sum;
+}
+
+void BiasedLockingCounters::print_on(outputStream* st) {
+  tty->print_cr("# total entries: %d", _total_entry_count);
   tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
   tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
   tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
   tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
   tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
-  tty->print_cr("# slow path lock entries: %d", _slow_path_entry_count);
+  tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
 }
--- a/hotspot/src/share/vm/runtime/biasedLocking.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/biasedLocking.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)biasedLocking.hpp	1.10 07/05/05 17:06:43 JVM"
+#pragma ident "@(#)biasedLocking.hpp	1.11 07/05/17 16:05:21 JVM"
 #endif
 /*
  * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -104,8 +104,50 @@
 // causes the bias to be revoked without reaching a safepoint or,
 // again, a bulk heap sweep.
 
+// Biased locking counters
+class BiasedLockingCounters VALUE_OBJ_CLASS_SPEC {
+ private:
+  int _total_entry_count;
+  int _biased_lock_entry_count;
+  int _anonymously_biased_lock_entry_count;
+  int _rebiased_lock_entry_count;
+  int _revoked_lock_entry_count;
+  int _fast_path_entry_count;
+  int _slow_path_entry_count;
+
+ public:
+  BiasedLockingCounters() :
+    _total_entry_count(0),
+    _biased_lock_entry_count(0),
+    _anonymously_biased_lock_entry_count(0),
+    _rebiased_lock_entry_count(0),
+    _revoked_lock_entry_count(0),
+    _fast_path_entry_count(0),
+    _slow_path_entry_count(0) {}
+
+  int slow_path_entry_count(); // Compute this field if necessary
+
+  int* total_entry_count_addr()                   { return &_total_entry_count; }
+  int* biased_lock_entry_count_addr()             { return &_biased_lock_entry_count; }
+  int* anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
+  int* rebiased_lock_entry_count_addr()           { return &_rebiased_lock_entry_count; }
+  int* revoked_lock_entry_count_addr()            { return &_revoked_lock_entry_count; }
+  int* fast_path_entry_count_addr()               { return &_fast_path_entry_count; }
+  int* slow_path_entry_count_addr()               { return &_slow_path_entry_count; }
+
+  bool nonzero() { return _total_entry_count > 0; }
+
+  void print_on(outputStream* st);
+  void print() { print_on(tty); }
+};
+
+
 class BiasedLocking : AllStatic {
+private:
+  static BiasedLockingCounters _counters;
+
 public:
+  static int* total_entry_count_addr();
   static int* biased_lock_entry_count_addr();
   static int* anonymously_biased_lock_entry_count_addr();
   static int* rebiased_lock_entry_count_addr();
@@ -137,7 +179,8 @@
   static void revoke_at_safepoint(Handle obj);
   static void revoke_at_safepoint(GrowableArray<Handle>* objs);
 
-  static void print_counters();
+  static void print_counters() { _counters.print(); }
+  static BiasedLockingCounters* counters() { return &_counters; }
 
   // These routines are GC-related and should not be called by end
   // users. GCs which do not do preservation of mark words do not need
--- a/hotspot/src/share/vm/runtime/classFileError.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)classFileError.cpp	1.12 07/05/05 17:06:44 JVM"
-#endif
-/*
- * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_classFileError.cpp.incl"
-
-// Keep these in a separate file to prevent inlining
-
-void ClassFileParser::classfile_parse_error(const char* msg, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
-                       msg, _class_name->as_C_string());
-}
-
-void ClassFileParser::classfile_parse_error(const char* msg, int index, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
-                       msg, index, _class_name->as_C_string());
-}
-
-void ClassFileParser::classfile_parse_error(const char* msg, const char *name, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
-                       msg, name, _class_name->as_C_string());
-}
-
-void ClassFileParser::classfile_parse_error(const char* msg, int index, const char *name, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbolHandles::java_lang_ClassFormatError(),
-                       msg, index, name, _class_name->as_C_string());
-}
-
-void StackMapStream::stackmap_format_error(const char* msg, TRAPS) {
-  ResourceMark rm(THREAD);
-  Exceptions::fthrow(
-    THREAD_AND_LOCATION,
-    vmSymbolHandles::java_lang_ClassFormatError(),
-    "StackMapTable format error: %s", msg
-  );
-}
--- a/hotspot/src/share/vm/runtime/classFileParser.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4033 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)classFileParser.cpp	1.277 07/05/05 17:06:46 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_classFileParser.cpp.incl"
-
-// We generally try to create the oops directly when parsing, rather than allocating
-// temporary data structures and copying the bytes twice. A temporary area is only
-// needed when parsing utf8 entries in the constant pool and when parsing line number
-// tables.
-
-// We add assert in debug mode when class format is not checked.
-
-#define JAVA_CLASSFILE_MAGIC              0xCAFEBABE
-#define JAVA_MIN_SUPPORTED_VERSION        45
-#define JAVA_MAX_SUPPORTED_VERSION        50
-#define JAVA_MAX_SUPPORTED_MINOR_VERSION  0
-
-// Used for two backward compatibility reasons:
-// - to check for new additions to the class file format in JDK1.5
-// - to check for bug fixes in the format checker in JDK1.5
-#define JAVA_1_5_VERSION                  49
-
-// Used for backward compatibility reasons:
-// - to check for javac bug fixes that happened after 1.5
-#define JAVA_6_VERSION                    50 
-
-
-void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS) {
-  // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
-  // this function (_current can be allocated in a register, with scalar
-  // replacement of aggregates). The _current pointer is copied back to
-  // stream() when this function returns. DON'T call another method within
-  // this method that uses stream().
-  ClassFileStream* cfs0 = stream();
-  ClassFileStream cfs1 = *cfs0;
-  ClassFileStream* cfs = &cfs1;
-#ifdef ASSERT
-  u1* old_current = cfs0->current();
-#endif
-
-  // Used for batching symbol allocations.
-  const char* names[SymbolTable::symbol_alloc_batch_size];
-  int lengths[SymbolTable::symbol_alloc_batch_size];
-  int indices[SymbolTable::symbol_alloc_batch_size];
-  unsigned int hashValues[SymbolTable::symbol_alloc_batch_size];
-  int names_count = 0;
-    
-  // parsing  Index 0 is unused
-  for (int index = 1; index < length; index++) {
-    // Each of the following case guarantees one more byte in the stream
-    // for the following tag or the access_flags following constant pool,
-    // so we don't need bounds-check for reading tag.
-    u1 tag = cfs->get_u1_fast();
-    switch (tag) {
-      case JVM_CONSTANT_Class :
-        {
-          cfs->guarantee_more(3, CHECK);  // name_index, tag/access_flags
-          u2 name_index = cfs->get_u2_fast();
-          cp->klass_index_at_put(index, name_index);
-        }
-        break;
-      case JVM_CONSTANT_Fieldref :
-        {
-          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
-          u2 class_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          cp->field_at_put(index, class_index, name_and_type_index);
-        }
-        break;
-      case JVM_CONSTANT_Methodref :
-        {
-          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
-          u2 class_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          cp->method_at_put(index, class_index, name_and_type_index);
-        }
-        break;
-      case JVM_CONSTANT_InterfaceMethodref :
-        {
-          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
-          u2 class_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          cp->interface_method_at_put(index, class_index, name_and_type_index);
-        }
-        break;
-      case JVM_CONSTANT_String :
-        {
-          cfs->guarantee_more(3, CHECK);  // string_index, tag/access_flags
-          u2 string_index = cfs->get_u2_fast();
-          cp->string_index_at_put(index, string_index);
-        }
-        break;
-      case JVM_CONSTANT_Integer :
-        {
-          cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
-          u4 bytes = cfs->get_u4_fast();
-          cp->int_at_put(index, (jint) bytes);
-        }
-        break;
-      case JVM_CONSTANT_Float :
-        {
-          cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
-          u4 bytes = cfs->get_u4_fast();
-          cp->float_at_put(index, *(jfloat*)&bytes);
-        }
-        break;
-      case JVM_CONSTANT_Long :
-        // A mangled type might cause you to overrun allocated memory
-        guarantee_property(index+1 < length, 
-                           "Invalid constant pool entry %u in class file %s", 
-                           index, CHECK);
-        {
-          cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
-          u8 bytes = cfs->get_u8_fast();
-          cp->long_at_put(index, bytes);
-        }
-        index++;   // Skip entry following eigth-byte constant, see JVM book p. 98
-        break;
-      case JVM_CONSTANT_Double :
-        // A mangled type might cause you to overrun allocated memory
-        guarantee_property(index+1 < length, 
-                           "Invalid constant pool entry %u in class file %s", 
-                           index, CHECK);
-        {
-          cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
-          u8 bytes = cfs->get_u8_fast();
-          cp->double_at_put(index, *(jdouble*)&bytes);
-        }
-        index++;   // Skip entry following eigth-byte constant, see JVM book p. 98
-        break;
-      case JVM_CONSTANT_NameAndType :
-        {
-          cfs->guarantee_more(5, CHECK);  // name_index, signature_index, tag/access_flags
-          u2 name_index = cfs->get_u2_fast();
-          u2 signature_index = cfs->get_u2_fast();
-          cp->name_and_type_at_put(index, name_index, signature_index);
-        }
-        break;
-      case JVM_CONSTANT_Utf8 :
-        {
-          cfs->guarantee_more(2, CHECK);  // utf8_length
-          u2  utf8_length = cfs->get_u2_fast();
-          u1* utf8_buffer = cfs->get_u1_buffer();
-          assert(utf8_buffer != NULL, "null utf8 buffer");
-          // Got utf8 string, guarantee utf8_length+1 bytes, set stream position forward.
-          cfs->guarantee_more(utf8_length+1, CHECK);  // utf8 string, tag/access_flags
-          cfs->skip_u1_fast(utf8_length);
-          // Before storing the symbol, make sure it's legal
-          if (_need_verify) {
-            verify_legal_utf8((unsigned char*)utf8_buffer, utf8_length, CHECK);
-          }
-
-          unsigned int hash;
-          symbolOop result = SymbolTable::lookup_only((char*)utf8_buffer, utf8_length, hash);
-          if (result == NULL) {
-            names[names_count] = (char*)utf8_buffer;
-            lengths[names_count] = utf8_length;
-            indices[names_count] = index;
-            hashValues[names_count++] = hash;
-            if (names_count == SymbolTable::symbol_alloc_batch_size) {
-              oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK);
-              names_count = 0;
-            }
-          } else {
-            cp->symbol_at_put(index, result);
-          }
-        }
-        break;
-      default:
-        classfile_parse_error(
-          "Unknown constant tag %u in class file %s", tag, CHECK);
-        break;
-    }
-  }
-
-  // Allocate the remaining symbols
-  if (names_count > 0) {
-    oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK);
-  }
-
-  // Copy _current pointer of local copy back to stream().
-#ifdef ASSERT
-  assert(cfs0->current() == old_current, "non-exclusive use of stream()");
-#endif
-  cfs0->set_current(cfs1.current());
-}
-
-bool inline valid_cp_range(int index, int length) { return (index > 0 && index < length); }
-
-constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
-  ClassFileStream* cfs = stream();
-  constantPoolHandle nullHandle;
-
-  cfs->guarantee_more(3, CHECK_(nullHandle)); // length, first cp tag
-  u2 length = cfs->get_u2_fast();
-  guarantee_property(
-    length >= 1, "Illegal constant pool size %u in class file %s", 
-    length, CHECK_(nullHandle));
-  constantPoolOop constant_pool =
-                      oopFactory::new_constantPool(length, CHECK_(nullHandle));
-  constantPoolHandle cp (THREAD, constant_pool);
-  
-  cp->set_partially_loaded();    // Enables heap verify to work on partial constantPoolOops
-
-  // parsing constant pool entries
-  parse_constant_pool_entries(cp, length, CHECK_(nullHandle));
-
-  int index = 1;  // declared outside of loops for portability
-
-  // first verification pass - validate cross references and fixup class and string constants
-  for (index = 1; index < length; index++) {          // Index 0 is unused
-    switch (cp->tag_at(index).value()) {
-      case JVM_CONSTANT_Class :
-        ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
-        break;
-      case JVM_CONSTANT_Fieldref :
-        // fall through
-      case JVM_CONSTANT_Methodref :
-        // fall through
-      case JVM_CONSTANT_InterfaceMethodref : {
-        if (!_need_verify) break;
-        int klass_ref_index = cp->klass_ref_index_at(index);
-        int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
-        check_property(valid_cp_range(klass_ref_index, length) &&
-                       cp->tag_at(klass_ref_index).is_klass_reference(), 
-                       "Invalid constant pool index %u in class file %s", 
-                       klass_ref_index, 
-                       CHECK_(nullHandle));
-        check_property(valid_cp_range(name_and_type_ref_index, length) &&
-                       cp->tag_at(name_and_type_ref_index).is_name_and_type(), 
-                       "Invalid constant pool index %u in class file %s", 
-                       name_and_type_ref_index,
-                       CHECK_(nullHandle));
-        break;
-      }
-      case JVM_CONSTANT_String :
-        ShouldNotReachHere();     // Only JVM_CONSTANT_StringIndex should be present
-        break;
-      case JVM_CONSTANT_Integer :
-        break;
-      case JVM_CONSTANT_Float :
-        break;
-      case JVM_CONSTANT_Long :
-      case JVM_CONSTANT_Double :
-        index++;
-        check_property(
-          (index < length && cp->tag_at(index).is_invalid()), 
-          "Improper constant pool long/double index %u in class file %s", 
-          index, CHECK_(nullHandle));
-        break;
-      case JVM_CONSTANT_NameAndType : {
-        if (!_need_verify) break;
-        int name_ref_index = cp->name_ref_index_at(index);
-        int signature_ref_index = cp->signature_ref_index_at(index);
-        check_property(
-          valid_cp_range(name_ref_index, length) && 
-            cp->tag_at(name_ref_index).is_utf8(), 
-          "Invalid constant pool index %u in class file %s", 
-          name_ref_index, CHECK_(nullHandle));
-        check_property(
-          valid_cp_range(signature_ref_index, length) && 
-            cp->tag_at(signature_ref_index).is_utf8(), 
-          "Invalid constant pool index %u in class file %s", 
-          signature_ref_index, CHECK_(nullHandle));
-        break;
-      }
-      case JVM_CONSTANT_Utf8 :
-        break;
-      case JVM_CONSTANT_UnresolvedClass :	  // fall-through
-      case JVM_CONSTANT_UnresolvedClassInError:
-        ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
-        break;
-      case JVM_CONSTANT_ClassIndex :
-        {
-          int class_index = cp->klass_index_at(index);
-          check_property(
-            valid_cp_range(class_index, length) && 
-              cp->tag_at(class_index).is_utf8(), 
-            "Invalid constant pool index %u in class file %s", 
-            class_index, CHECK_(nullHandle));
-          cp->unresolved_klass_at_put(index, cp->symbol_at(class_index));
-        }
-        break;
-      case JVM_CONSTANT_UnresolvedString :
-        ShouldNotReachHere();     // Only JVM_CONSTANT_StringIndex should be present
-        break;
-      case JVM_CONSTANT_StringIndex :
-        {
-          int string_index = cp->string_index_at(index);
-          check_property(
-            valid_cp_range(string_index, length) && 
-              cp->tag_at(string_index).is_utf8(), 
-            "Invalid constant pool index %u in class file %s", 
-            string_index, CHECK_(nullHandle));
-          symbolOop sym = cp->symbol_at(string_index);
-          cp->unresolved_string_at_put(index, sym);
-        }
-        break;
-      default:
-        fatal1("bad constant pool tag value %u", cp->tag_at(index).value());
-        ShouldNotReachHere();
-        break;
-    } // end of switch
-  } // end of for
-
-  if (!_need_verify) {
-    return cp;
-  }
-
-  // second verification pass - checks the strings are of the right format.
-  for (index = 1; index < length; index++) {
-    jbyte tag = cp->tag_at(index).value();
-    switch (tag) {
-      case JVM_CONSTANT_UnresolvedClass: {
-        symbolHandle class_name(THREAD, cp->unresolved_klass_at(index));
-        verify_legal_class_name(class_name, CHECK_(nullHandle));
-        break;
-      }
-      case JVM_CONSTANT_Fieldref:
-      case JVM_CONSTANT_Methodref:
-      case JVM_CONSTANT_InterfaceMethodref: {
-        int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
-        // already verified to be utf8
-        int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);  
-        // already verified to be utf8
-        int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index); 
-        symbolHandle name(THREAD, cp->symbol_at(name_ref_index));
-        symbolHandle signature(THREAD, cp->symbol_at(signature_ref_index));
-        if (tag == JVM_CONSTANT_Fieldref) {
-          verify_legal_field_name(name, CHECK_(nullHandle));
-          verify_legal_field_signature(name, signature, CHECK_(nullHandle));
-        } else {
-          verify_legal_method_name(name, CHECK_(nullHandle));
-          verify_legal_method_signature(name, signature, CHECK_(nullHandle));
-          if (tag == JVM_CONSTANT_Methodref) {
-            // 4509014: If a class method name begins with '<', it must be "<init>".
-            assert(!name.is_null(), "method name in constant pool is null");
-            unsigned int name_len = name->utf8_length();
-            assert(name_len > 0, "bad method name");  // already verified as legal name
-            if (name->byte_at(0) == '<') {
-              if (name() != vmSymbols::object_initializer_name()) {
-                classfile_parse_error(
-                  "Bad method name at constant pool index %u in class file %s", 
-                  name_ref_index, CHECK_(nullHandle));
-              }
-            }
-          }
-        }
-        break;
-      }                                                  
-    }  // end of switch
-  }  // end of for
-  
-  return cp;
-}
-
-
-class NameSigHash: public ResourceObj {
- public:
-  symbolOop     _name;       // name
-  symbolOop     _sig;        // signature
-  NameSigHash*  _next;       // Next entry in hash table
-};
-
-
-#define HASH_ROW_SIZE 256
-
-unsigned int hash(symbolOop name, symbolOop sig) {
-  unsigned int raw_hash = 0;
-  raw_hash += ((unsigned int)(uintptr_t)name) >> (LogHeapWordSize + 2);
-  raw_hash += ((unsigned int)(uintptr_t)sig) >> LogHeapWordSize;
-
-  return (raw_hash + (unsigned int)(uintptr_t)name) % HASH_ROW_SIZE;
-}
-
-
-void initialize_hashtable(NameSigHash** table) {
-  memset((void*)table, 0, sizeof(NameSigHash*) * HASH_ROW_SIZE);
-}
-
-// Return false if the name/sig combination is found in table.
-// Return true if no duplicate is found. And name/sig is added as a new entry in table.
-// The old format checker uses heap sort to find duplicates.
-// NOTE: caller should guarantee that GC doesn't happen during the life cycle
-// of table since we don't expect symbolOop's to move.
-bool put_after_lookup(symbolOop name, symbolOop sig, NameSigHash** table) {
-  assert(name != NULL, "name in constant pool is NULL");
-
-  // First lookup for duplicates
-  int index = hash(name, sig);
-  NameSigHash* entry = table[index];
-  while (entry != NULL) {
-    if (entry->_name == name && entry->_sig == sig) {
-      return false;
-    }
-    entry = entry->_next;
-  }
-
-  // No duplicate is found, allocate a new entry and fill it.
-  entry = new NameSigHash();
-  entry->_name = name;
-  entry->_sig = sig;
- 
-  // Insert into hash table
-  entry->_next = table[index];
-  table[index] = entry;
-
-  return true;
-}
-
-
-objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
-                                                 int length,
-                                                 Handle class_loader, 
-                                                 Handle protection_domain,
-                                                 PerfTraceTime* vmtimer,
-                                                 symbolHandle class_name,
-                                                 TRAPS) {  
-  ClassFileStream* cfs = stream();
-  assert(length > 0, "only called for length>0");
-  objArrayHandle nullHandle;
-  objArrayOop interface_oop = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
-  objArrayHandle interfaces (THREAD, interface_oop);
-
-  int index;
-  for (index = 0; index < length; index++) {
-    u2 interface_index = cfs->get_u2(CHECK_(nullHandle));
-    check_property(
-      valid_cp_range(interface_index, cp->length()) && 
-        cp->tag_at(interface_index).is_unresolved_klass(), 
-      "Interface name has bad constant pool index %u in class file %s", 
-      interface_index, CHECK_(nullHandle));
-    symbolHandle unresolved_klass (THREAD, cp->klass_name_at(interface_index));
-
-    // Don't need to check legal name because it's checked when parsing constant pool.
-    // But need to make sure it's not an array type.
-    guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY, 
-                       "Bad interface name in class file %s", CHECK_(nullHandle));
-
-    vmtimer->suspend();  // do not count recursive loading twice
-    // Call resolve_super so classcircularity is checked
-    klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
-                  unresolved_klass, class_loader, protection_domain, 
-                  false, CHECK_(nullHandle));
-    KlassHandle interf (THREAD, k);
-    vmtimer->resume();
-
-    if (!Klass::cast(interf())->is_interface()) {
-      THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", nullHandle);
-    }
-    interfaces->obj_at_put(index, interf());
-  }
-
-  if (!_need_verify || length <= 1) {
-    return interfaces;
-  }
-
-  // Check if there's any duplicates in interfaces
-  ResourceMark rm(THREAD);
-  NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(
-    THREAD, NameSigHash*, HASH_ROW_SIZE);
-  initialize_hashtable(interface_names);
-  bool dup = false;
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    for (index = 0; index < length; index++) {
-      klassOop k = (klassOop)interfaces->obj_at(index);
-      symbolOop name = instanceKlass::cast(k)->name();
-      // If no duplicates, add (name, NULL) in hashtable interface_names.
-      if (!put_after_lookup(name, NULL, interface_names)) {
-        dup = true;
-        break;
-      }
-    }
-  }
-  if (dup) {
-    classfile_parse_error("Duplicate interface name in class file %s",
-                          CHECK_(nullHandle));
-  }
-
-  return interfaces;
-}
-
-
-void ClassFileParser::verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS) {
-  // Make sure the constant pool entry is of a type appropriate to this field
-  guarantee_property(
-    (constantvalue_index > 0 && 
-      constantvalue_index < cp->length()), 
-    "Bad initial value index %u in ConstantValue attribute in class file %s", 
-    constantvalue_index, CHECK); 
-  constantTag value_type = cp->tag_at(constantvalue_index);
-  switch ( cp->basic_type_for_signature_at(signature_index) ) {
-    case T_LONG:
-      guarantee_property(value_type.is_long(), "Inconsistent constant value type in class file %s", CHECK);
-      break;
-    case T_FLOAT:
-      guarantee_property(value_type.is_float(), "Inconsistent constant value type in class file %s", CHECK);
-      break;
-    case T_DOUBLE:
-      guarantee_property(value_type.is_double(), "Inconsistent constant value type in class file %s", CHECK);
-      break;
-    case T_BYTE: case T_CHAR: case T_SHORT: case T_BOOLEAN: case T_INT:
-      guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
-      break;
-    case T_OBJECT: 
-      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18) 
-                         && (value_type.is_string() || value_type.is_unresolved_string())),
-                         "Bad string initial value in class file %s", CHECK);
-      break;
-    default:
-      classfile_parse_error(
-        "Unable to set initial value %u in class file %s", 
-        constantvalue_index, CHECK);
-  }
-}
-
-
-// Parse attributes for a field.
-void ClassFileParser::parse_field_attributes(constantPoolHandle cp,
-                                             u2 attributes_count,
-                                             bool is_static, u2 signature_index,
-                                             u2* constantvalue_index_addr,
-                                             bool* is_synthetic_addr,
-                                             u2* generic_signature_index_addr,
-                                             typeArrayHandle* field_annotations,
-                                             TRAPS) {
-  ClassFileStream* cfs = stream();
-  assert(attributes_count > 0, "length should be greater than 0");
-  u2 constantvalue_index = 0;
-  u2 generic_signature_index = 0;
-  bool is_synthetic = false;
-  u1* runtime_visible_annotations = NULL;
-  int runtime_visible_annotations_length = 0;
-  u1* runtime_invisible_annotations = NULL;
-  int runtime_invisible_annotations_length = 0;
-  while (attributes_count--) {
-    cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
-    u2 attribute_name_index = cfs->get_u2_fast();
-    u4 attribute_length = cfs->get_u4_fast();
-    check_property(valid_cp_range(attribute_name_index, cp->length()) &&
-                   cp->tag_at(attribute_name_index).is_utf8(), 
-                   "Invalid field attribute index %u in class file %s", 
-                   attribute_name_index,
-                   CHECK);
-    symbolOop attribute_name = cp->symbol_at(attribute_name_index);
-    if (is_static && attribute_name == vmSymbols::tag_constant_value()) { 
-      // ignore if non-static   
-      if (constantvalue_index != 0) {
-        classfile_parse_error("Duplicate ConstantValue attribute in class file %s", CHECK);
-      }
-      check_property(
-        attribute_length == 2, 
-        "Invalid ConstantValue field attribute length %u in class file %s", 
-        attribute_length, CHECK);
-      constantvalue_index = cfs->get_u2(CHECK);
-      if (_need_verify) { 
-        verify_constantvalue(constantvalue_index, signature_index, cp, CHECK); 
-      }
-    } else if (attribute_name == vmSymbols::tag_synthetic()) {
-      if (attribute_length != 0) {
-        classfile_parse_error(
-          "Invalid Synthetic field attribute length %u in class file %s", 
-          attribute_length, CHECK);
-      }
-      is_synthetic = true;
-    } else if (attribute_name == vmSymbols::tag_deprecated()) { // 4276120
-      if (attribute_length != 0) {
-        classfile_parse_error(
-          "Invalid Deprecated field attribute length %u in class file %s", 
-          attribute_length, CHECK);
-      }
-    } else if (_major_version >= JAVA_1_5_VERSION) {
-      if (attribute_name == vmSymbols::tag_signature()) {
-        if (attribute_length != 2) {
-          classfile_parse_error(
-            "Wrong size %u for field's Signature attribute in class file %s", 
-            attribute_length, CHECK);
-        }
-        generic_signature_index = cfs->get_u2(CHECK);
-      } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
-        runtime_visible_annotations_length = attribute_length;
-        runtime_visible_annotations = cfs->get_u1_buffer();
-        assert(runtime_visible_annotations != NULL, "null visible annotations");
-        cfs->skip_u1(runtime_visible_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
-        runtime_invisible_annotations_length = attribute_length;
-        runtime_invisible_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_annotations != NULL, "null invisible annotations");
-        cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
-      } else {
-        cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
-      }
-    } else {
-      cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes			
-    }
-  }
-
-  *constantvalue_index_addr = constantvalue_index;
-  *is_synthetic_addr = is_synthetic;
-  *generic_signature_index_addr = generic_signature_index;
-  *field_annotations = assemble_annotations(runtime_visible_annotations,
-                                            runtime_visible_annotations_length,
-                                            runtime_invisible_annotations,
-                                            runtime_invisible_annotations_length,
-                                            CHECK);
-  return;
-}
-  
-
-// Field allocation types. Used for computing field offsets.
-
-enum FieldAllocationType {
-  STATIC_OOP,		// Oops
-  STATIC_BYTE,		// Boolean, Byte, char
-  STATIC_SHORT,		// shorts
-  STATIC_WORD,		// ints
-  STATIC_DOUBLE,	// long or double
-  STATIC_ALIGNED_DOUBLE,// aligned long or double
-  NONSTATIC_OOP,	 
-  NONSTATIC_BYTE,
-  NONSTATIC_SHORT,
-  NONSTATIC_WORD,
-  NONSTATIC_DOUBLE,
-  NONSTATIC_ALIGNED_DOUBLE
-};
-
-
-struct FieldAllocationCount {
-  int static_oop_count;
-  int static_byte_count;
-  int static_short_count;
-  int static_word_count;
-  int static_double_count;
-  int nonstatic_oop_count;
-  int nonstatic_byte_count;
-  int nonstatic_short_count;
-  int nonstatic_word_count;
-  int nonstatic_double_count;
-};
-
-typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface, 
-                                              struct FieldAllocationCount *fac,
-                                              objArrayHandle* fields_annotations, TRAPS) {
-  ClassFileStream* cfs = stream();
-  typeArrayHandle nullHandle;
-  cfs->guarantee_more(2, CHECK_(nullHandle));  // length
-  u2 length = cfs->get_u2_fast();
-  // Tuples of shorts [access, name index, sig index, initial value index, byte offset, generic signature index]
-  typeArrayOop new_fields = oopFactory::new_permanent_shortArray(length*instanceKlass::next_offset, CHECK_(nullHandle));
-  typeArrayHandle fields(THREAD, new_fields);
- 
-  int index = 0;
-  typeArrayHandle field_annotations;
-  for (int n = 0; n < length; n++) {
-    cfs->guarantee_more(8, CHECK_(nullHandle));  // access_flags, name_index, descriptor_index, attributes_count
-
-    AccessFlags access_flags;
-    jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS;
-    verify_legal_field_modifiers(flags, is_interface, CHECK_(nullHandle));
-    access_flags.set_flags(flags);
-
-    u2 name_index = cfs->get_u2_fast();
-    int cp_size = cp->length();
-    check_property(
-      valid_cp_range(name_index, cp_size) && cp->tag_at(name_index).is_utf8(), 
-      "Invalid constant pool index %u for field name in class file %s", 
-      name_index, CHECK_(nullHandle));
-    symbolHandle name(THREAD, cp->symbol_at(name_index));
-    verify_legal_field_name(name, CHECK_(nullHandle));
-
-    u2 signature_index = cfs->get_u2_fast();
-    check_property(
-      valid_cp_range(signature_index, cp_size) && 
-        cp->tag_at(signature_index).is_utf8(), 
-      "Invalid constant pool index %u for field signature in class file %s", 
-      signature_index, CHECK_(nullHandle));
-    symbolHandle sig(THREAD, cp->symbol_at(signature_index));
-    verify_legal_field_signature(name, sig, CHECK_(nullHandle));
-
-    u2 constantvalue_index = 0;
-    bool is_synthetic = false;
-    u2 generic_signature_index = 0;
-    bool is_static = access_flags.is_static();
-
-    u2 attributes_count = cfs->get_u2_fast();
-    if (attributes_count > 0) {
-      parse_field_attributes(cp, attributes_count, is_static, signature_index,
-                             &constantvalue_index, &is_synthetic,
-                             &generic_signature_index, &field_annotations,
-                             CHECK_(nullHandle));
-      if (field_annotations.not_null()) {
-        if (fields_annotations->is_null()) {
-          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
-          *fields_annotations = objArrayHandle(THREAD, md);
-        }
-        (*fields_annotations)->obj_at_put(n, field_annotations());
-      }
-      if (is_synthetic) {
-        access_flags.set_is_synthetic();
-      }
-    }
-    
-    fields->short_at_put(index++, access_flags.as_short());
-    fields->short_at_put(index++, name_index);
-    fields->short_at_put(index++, signature_index);
-    fields->short_at_put(index++, constantvalue_index);	
-
-    // Remember how many oops we encountered and compute allocation type
-    BasicType type = cp->basic_type_for_signature_at(signature_index);
-    FieldAllocationType atype;
-    if ( is_static ) {
-      switch ( type ) {
-        case  T_BOOLEAN:
-        case  T_BYTE:
-          fac->static_byte_count++;
-          atype = STATIC_BYTE;
-          break;
-        case  T_LONG:
-        case  T_DOUBLE:
-          if (Universe::field_type_should_be_aligned(type)) {
-            atype = STATIC_ALIGNED_DOUBLE;
-          } else {
-            atype = STATIC_DOUBLE;
-          }
-          fac->static_double_count++;
-          break;
-        case  T_CHAR:     
-        case  T_SHORT: 
-          fac->static_short_count++;
-          atype = STATIC_SHORT;
-          break;
-        case  T_FLOAT:
-        case  T_INT:
-          fac->static_word_count++;
-          atype = STATIC_WORD;
-          break;
-        case  T_ARRAY: 
-        case  T_OBJECT:
-          fac->static_oop_count++;
-          atype = STATIC_OOP;
-          break;
-        case  T_ADDRESS: 
-        case  T_VOID:
-        default: 
-          assert(0, "bad field type");
-      }
-    } else {
-      switch ( type ) {
-        case  T_BOOLEAN:
-        case  T_BYTE:
-          fac->nonstatic_byte_count++;
-          atype = NONSTATIC_BYTE;
-          break;
-        case  T_LONG:
-        case  T_DOUBLE:
-          if (Universe::field_type_should_be_aligned(type)) {
-            atype = NONSTATIC_ALIGNED_DOUBLE;
-          } else {
-            atype = NONSTATIC_DOUBLE;
-          }
-          fac->nonstatic_double_count++;
-          break;
-        case  T_CHAR:     
-        case  T_SHORT: 
-          fac->nonstatic_short_count++;
-          atype = NONSTATIC_SHORT;
-          break;
-        case  T_FLOAT:
-        case  T_INT:
-          fac->nonstatic_word_count++;
-          atype = NONSTATIC_WORD;
-          break;
-        case  T_ARRAY: 
-        case  T_OBJECT:
-          fac->nonstatic_oop_count++;
-          atype = NONSTATIC_OOP;
-          break;
-        case  T_ADDRESS: 
-        case  T_VOID:
-        default: 
-          assert(0, "bad field type");
-      }
-    }
-
-    // The correct offset is computed later (all oop fields will be located together)
-    // We temporarily store the allocation type in the offset field
-    fields->short_at_put(index++, atype);
-    fields->short_at_put(index++, 0);  // Clear out high word of byte offset
-    fields->short_at_put(index++, generic_signature_index);
-  }
-
-  if (_need_verify && length > 1) {
-    // Check duplicated fields
-    ResourceMark rm(THREAD);
-    NameSigHash** names_and_sigs = NEW_RESOURCE_ARRAY_IN_THREAD(
-      THREAD, NameSigHash*, HASH_ROW_SIZE);
-    initialize_hashtable(names_and_sigs);
-    bool dup = false;
-    {
-      debug_only(No_Safepoint_Verifier nsv;)
-      for (int i = 0; i < length*instanceKlass::next_offset; i += instanceKlass::next_offset) {
-        int name_index = fields->ushort_at(i + instanceKlass::name_index_offset);
-        symbolOop name = cp->symbol_at(name_index);
-        int sig_index = fields->ushort_at(i + instanceKlass::signature_index_offset);
-        symbolOop sig = cp->symbol_at(sig_index);
-        // If no duplicates, add name/signature in hashtable names_and_sigs.
-        if (!put_after_lookup(name, sig, names_and_sigs)) {
-          dup = true;
-          break;
-        }
-      }
-    }
-    if (dup) {
-      classfile_parse_error("Duplicate field name&signature in class file %s",
-                            CHECK_(nullHandle));
-    }
-  }
-
-  return fields;
-}
-
-
-static void copy_u2_with_conversion(u2* dest, u2* src, int length) {
-  while (length-- > 0) {
-    *dest++ = Bytes::get_Java_u2((u1*) (src++));
-  }
-}
-
-
-typeArrayHandle ClassFileParser::parse_exception_table(u4 code_length, 
-                                                       u4 exception_table_length, 
-                                                       constantPoolHandle cp, 
-                                                       TRAPS) {
-  ClassFileStream* cfs = stream();
-  typeArrayHandle nullHandle;
-
-  // 4-tuples of ints [start_pc, end_pc, handler_pc, catch_type index]
-  typeArrayOop eh = oopFactory::new_permanent_intArray(exception_table_length*4, CHECK_(nullHandle));
-  typeArrayHandle exception_handlers = typeArrayHandle(THREAD, eh);
-  
-  int index = 0;
-  cfs->guarantee_more(8 * exception_table_length, CHECK_(nullHandle)); // start_pc, end_pc, handler_pc, catch_type_index
-  for (unsigned int i = 0; i < exception_table_length; i++) {
-    u2 start_pc = cfs->get_u2_fast();
-    u2 end_pc = cfs->get_u2_fast();
-    u2 handler_pc = cfs->get_u2_fast();
-    u2 catch_type_index = cfs->get_u2_fast();
-    // Will check legal target after parsing code array in verifier.
-    if (_need_verify) {
-      guarantee_property((start_pc < end_pc) && (end_pc <= code_length), 
-                         "Illegal exception table range in class file %s", CHECK_(nullHandle)); 
-      guarantee_property(handler_pc < code_length, 
-                         "Illegal exception table handler in class file %s", CHECK_(nullHandle)); 
-      if (catch_type_index != 0) {
-        guarantee_property(valid_cp_range(catch_type_index, cp->length()) && 
-                          (cp->tag_at(catch_type_index).is_klass() || 
-                           cp->tag_at(catch_type_index).is_unresolved_klass()),
-                           "Catch type in exception table has bad constant type in class file %s", CHECK_(nullHandle));
-      }
-    }	      
-    exception_handlers->int_at_put(index++, start_pc); 
-    exception_handlers->int_at_put(index++, end_pc);  
-    exception_handlers->int_at_put(index++, handler_pc);  
-    exception_handlers->int_at_put(index++, catch_type_index);  
-  }
-  return exception_handlers;
-}
-
-u_char* ClassFileParser::parse_linenumber_table(u4 code_attribute_length, 
-                                                u4 code_length,
-                                                int* compressed_linenumber_table_size, 
-                                                TRAPS) {
-  ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_NULL);  // linenumber_table_length
-  unsigned int linenumber_table_length = cfs->get_u2_fast();
-
-  // Verify line number attribute and table length
-  if (_need_verify) {
-    guarantee_property(code_attribute_length ==
-                       (sizeof(u2) /* linenumber table length */ +
-                        linenumber_table_length*(sizeof(u2) /* start_pc */ +
-                        sizeof(u2) /* line_number */)),
-                       "LineNumberTable attribute has wrong length in class file %s", CHECK_NULL);
-  }          
-  
-  u_char* compressed_linenumber_table = NULL;
-  if (linenumber_table_length > 0) {
-    // initial_size large enough
-    int initial_size = linenumber_table_length * sizeof(u2) * 2;
-    CompressedLineNumberWriteStream c_stream =
-      (initial_size <= fixed_buffer_size) ? 
-      CompressedLineNumberWriteStream(_fixed_buffer, fixed_buffer_size) :
-      CompressedLineNumberWriteStream(initial_size);
-    cfs->guarantee_more(4 * linenumber_table_length, CHECK_NULL);  // bci, line
-    while (linenumber_table_length-- > 0) {
-      u2 bci  = cfs->get_u2_fast(); // start_pc
-      u2 line = cfs->get_u2_fast(); // line_number
-      guarantee_property(bci < code_length,
-                         "Invalid pc in LineNumberTable in class file %s", CHECK_NULL);
-      c_stream.write_pair(bci, line);
-    }
-    c_stream.write_terminator();
-    *compressed_linenumber_table_size = c_stream.position();
-    compressed_linenumber_table = c_stream.buffer();
-  }
-  return compressed_linenumber_table;
-}
-
-
-// Class file LocalVariableTable elements.
-class Classfile_LVT_Element VALUE_OBJ_CLASS_SPEC {
- public:
-  u2 start_bci;
-  u2 length;
-  u2 name_cp_index;
-  u2 descriptor_cp_index;
-  u2 slot;
-};
-
-
-class LVT_Hash: public CHeapObj {
- public:
-  LocalVariableTableElement  *_elem;  // element
-  LVT_Hash*                   _next;  // Next entry in hash table
-};
-
-unsigned int hash(LocalVariableTableElement *elem) {
-  unsigned int raw_hash = elem->start_bci;
-
-  raw_hash = elem->length        + raw_hash * 37;
-  raw_hash = elem->name_cp_index + raw_hash * 37;
-  raw_hash = elem->slot          + raw_hash * 37;
-
-  return raw_hash % HASH_ROW_SIZE;
-}
-
-void initialize_hashtable(LVT_Hash** table) {
-  for (int i = 0; i < HASH_ROW_SIZE; i++) {
-    table[i] = NULL;
-  }
-}
-
-void clear_hashtable(LVT_Hash** table) {
-  for (int i = 0; i < HASH_ROW_SIZE; i++) {
-    LVT_Hash* current = table[i];
-    LVT_Hash* next;
-    while (current != NULL) {
-      next = current->_next;
-      current->_next = NULL;
-      delete(current);
-      current = next;
-    }
-    table[i] = NULL;
-  }
-}
-
-LVT_Hash* LVT_lookup(LocalVariableTableElement *elem, int index, LVT_Hash** table) {
-  LVT_Hash* entry = table[index];
-
-  /*
-   * 3-tuple start_bci/length/slot has to be unique key,
-   * so the following comparison seems to be redundant:
-   *       && elem->name_cp_index == entry->_elem->name_cp_index
-   */
-  while (entry != NULL) {
-    if (elem->start_bci           == entry->_elem->start_bci
-     && elem->length              == entry->_elem->length 
-     && elem->name_cp_index       == entry->_elem->name_cp_index
-     && elem->slot                == entry->_elem->slot
-    ) {
-      return entry;
-    }
-    entry = entry->_next;
-  }
-  return NULL;
-}
-
-// Return false if the local variable is found in table.
-// Return true if no duplicate is found.
-// And local variable is added as a new entry in table.
-bool LVT_put_after_lookup(LocalVariableTableElement *elem, LVT_Hash** table) {
-  // First lookup for duplicates
-  int index = hash(elem);
-  LVT_Hash* entry = LVT_lookup(elem, index, table);
-
-  if (entry != NULL) {
-      return false;
-  }
-  // No duplicate is found, allocate a new entry and fill it.
-  if ((entry = new LVT_Hash()) == NULL) {
-    return false;
-  }
-  entry->_elem = elem;
- 
-  // Insert into hash table
-  entry->_next = table[index];
-  table[index] = entry;
-
-  return true;
-}
-
-void copy_lvt_element(Classfile_LVT_Element *src, LocalVariableTableElement *lvt) {
-  lvt->start_bci           = Bytes::get_Java_u2((u1*) &src->start_bci);
-  lvt->length              = Bytes::get_Java_u2((u1*) &src->length);
-  lvt->name_cp_index       = Bytes::get_Java_u2((u1*) &src->name_cp_index);
-  lvt->descriptor_cp_index = Bytes::get_Java_u2((u1*) &src->descriptor_cp_index);
-  lvt->signature_cp_index  = 0;
-  lvt->slot                = Bytes::get_Java_u2((u1*) &src->slot);
-}
-
-// Function is used to parse both attributes:
-//       LocalVariableTable (LVT) and LocalVariableTypeTable (LVTT)
-u2* ClassFileParser::parse_localvariable_table(u4 code_length,
-                                               u2 max_locals,
-                                               u4 code_attribute_length,
-                                               constantPoolHandle cp,
-                                               u2* localvariable_table_length,
-                                               bool isLVTT,
-                                               TRAPS) {
-  ClassFileStream* cfs = stream();
-  const char * tbl_name = (isLVTT) ? "LocalVariableTypeTable" : "LocalVariableTable";
-  *localvariable_table_length = cfs->get_u2(CHECK_NULL);
-  unsigned int size = (*localvariable_table_length) * sizeof(Classfile_LVT_Element) / sizeof(u2);
-  // Verify local variable table attribute has right length
-  if (_need_verify) {
-    guarantee_property(code_attribute_length == (sizeof(*localvariable_table_length) + size * sizeof(u2)),
-                       "%s has wrong length in class file %s", tbl_name, CHECK_NULL);
-  }
-  u2* localvariable_table_start = cfs->get_u2_buffer();
-  assert(localvariable_table_start != NULL, "null local variable table");
-  if (!_need_verify) { 
-    cfs->skip_u2_fast(size);
-  } else {
-    cfs->guarantee_more(size * 2, CHECK_NULL);
-    for(int i = 0; i < (*localvariable_table_length); i++) {
-      u2 start_pc = cfs->get_u2_fast();
-      u2 length = cfs->get_u2_fast();
-      u2 name_index = cfs->get_u2_fast();
-      u2 descriptor_index = cfs->get_u2_fast();
-      u2 index = cfs->get_u2_fast();
-      // Assign to a u4 to avoid overflow
-      u4 end_pc = (u4)start_pc + (u4)length;
-
-      if (start_pc >= code_length) {
-        classfile_parse_error(
-          "Invalid start_pc %u in %s in class file %s", 
-          start_pc, tbl_name, CHECK_NULL);
-      }
-      if (end_pc > code_length) {
-        classfile_parse_error(
-          "Invalid length %u in %s in class file %s", 
-          length, tbl_name, CHECK_NULL);
-      }
-      int cp_size = cp->length();
-      guarantee_property(
-        valid_cp_range(name_index, cp_size) && 
-          cp->tag_at(name_index).is_utf8(),
-        "Name index %u in %s has bad constant type in class file %s",
-        name_index, tbl_name, CHECK_NULL);
-      guarantee_property(
-        valid_cp_range(descriptor_index, cp_size) &&
-          cp->tag_at(descriptor_index).is_utf8(),
-        "Signature index %u in %s has bad constant type in class file %s",
-        descriptor_index, tbl_name, CHECK_NULL);
-
-      symbolHandle name(THREAD, cp->symbol_at(name_index));
-      symbolHandle sig(THREAD, cp->symbol_at(descriptor_index));
-      verify_legal_field_name(name, CHECK_NULL);
-      u2 extra_slot = 0;
-      if (!isLVTT) {
-        verify_legal_field_signature(name, sig, CHECK_NULL);
-
-        // 4894874: check special cases for double and long local variables
-        if (sig() == vmSymbols::type_signature(T_DOUBLE) || 
-            sig() == vmSymbols::type_signature(T_LONG)) {
-          extra_slot = 1;
-        }
-      }
-      guarantee_property((index + extra_slot) < max_locals,
-                          "Invalid index %u in %s in class file %s",
-                          index, tbl_name, CHECK_NULL);
-    }
-  }
-  return localvariable_table_start;
-}
-
-
-void ClassFileParser::parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
-                                      u1* u1_array, u2* u2_array, constantPoolHandle cp, TRAPS) {
-  ClassFileStream* cfs = stream();
-  u2 index = 0; // index in the array with long/double occupying two slots
-  u4 i1 = *u1_index;
-  u4 i2 = *u2_index + 1;  
-  for(int i = 0; i < array_length; i++) {
-    u1 tag = u1_array[i1++] = cfs->get_u1(CHECK);
-    index++;
-    if (tag == ITEM_Long || tag == ITEM_Double) {
-      index++; 
-    } else if (tag == ITEM_Object) {
-      u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK);
-      guarantee_property(valid_cp_range(class_index, cp->length()) &&
-                         cp->tag_at(class_index).is_unresolved_klass(), 
-                         "Bad class index %u in StackMap in class file %s", 
-                         class_index, CHECK);
-    } else if (tag == ITEM_Uninitialized) {
-      u2 offset = u2_array[i2++] = cfs->get_u2(CHECK);
-      guarantee_property(
-        offset < code_length, 
-        "Bad uninitialized type offset %u in StackMap in class file %s", 
-        offset, CHECK);
-    } else {
-      guarantee_property(
-        tag <= (u1)ITEM_Uninitialized,
-        "Unknown variable type %u in StackMap in class file %s", 
-        tag, CHECK);
-    }
-  }
-  u2_array[*u2_index] = index; 
-  *u1_index = i1;
-  *u2_index = i2;
-}
-
-typeArrayOop ClassFileParser::parse_stackmap_table(
-    u4 code_attribute_length, TRAPS) {
-  if (code_attribute_length == 0) 
-    return NULL;
-  
-  ClassFileStream* cfs = stream();
-  u1* stackmap_table_start = cfs->get_u1_buffer();
-  assert(stackmap_table_start != NULL, "null stackmap table");
-
-  // check code_attribute_length first
-  stream()->skip_u1(code_attribute_length, CHECK_NULL);
-
-  if (!_need_verify && !DumpSharedSpaces) {
-    return NULL;
-  }
-
-  typeArrayOop stackmap_data = 
-    oopFactory::new_permanent_byteArray(code_attribute_length, CHECK_NULL);
-
-  stackmap_data->set_length(code_attribute_length);
-  memcpy((void*)stackmap_data->byte_at_addr(0), 
-         (void*)stackmap_table_start, code_attribute_length);
-  return stackmap_data;
-}
-
-u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length, 
-                                              u4 method_attribute_length,
-                                              constantPoolHandle cp, TRAPS) {
-  ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_NULL);  // checked_exceptions_length
-  *checked_exceptions_length = cfs->get_u2_fast();
-  unsigned int size = (*checked_exceptions_length) * sizeof(CheckedExceptionElement) / sizeof(u2);
-  u2* checked_exceptions_start = cfs->get_u2_buffer();
-  assert(checked_exceptions_start != NULL, "null checked exceptions");
-  if (!_need_verify) { 
-    cfs->skip_u2_fast(size);
-  } else {
-    // Verify each value in the checked exception table
-    u2 checked_exception;
-    u2 len = *checked_exceptions_length;
-    cfs->guarantee_more(2 * len, CHECK_NULL);
-    for (int i = 0; i < len; i++) {
-      checked_exception = cfs->get_u2_fast();
-      check_property(
-        valid_cp_range(checked_exception, cp->length()) &&
-        cp->tag_at(checked_exception).is_klass_reference(), 
-        "Exception name has bad type at constant pool %u in class file %s", 
-        checked_exception, CHECK_NULL);
-    }
-  }
-  // check exceptions attribute length
-  if (_need_verify) {
-    guarantee_property(method_attribute_length == (sizeof(*checked_exceptions_length) +
-                                                   sizeof(u2) * size),
-                      "Exceptions attribute has wrong length in class file %s", CHECK_NULL);
-  }
-  return checked_exceptions_start;
-}
-
-
-#define MAX_ARGS_SIZE 255
-#define MAX_CODE_SIZE 65535
-#define INITIAL_MAX_LVT_NUMBER 256
-
-// Note: the parse_method below is big and clunky because all parsing of the code and exceptions
-// attribute is inlined. This is curbersome to avoid since we inline most of the parts in the
-// methodOop to save footprint, so we only know the size of the resulting methodOop when the
-// entire method attribute is parsed.
-//
-// The promoted_flags parameter is used to pass relevant access_flags
-// from the method back up to the containing klass. These flag values
-// are added to klass's access_flags.
-
-methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interface,
-                                           AccessFlags *promoted_flags,
-                                           typeArrayHandle* method_annotations,
-                                           typeArrayHandle* method_parameter_annotations,
-                                           typeArrayHandle* method_default_annotations,
-                                           TRAPS) {
-  ClassFileStream* cfs = stream();
-  methodHandle nullHandle;
-  ResourceMark rm(THREAD);
-  // Parse fixed parts
-  cfs->guarantee_more(8, CHECK_(nullHandle)); // access_flags, name_index, descriptor_index, attributes_count
-
-  int flags = cfs->get_u2_fast();
-  u2 name_index = cfs->get_u2_fast();
-  int cp_size = cp->length();
-  check_property(
-    valid_cp_range(name_index, cp_size) && 
-      cp->tag_at(name_index).is_utf8(), 
-    "Illegal constant pool index %u for method name in class file %s", 
-    name_index, CHECK_(nullHandle));
-  symbolHandle name(THREAD, cp->symbol_at(name_index));
-  verify_legal_method_name(name, CHECK_(nullHandle));  
-
-  u2 signature_index = cfs->get_u2_fast();
-  check_property(
-    valid_cp_range(signature_index, cp_size) &&
-      cp->tag_at(signature_index).is_utf8(), 
-    "Illegal constant pool index %u for method signature in class file %s", 
-    signature_index, CHECK_(nullHandle));
-  symbolHandle signature(THREAD, cp->symbol_at(signature_index));
-
-  AccessFlags access_flags;  
-  if (name == vmSymbols::class_initializer_name()) {
-    // We ignore the access flags for a class initializer. (JVM Spec. p. 116)
-    flags = JVM_ACC_STATIC;
-  } else {
-    verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
-  }
-
-  int args_size = -1;  // only used when _need_verify is true
-  if (_need_verify) {
-    args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) + 
-                 verify_legal_method_signature(name, signature, CHECK_(nullHandle));
-    if (args_size > MAX_ARGS_SIZE) {
-      classfile_parse_error("Too many arguments in method signature in class file %s", CHECK_(nullHandle));
-    }
-  }
-        
-  access_flags.set_flags(flags & JVM_RECOGNIZED_METHOD_MODIFIERS);
-  
-  // Default values for code and exceptions attribute elements
-  u2 max_stack = 0;
-  u2 max_locals = 0;
-  u4 code_length = 0;
-  u1* code_start = 0;
-  u2 exception_table_length = 0;
-  typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array());
-  u2 checked_exceptions_length = 0;
-  u2* checked_exceptions_start = NULL;
-  int compressed_linenumber_table_size = 0;
-  u_char* compressed_linenumber_table = NULL;
-  int total_lvt_length = 0;
-  u2 lvt_cnt = 0;
-  u2 lvtt_cnt = 0;
-  bool lvt_allocated = false;
-  u2 max_lvt_cnt = INITIAL_MAX_LVT_NUMBER;
-  u2 max_lvtt_cnt = INITIAL_MAX_LVT_NUMBER;
-  u2* localvariable_table_length;
-  u2** localvariable_table_start;
-  u2* localvariable_type_table_length;
-  u2** localvariable_type_table_start;
-  bool parsed_code_attribute = false;
-  bool parsed_checked_exceptions_attribute = false;
-  bool parsed_stackmap_attribute = false;
-  // stackmap attribute - JDK1.5
-  typeArrayHandle stackmap_data;
-  u2 generic_signature_index = 0;
-  u1* runtime_visible_annotations = NULL;
-  int runtime_visible_annotations_length = 0;
-  u1* runtime_invisible_annotations = NULL;
-  int runtime_invisible_annotations_length = 0;
-  u1* runtime_visible_parameter_annotations = NULL;
-  int runtime_visible_parameter_annotations_length = 0;
-  u1* runtime_invisible_parameter_annotations = NULL;
-  int runtime_invisible_parameter_annotations_length = 0;
-  u1* annotation_default = NULL;
-  int annotation_default_length = 0;
-
-  // Parse code and exceptions attribute
-  u2 method_attributes_count = cfs->get_u2_fast();
-  while (method_attributes_count--) {   
-    cfs->guarantee_more(6, CHECK_(nullHandle));  // method_attribute_name_index, method_attribute_length
-    u2 method_attribute_name_index = cfs->get_u2_fast();
-    u4 method_attribute_length = cfs->get_u4_fast();
-    check_property(
-      valid_cp_range(method_attribute_name_index, cp_size) &&
-        cp->tag_at(method_attribute_name_index).is_utf8(), 
-      "Invalid method attribute name index %u in class file %s", 
-      method_attribute_name_index, CHECK_(nullHandle));
-
-    symbolOop method_attribute_name = cp->symbol_at(method_attribute_name_index);
-    if (method_attribute_name == vmSymbols::tag_code()) {
-      // Parse Code attribute
-      if (_need_verify) {
-        guarantee_property(!access_flags.is_native() && !access_flags.is_abstract(), 
-                        "Code attribute in native or abstract methods in class file %s", 
-                         CHECK_(nullHandle));
-      }
-      if (parsed_code_attribute) {
-        classfile_parse_error("Multiple Code attributes in class file %s", CHECK_(nullHandle));
-      }
-      parsed_code_attribute = true;
-
-      // Stack size, locals size, and code size
-      if (_major_version == 45 && _minor_version <= 2) {
-        cfs->guarantee_more(4, CHECK_(nullHandle));
-        max_stack = cfs->get_u1_fast();
-        max_locals = cfs->get_u1_fast();
-        code_length = cfs->get_u2_fast();
-      } else {
-        cfs->guarantee_more(8, CHECK_(nullHandle));
-        max_stack = cfs->get_u2_fast();
-        max_locals = cfs->get_u2_fast();
-        code_length = cfs->get_u4_fast();
-      }
-      if (_need_verify) {
-        guarantee_property(args_size <= max_locals, 
-                           "Arguments can't fit into locals in class file %s", CHECK_(nullHandle));
-        guarantee_property(code_length > 0 && code_length <= MAX_CODE_SIZE, 
-                           "Invalid method Code length %u in class file %s", 
-                           code_length, CHECK_(nullHandle));
-      }
-      // Code pointer
-      code_start = cfs->get_u1_buffer();
-      assert(code_start != NULL, "null code start");
-      cfs->guarantee_more(code_length, CHECK_(nullHandle));
-      cfs->skip_u1_fast(code_length);
-
-      // Exception handler table
-      cfs->guarantee_more(2, CHECK_(nullHandle));  // exception_table_length
-      exception_table_length = cfs->get_u2_fast();
-      if (exception_table_length > 0) {
-        exception_handlers = 
-              parse_exception_table(code_length, exception_table_length, cp, CHECK_(nullHandle));
-      }
-
-      // Parse additional attributes in code attribute
-      cfs->guarantee_more(2, CHECK_(nullHandle));  // code_attributes_count
-      u2 code_attributes_count = cfs->get_u2_fast();
-      unsigned int calculated_attribute_length = sizeof(max_stack) + 
-                                                 sizeof(max_locals) + 
-                                                 sizeof(code_length) +
-                                                 code_length + 
-                                                 sizeof(exception_table_length) +
-                                                 sizeof(code_attributes_count) +
-                                                 exception_table_length*(sizeof(u2) /* start_pc */+
-                                                                         sizeof(u2) /* end_pc */  +
-                                                                         sizeof(u2) /* handler_pc */ +
-                                                                         sizeof(u2) /* catch_type_index */);
-
-      while (code_attributes_count--) {
-        cfs->guarantee_more(6, CHECK_(nullHandle));  // code_attribute_name_index, code_attribute_length
-        u2 code_attribute_name_index = cfs->get_u2_fast();
-        u4 code_attribute_length = cfs->get_u4_fast();
-        calculated_attribute_length += code_attribute_length + 
-                                       sizeof(code_attribute_name_index) +
-                                       sizeof(code_attribute_length);
-        check_property(valid_cp_range(code_attribute_name_index, cp_size) &&
-                       cp->tag_at(code_attribute_name_index).is_utf8(), 
-                       "Invalid code attribute name index %u in class file %s", 
-                       code_attribute_name_index,
-                       CHECK_(nullHandle));
-        if (LoadLineNumberTables && 
-            cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
-          // Parse and compress line number table
-          compressed_linenumber_table = parse_linenumber_table(code_attribute_length, 
-                                                               code_length,
-                                                               &compressed_linenumber_table_size, 
-                                                               CHECK_(nullHandle));
-                                         
-        } else if (LoadLocalVariableTables && 
-                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
-          // Parse local variable table
-          if (!lvt_allocated) {
-            localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
-            localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
-            localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
-            localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
-            lvt_allocated = true;
-          }
-          if (lvt_cnt == max_lvt_cnt) {
-            max_lvt_cnt <<= 1;
-            REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt);
-            REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
-          }
-          localvariable_table_start[lvt_cnt] =
-            parse_localvariable_table(code_length,
-                                      max_locals,
-                                      code_attribute_length,
-                                      cp,
-                                      &localvariable_table_length[lvt_cnt],
-                                      false,	// is not LVTT
-                                      CHECK_(nullHandle));
-          total_lvt_length += localvariable_table_length[lvt_cnt];
-          lvt_cnt++;
-        } else if (LoadLocalVariableTypeTables && 
-                   _major_version >= JAVA_1_5_VERSION &&
-                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
-          if (!lvt_allocated) {
-            localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
-            localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
-            localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
-            localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
-            lvt_allocated = true;
-          }
-          // Parse local variable type table
-          if (lvtt_cnt == max_lvtt_cnt) {
-            max_lvtt_cnt <<= 1;
-            REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt);
-            REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
-          }
-          localvariable_type_table_start[lvtt_cnt] =
-            parse_localvariable_table(code_length,
-                                      max_locals,
-                                      code_attribute_length,
-                                      cp,
-                                      &localvariable_type_table_length[lvtt_cnt],
-                                      true,	// is LVTT
-                                      CHECK_(nullHandle));
-          lvtt_cnt++;
-        } else if (UseSplitVerifier &&
-                   _major_version >= Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION &&
-                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
-          // Stack map is only needed by the new verifier in JDK1.5.
-          if (parsed_stackmap_attribute) {
-            classfile_parse_error("Multiple StackMapTable attributes in class file %s", CHECK_(nullHandle));
-          }
-          typeArrayOop sm = 
-            parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
-          stackmap_data = typeArrayHandle(THREAD, sm);
-          parsed_stackmap_attribute = true;
-        } else {
-          // Skip unknown attributes
-          cfs->skip_u1(code_attribute_length, CHECK_(nullHandle));
-        }
-      }
-      // check method attribute length
-      if (_need_verify) {
-        guarantee_property(method_attribute_length == calculated_attribute_length,
-                           "Code segment has wrong length in class file %s", CHECK_(nullHandle));
-      }
-    } else if (method_attribute_name == vmSymbols::tag_exceptions()) {
-      // Parse Exceptions attribute
-      if (parsed_checked_exceptions_attribute) {
-        classfile_parse_error("Multiple Exceptions attributes in class file %s", CHECK_(nullHandle));
-      }
-      parsed_checked_exceptions_attribute = true;
-      checked_exceptions_start =
-            parse_checked_exceptions(&checked_exceptions_length, 
-                                     method_attribute_length, 
-                                     cp, CHECK_(nullHandle));
-    } else if (method_attribute_name == vmSymbols::tag_synthetic()) {
-      if (method_attribute_length != 0) {
-        classfile_parse_error(
-          "Invalid Synthetic method attribute length %u in class file %s", 
-          method_attribute_length, CHECK_(nullHandle));
-      }
-      // Should we check that there hasn't already been a synthetic attribute?
-      access_flags.set_is_synthetic();
-    } else if (method_attribute_name == vmSymbols::tag_deprecated()) { // 4276120
-      if (method_attribute_length != 0) {
-        classfile_parse_error(
-          "Invalid Deprecated method attribute length %u in class file %s", 
-          method_attribute_length, CHECK_(nullHandle));
-      }
-    } else if (_major_version >= JAVA_1_5_VERSION) {
-      if (method_attribute_name == vmSymbols::tag_signature()) {
-        if (method_attribute_length != 2) {
-          classfile_parse_error(
-            "Invalid Signature attribute length %u in class file %s", 
-            method_attribute_length, CHECK_(nullHandle));
-        }
-        cfs->guarantee_more(2, CHECK_(nullHandle));  // generic_signature_index
-        generic_signature_index = cfs->get_u2_fast();
-      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
-        runtime_visible_annotations_length = method_attribute_length;
-        runtime_visible_annotations = cfs->get_u1_buffer();
-        assert(runtime_visible_annotations != NULL, "null visible annotations");
-        cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
-      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
-        runtime_invisible_annotations_length = method_attribute_length;
-        runtime_invisible_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_annotations != NULL, "null invisible annotations");
-        cfs->skip_u1(runtime_invisible_annotations_length, CHECK_(nullHandle));
-      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_parameter_annotations()) {
-        runtime_visible_parameter_annotations_length = method_attribute_length;
-        runtime_visible_parameter_annotations = cfs->get_u1_buffer();
-        assert(runtime_visible_parameter_annotations != NULL, "null visible parameter annotations");
-        cfs->skip_u1(runtime_visible_parameter_annotations_length, CHECK_(nullHandle));
-      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_parameter_annotations()) {
-        runtime_invisible_parameter_annotations_length = method_attribute_length;
-        runtime_invisible_parameter_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_parameter_annotations != NULL, "null invisible parameter annotations");
-        cfs->skip_u1(runtime_invisible_parameter_annotations_length, CHECK_(nullHandle));
-      } else if (method_attribute_name == vmSymbols::tag_annotation_default()) {
-        annotation_default_length = method_attribute_length;
-        annotation_default = cfs->get_u1_buffer();
-        assert(annotation_default != NULL, "null annotation default");
-        cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
-      } else {
-        // Skip unknown attributes
-        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
-      }
-    } else {
-      // Skip unknown attributes
-      cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
-    }      
-  }
-  // Make sure there's at least one Code attribute in non-native/non-abstract method
-  if (_need_verify) {
-    guarantee_property(access_flags.is_native() || access_flags.is_abstract() || parsed_code_attribute,
-                      "Absent Code attribute in method that is not native or abstract in class file %s", CHECK_(nullHandle));
-  }
-
-  // All sizing information for a methodOop is finally available, now create it
-  methodOop m_oop  = oopFactory::new_method(code_length, access_flags,
-                               compressed_linenumber_table_size, 
-                               total_lvt_length, 
-                               checked_exceptions_length, 
-                               CHECK_(nullHandle));
-  methodHandle m (THREAD, m_oop);
-
-  ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
-
-  // Fill in information from fixed part (access_flags already set)
-  m->set_constants(cp());
-  m->set_name_index(name_index);
-  m->set_signature_index(signature_index);
-  m->set_generic_signature_index(generic_signature_index);
-#ifdef CC_INTERP
-  // hmm is there a gc issue here??
-  ResultTypeFinder rtf(cp->symbol_at(signature_index));
-  m->set_result_index(rtf.type());
-#endif
-
-  if (args_size >= 0) {
-    m->set_size_of_parameters(args_size);
-  } else { 
-    m->compute_size_of_parameters(THREAD);
-  }
-#ifdef ASSERT
-  if (args_size >= 0) {
-    m->compute_size_of_parameters(THREAD);
-    assert(args_size == m->size_of_parameters(), "");
-  }
-#endif
-
-  // Fill in code attribute information
-  m->set_max_stack(max_stack);
-  m->set_max_locals(max_locals);
-  m->constMethod()->set_stackmap_data(stackmap_data());
-
-  /**
-   * The exception_table field is the flag used to indicate
-   * that the methodOop and it's associated constMethodOop are partially 
-   * initialized and thus are exempt from pre/post GC verification.  Once 
-   * the field is set, the oops are considered fully initialized so make 
-   * sure that the oops can pass verification when this field is set. 
-   */
-  m->set_exception_table(exception_handlers());
-
-  // Copy byte codes
-  if (code_length > 0) {
-    memcpy(m->code_base(), code_start, code_length);
-  }
-  // Copy line number table
-  if (compressed_linenumber_table_size > 0) {
-    memcpy(m->compressed_linenumber_table(), compressed_linenumber_table, compressed_linenumber_table_size);
-  }
-  // Copy checked exceptions
-  if (checked_exceptions_length > 0) {
-    int size = checked_exceptions_length * sizeof(CheckedExceptionElement) / sizeof(u2);
-    copy_u2_with_conversion((u2*) m->checked_exceptions_start(), checked_exceptions_start, size);
-  }
-
-  /* Copy class file LVT's/LVTT's into the HotSpot internal LVT.
-   *
-   * Rules for LVT's and LVTT's are:
-   *   - There can be any number of LVT's and LVTT's.
-   *   - If there are n LVT's, it is the same as if there was just
-   *     one LVT containing all the entries from the n LVT's.
-   *   - There may be no more than one LVT entry per local variable.
-   *     Two LVT entries are 'equal' if these fields are the same:
-   *        start_pc, length, name, slot
-   *   - There may be no more than one LVTT entry per each LVT entry.
-   *     Each LVTT entry has to match some LVT entry.
-   *   - HotSpot internal LVT keeps natural ordering of class file LVT entries.
-   */
-  if (total_lvt_length > 0) {  
-    int tbl_no, idx;
-
-    promoted_flags->set_has_localvariable_table();
-
-    LVT_Hash** lvt_Hash = NEW_RESOURCE_ARRAY(LVT_Hash*, HASH_ROW_SIZE);
-    initialize_hashtable(lvt_Hash);
-
-    // To fill LocalVariableTable in
-    Classfile_LVT_Element*  cf_lvt;
-    LocalVariableTableElement* lvt = m->localvariable_table_start();
-
-    for (tbl_no = 0; tbl_no < lvt_cnt; tbl_no++) {
-      cf_lvt = (Classfile_LVT_Element *) localvariable_table_start[tbl_no];
-      for (idx = 0; idx < localvariable_table_length[tbl_no]; idx++, lvt++) {
-        copy_lvt_element(&cf_lvt[idx], lvt);
-        // If no duplicates, add LVT elem in hashtable lvt_Hash.
-        if (LVT_put_after_lookup(lvt, lvt_Hash) == false 
-          && _need_verify 
-          && _major_version >= JAVA_1_5_VERSION ) {
-          clear_hashtable(lvt_Hash);
-          classfile_parse_error("Duplicated LocalVariableTable attribute "
-                                "entry for '%s' in class file %s",
-                                 cp->symbol_at(lvt->name_cp_index)->as_utf8(),
-                                 CHECK_(nullHandle));
-        }
-      }
-    }
-
-    // To merge LocalVariableTable and LocalVariableTypeTable
-    Classfile_LVT_Element* cf_lvtt;
-    LocalVariableTableElement lvtt_elem;
-
-    for (tbl_no = 0; tbl_no < lvtt_cnt; tbl_no++) {
-      cf_lvtt = (Classfile_LVT_Element *) localvariable_type_table_start[tbl_no];
-      for (idx = 0; idx < localvariable_type_table_length[tbl_no]; idx++) {
-        copy_lvt_element(&cf_lvtt[idx], &lvtt_elem);
-        int index = hash(&lvtt_elem);
-        LVT_Hash* entry = LVT_lookup(&lvtt_elem, index, lvt_Hash);
-        if (entry == NULL) {
-          if (_need_verify) {
-            clear_hashtable(lvt_Hash);
-            classfile_parse_error("LVTT entry for '%s' in class file %s "
-                                  "does not match any LVT entry",
-                                   cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
-                                   CHECK_(nullHandle));
-          }
-        } else if (entry->_elem->signature_cp_index != 0 && _need_verify) {
-          clear_hashtable(lvt_Hash);
-          classfile_parse_error("Duplicated LocalVariableTypeTable attribute "
-                                "entry for '%s' in class file %s",
-                                 cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
-                                 CHECK_(nullHandle));
-        } else {
-          // to add generic signatures into LocalVariableTable
-          entry->_elem->signature_cp_index = lvtt_elem.descriptor_cp_index;
-        }
-      }
-    }
-    clear_hashtable(lvt_Hash);
-  }
-
-  *method_annotations = assemble_annotations(runtime_visible_annotations,
-                                             runtime_visible_annotations_length,
-                                             runtime_invisible_annotations,
-                                             runtime_invisible_annotations_length,
-                                             CHECK_(nullHandle));
-  *method_parameter_annotations = assemble_annotations(runtime_visible_parameter_annotations,
-                                                       runtime_visible_parameter_annotations_length,
-                                                       runtime_invisible_parameter_annotations,
-                                                       runtime_invisible_parameter_annotations_length,
-                                                       CHECK_(nullHandle));
-  *method_default_annotations = assemble_annotations(annotation_default,
-                                                     annotation_default_length,
-                                                     NULL,
-                                                     0,
-                                                     CHECK_(nullHandle));
-
-  if (name() == vmSymbols::finalize_method_name() &&
-      signature() == vmSymbols::void_method_signature()) {
-    if (m->is_empty_method()) {
-      _has_empty_finalizer = true;
-    } else {
-      _has_finalizer = true;
-    }
-  }
-  if (name() == vmSymbols::object_initializer_name() &&
-      signature() == vmSymbols::void_method_signature() &&
-      m->is_vanilla_constructor()) {
-    _has_vanilla_constructor = true;
-  }
-
-  return m;
-}
-
-  
-// The promoted_flags parameter is used to pass relevant access_flags
-// from the methods back up to the containing klass. These flag values
-// are added to klass's access_flags.
-
-objArrayHandle ClassFileParser::parse_methods(constantPoolHandle cp, bool is_interface, 
-                                              AccessFlags* promoted_flags,
-                                              bool* has_final_method,
-                                              objArrayOop* methods_annotations_oop,
-                                              objArrayOop* methods_parameter_annotations_oop,
-                                              objArrayOop* methods_default_annotations_oop,
-                                              TRAPS) {
-  ClassFileStream* cfs = stream();
-  objArrayHandle nullHandle;
-  typeArrayHandle method_annotations;
-  typeArrayHandle method_parameter_annotations;
-  typeArrayHandle method_default_annotations;
-  cfs->guarantee_more(2, CHECK_(nullHandle));  // length
-  u2 length = cfs->get_u2_fast();
-  if (length == 0) {
-    return objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
-  } else {
-    objArrayOop m = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
-    objArrayHandle methods(THREAD, m);
-    HandleMark hm(THREAD);
-    objArrayHandle methods_annotations;
-    objArrayHandle methods_parameter_annotations;
-    objArrayHandle methods_default_annotations;
-    for (int index = 0; index < length; index++) {
-      methodHandle method = parse_method(cp, is_interface, 
-                                         promoted_flags,
-                                         &method_annotations,
-                                         &method_parameter_annotations,
-                                         &method_default_annotations,
-                                         CHECK_(nullHandle));
-      if (method->is_final()) {
-        *has_final_method = true;
-      }
-      methods->obj_at_put(index, method());  
-      if (method_annotations.not_null()) {
-        if (methods_annotations.is_null()) {
-          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
-          methods_annotations = objArrayHandle(THREAD, md);
-        }
-        methods_annotations->obj_at_put(index, method_annotations());
-      }
-      if (method_parameter_annotations.not_null()) {
-        if (methods_parameter_annotations.is_null()) {
-          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
-          methods_parameter_annotations = objArrayHandle(THREAD, md);
-        }
-        methods_parameter_annotations->obj_at_put(index, method_parameter_annotations());
-      }
-      if (method_default_annotations.not_null()) {
-        if (methods_default_annotations.is_null()) {
-          objArrayOop md = oopFactory::new_system_objArray(length, CHECK_(nullHandle));
-          methods_default_annotations = objArrayHandle(THREAD, md);
-        }
-        methods_default_annotations->obj_at_put(index, method_default_annotations());
-      }
-    }
-    if (_need_verify && length > 1) {
-      // Check duplicated methods
-      ResourceMark rm(THREAD);
-      NameSigHash** names_and_sigs = NEW_RESOURCE_ARRAY_IN_THREAD(
-        THREAD, NameSigHash*, HASH_ROW_SIZE);
-      initialize_hashtable(names_and_sigs);
-      bool dup = false;
-      {
-        debug_only(No_Safepoint_Verifier nsv;)
-        for (int i = 0; i < length; i++) {
-          methodOop m = (methodOop)methods->obj_at(i);
-          // If no duplicates, add name/signature in hashtable names_and_sigs.
-          if (!put_after_lookup(m->name(), m->signature(), names_and_sigs)) {
-            dup = true;
-            break;
-          }
-        }
-      }
-      if (dup) {
-        classfile_parse_error("Duplicate method name&signature in class file %s",
-                              CHECK_(nullHandle));
-      }
-    }
-
-    *methods_annotations_oop = methods_annotations();
-    *methods_parameter_annotations_oop = methods_parameter_annotations();
-    *methods_default_annotations_oop = methods_default_annotations();
-
-    return methods;
-  }
-}
-
-
-typeArrayHandle ClassFileParser::sort_methods(objArrayHandle methods,
-                                              objArrayHandle methods_annotations,
-                                              objArrayHandle methods_parameter_annotations,
-                                              objArrayHandle methods_default_annotations,
-                                              TRAPS) {
-  typeArrayHandle nullHandle;
-  int length = methods()->length();
-  // If JVMTI original method ordering is enabled we have to 
-  // remember the original class file ordering.
-  // We temporarily use the vtable_index field in the methodOop to store the
-  // class file index, so we can read in after calling qsort.
-  if (JvmtiExport::can_maintain_original_method_order()) {
-    for (int index = 0; index < length; index++) {
-      methodOop m = methodOop(methods->obj_at(index));
-      assert(!m->valid_vtable_index(), "vtable index should not be set");
-      m->set_vtable_index(index);
-    }
-  }
-  // Sort method array by ascending method name (for faster lookups & vtable construction)
-  // Note that the ordering is not alphabetical, see symbolOopDesc::fast_compare
-  methodOopDesc::sort_methods(methods(),
-                              methods_annotations(),
-                              methods_parameter_annotations(),
-                              methods_default_annotations());
-
-  // If JVMTI original method ordering is enabled construct int array remembering the original ordering
-  if (JvmtiExport::can_maintain_original_method_order()) {
-    typeArrayOop new_ordering = oopFactory::new_permanent_intArray(length, CHECK_(nullHandle));
-    typeArrayHandle method_ordering(THREAD, new_ordering);
-    for (int index = 0; index < length; index++) {
-      methodOop m = methodOop(methods->obj_at(index));
-      int old_index = m->vtable_index();
-      assert(old_index >= 0 && old_index < length, "invalid method index");
-      method_ordering->int_at_put(index, old_index);
-      m->set_vtable_index(methodOopDesc::invalid_vtable_index);
-    }
-    return method_ordering;
-  } else {
-    return typeArrayHandle(THREAD, Universe::the_empty_int_array());
-  }
-}
-
-
-void ClassFileParser::parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
-  ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK);  // sourcefile_index
-  u2 sourcefile_index = cfs->get_u2_fast();
-  check_property(
-    valid_cp_range(sourcefile_index, cp->length()) &&
-      cp->tag_at(sourcefile_index).is_utf8(), 
-    "Invalid SourceFile attribute at constant pool index %u in class file %s", 
-    sourcefile_index, CHECK);  
-  k->set_source_file_name(cp->symbol_at(sourcefile_index));
-}
-
-
-
-void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, 
-                                                                       instanceKlassHandle k, 
-                                                                       int length, TRAPS) {
-  ClassFileStream* cfs = stream();
-  u1* sde_buffer = cfs->get_u1_buffer();
-  assert(sde_buffer != NULL, "null sde buffer");
-
-  // Don't bother storing it if there is no way to retrieve it
-  if (JvmtiExport::can_get_source_debug_extension()) {
-    // Optimistically assume that only 1 byte UTF format is used
-    // (common case)
-    symbolOop sde_symbol = oopFactory::new_symbol((char*)sde_buffer, 
-                                                  length, CHECK);
-    k->set_source_debug_extension(sde_symbol);
-  }
-  // Got utf8 string, set stream position forward
-  cfs->skip_u1(length, CHECK);
-}
-
-
-// Inner classes can be static, private or protected (classic VM does this)
-#define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
-
-// Return number of classes in the inner classes attribute table
-u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {  
-  ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_0);  // length
-  u2 length = cfs->get_u2_fast();
-
-  // 4-tuples of shorts [inner_class_info_index, outer_class_info_index, inner_name_index, inner_class_access_flags]
-  typeArrayOop ic = oopFactory::new_permanent_shortArray(length*4, CHECK_0);  
-  typeArrayHandle inner_classes(THREAD, ic);
-  int index = 0;
-  int cp_size = cp->length();
-  cfs->guarantee_more(8 * length, CHECK_0);  // 4-tuples of u2
-  for (int n = 0; n < length; n++) {
-    // Inner class index
-    u2 inner_class_info_index = cfs->get_u2_fast();
-    check_property(
-      inner_class_info_index == 0 || 
-        (valid_cp_range(inner_class_info_index, cp_size) && 
-        cp->tag_at(inner_class_info_index).is_klass_reference()), 
-      "inner_class_info_index %u has bad constant type in class file %s", 
-      inner_class_info_index, CHECK_0);
-    // Outer class index
-    u2 outer_class_info_index = cfs->get_u2_fast();
-    check_property(
-      outer_class_info_index == 0 || 
-        (valid_cp_range(outer_class_info_index, cp_size) &&
-        cp->tag_at(outer_class_info_index).is_klass_reference()), 
-      "outer_class_info_index %u has bad constant type in class file %s", 
-      outer_class_info_index, CHECK_0);
-    // Inner class name
-    u2 inner_name_index = cfs->get_u2_fast();
-    check_property(
-      inner_name_index == 0 || (valid_cp_range(inner_name_index, cp_size) &&
-        cp->tag_at(inner_name_index).is_utf8()), 
-      "inner_name_index %u has bad constant type in class file %s", 
-      inner_name_index, CHECK_0);    
-    if (_need_verify) {
-      guarantee_property(inner_class_info_index != outer_class_info_index, 
-                         "Class is both outer and inner class in class file %s", CHECK_0);
-    }
-    // Access flags
-    AccessFlags inner_access_flags;
-    jint flags = cfs->get_u2_fast() & RECOGNIZED_INNER_CLASS_MODIFIERS;
-    if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
-      // Set abstract bit for old class files for backward compatibility
-      flags |= JVM_ACC_ABSTRACT;
-    }
-    verify_legal_class_modifiers(flags, CHECK_0);
-    inner_access_flags.set_flags(flags);
-
-    inner_classes->short_at_put(index++, inner_class_info_index);
-    inner_classes->short_at_put(index++, outer_class_info_index);
-    inner_classes->short_at_put(index++, inner_name_index);	
-    inner_classes->short_at_put(index++, inner_access_flags.as_short());
-  }
-
-  // 4347400: make sure there's no duplicate entry in the classes array
-  if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
-    for(int i = 0; i < inner_classes->length(); i += 4) {
-      for(int j = i + 4; j < inner_classes->length(); j += 4) {
-        guarantee_property((inner_classes->ushort_at(i)   != inner_classes->ushort_at(j) ||
-                            inner_classes->ushort_at(i+1) != inner_classes->ushort_at(j+1) ||
-                            inner_classes->ushort_at(i+2) != inner_classes->ushort_at(j+2) ||
-                            inner_classes->ushort_at(i+3) != inner_classes->ushort_at(j+3)),
-                            "Duplicate entry in InnerClasses in class file %s",
-                            CHECK_0);
-      }
-    }  
-  }  
-
-  // Update instanceKlass with inner class info.  
-  k->set_inner_classes(inner_classes());
-  return length;  
-}
-
-void ClassFileParser::parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
-  k->set_is_synthetic();
-}
-
-void ClassFileParser::parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
-  ClassFileStream* cfs = stream();
-  u2 signature_index = cfs->get_u2(CHECK);
-  check_property(
-    valid_cp_range(signature_index, cp->length()) &&
-      cp->tag_at(signature_index).is_utf8(), 
-    "Invalid constant pool index %u in Signature attribute in class file %s", 
-    signature_index, CHECK);    
-  k->set_generic_signature(cp->symbol_at(signature_index));
-}
-
-void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
-  ClassFileStream* cfs = stream();
-  // Set inner classes attribute to default sentinel
-  k->set_inner_classes(Universe::the_empty_short_array());
-  cfs->guarantee_more(2, CHECK);  // attributes_count
-  u2 attributes_count = cfs->get_u2_fast();
-  bool parsed_sourcefile_attribute = false;
-  bool parsed_innerclasses_attribute = false;
-  bool parsed_enclosingmethod_attribute = false;
-  u1* runtime_visible_annotations = NULL;
-  int runtime_visible_annotations_length = 0;
-  u1* runtime_invisible_annotations = NULL;
-  int runtime_invisible_annotations_length = 0;
-  // Iterate over attributes
-  while (attributes_count--) {    
-    cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
-    u2 attribute_name_index = cfs->get_u2_fast();
-    u4 attribute_length = cfs->get_u4_fast();
-    check_property(
-      valid_cp_range(attribute_name_index, cp->length()) &&
-        cp->tag_at(attribute_name_index).is_utf8(), 
-      "Attribute name has bad constant pool index %u in class file %s", 
-      attribute_name_index, CHECK);
-    symbolOop tag = cp->symbol_at(attribute_name_index);
-    if (tag == vmSymbols::tag_source_file()) {
-      // Check for SourceFile tag
-      if (_need_verify) {
-        guarantee_property(attribute_length == 2, "Wrong SourceFile attribute length in class file %s", CHECK);
-      }
-      if (parsed_sourcefile_attribute) {
-        classfile_parse_error("Multiple SourceFile attributes in class file %s", CHECK);
-      } else {
-        parsed_sourcefile_attribute = true;
-      }
-      parse_classfile_sourcefile_attribute(cp, k, CHECK);
-    } else if (tag == vmSymbols::tag_source_debug_extension()) {
-      // Check for SourceDebugExtension tag
-      parse_classfile_source_debug_extension_attribute(cp, k, (int)attribute_length, CHECK);
-    } else if (tag == vmSymbols::tag_inner_classes()) {
-      // Check for InnerClasses tag
-      if (parsed_innerclasses_attribute) {
-        classfile_parse_error("Multiple InnerClasses attributes in class file %s", CHECK);
-      } else {
-        parsed_innerclasses_attribute = true;
-      }
-      u2 num_of_classes = parse_classfile_inner_classes_attribute(cp, k, CHECK);
-      if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
-        guarantee_property(attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
-                          "Wrong InnerClasses attribute length in class file %s", CHECK);
-      }
-    } else if (tag == vmSymbols::tag_synthetic()) {
-      // Check for Synthetic tag
-      // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec
-      if (attribute_length != 0) {
-        classfile_parse_error(
-          "Invalid Synthetic classfile attribute length %u in class file %s", 
-          attribute_length, CHECK);
-      }
-      parse_classfile_synthetic_attribute(cp, k, CHECK);
-    } else if (tag == vmSymbols::tag_deprecated()) {
-      // Check for Deprecatd tag - 4276120
-      if (attribute_length != 0) {
-        classfile_parse_error(
-          "Invalid Deprecated classfile attribute length %u in class file %s", 
-          attribute_length, CHECK);
-      }
-    } else if (_major_version >= JAVA_1_5_VERSION) {
-      if (tag == vmSymbols::tag_signature()) {
-        if (attribute_length != 2) {
-          classfile_parse_error(
-            "Wrong Signature attribute length %u in class file %s", 
-            attribute_length, CHECK);
-        }
-        parse_classfile_signature_attribute(cp, k, CHECK);
-      } else if (tag == vmSymbols::tag_runtime_visible_annotations()) {
-        runtime_visible_annotations_length = attribute_length;
-        runtime_visible_annotations = cfs->get_u1_buffer();
-        assert(runtime_visible_annotations != NULL, "null visible annotations");
-        cfs->skip_u1(runtime_visible_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_annotations()) {
-        runtime_invisible_annotations_length = attribute_length;
-        runtime_invisible_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_annotations != NULL, "null invisible annotations");
-        cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
-      } else if (tag == vmSymbols::tag_enclosing_method()) {
-        if (parsed_enclosingmethod_attribute) {
-          classfile_parse_error("Multiple EnclosingMethod attributes in class file %s", CHECK);
-        }   else {
-          parsed_enclosingmethod_attribute = true;
-        }
-        cfs->guarantee_more(4, CHECK);  // class_index, method_index
-        u2 class_index  = cfs->get_u2_fast();
-        u2 method_index = cfs->get_u2_fast();
-        if (class_index == 0) {
-          classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK);
-        }
-        // Validate the constant pool indices and types
-        if (!cp->is_within_bounds(class_index) ||
-            !cp->tag_at(class_index).is_klass_reference()) {
-          classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
-        }
-        if (method_index != 0 &&
-            (!cp->is_within_bounds(method_index) ||
-             !cp->tag_at(method_index).is_name_and_type())) {
-          classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
-        }           
-        k->set_enclosing_method_indices(class_index, method_index);
-      } else {
-        // Unknown attribute
-        cfs->skip_u1(attribute_length, CHECK);
-      }
-    } else {
-      // Unknown attribute
-      cfs->skip_u1(attribute_length, CHECK);
-    }
-  }
-  typeArrayHandle annotations = assemble_annotations(runtime_visible_annotations,
-                                                     runtime_visible_annotations_length,
-                                                     runtime_invisible_annotations,
-                                                     runtime_invisible_annotations_length,
-                                                     CHECK);
-  k->set_class_annotations(annotations());
-}
-
-
-typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annotations,
-                                                      int runtime_visible_annotations_length,
-                                                      u1* runtime_invisible_annotations,
-                                                      int runtime_invisible_annotations_length, TRAPS) {
-  typeArrayHandle annotations;
-  if (runtime_visible_annotations != NULL ||
-      runtime_invisible_annotations != NULL) {
-    typeArrayOop anno = oopFactory::new_permanent_byteArray(runtime_visible_annotations_length +
-                                                            runtime_invisible_annotations_length, CHECK_(annotations));
-    annotations = typeArrayHandle(THREAD, anno);
-    if (runtime_visible_annotations != NULL) {
-      memcpy(annotations->byte_at_addr(0), runtime_visible_annotations, runtime_visible_annotations_length);
-    }
-    if (runtime_invisible_annotations != NULL) {
-      memcpy(annotations->byte_at_addr(runtime_visible_annotations_length), runtime_invisible_annotations, runtime_invisible_annotations_length);
-    }
-  }
-  return annotations;
-}
-
-
-static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
-  KlassHandle h_k (THREAD, fd->field_holder());
-  assert(h_k.not_null() && fd->is_static(), "just checking");
-  if (fd->has_initial_value()) {
-    BasicType t = fd->field_type();
-    switch (t) {
-      case T_BYTE:
-        h_k()->byte_field_put(fd->offset(), fd->int_initial_value());
-	      break;
-      case T_BOOLEAN:
-        h_k()->bool_field_put(fd->offset(), fd->int_initial_value());
-	      break;
-      case T_CHAR:
-        h_k()->char_field_put(fd->offset(), fd->int_initial_value());
-	      break;
-      case T_SHORT:
-        h_k()->short_field_put(fd->offset(), fd->int_initial_value());
-	      break;
-      case T_INT:
-        h_k()->int_field_put(fd->offset(), fd->int_initial_value());
-        break;
-      case T_FLOAT:
-        h_k()->float_field_put(fd->offset(), fd->float_initial_value());
-        break;
-      case T_DOUBLE:
-        h_k()->double_field_put(fd->offset(), fd->double_initial_value());
-        break;
-      case T_LONG:
-        h_k()->long_field_put(fd->offset(), fd->long_initial_value());
-        break;
-      case T_OBJECT:
-        {
-          #ifdef ASSERT      
-          symbolOop sym = oopFactory::new_symbol("Ljava/lang/String;", CHECK);
-          assert(fd->signature() == sym, "just checking");      
-          #endif
-          oop string = fd->string_initial_value(CHECK);
-          h_k()->obj_field_put(fd->offset(), string);
-        }
-        break;
-      default:
-        THROW_MSG(vmSymbols::java_lang_ClassFormatError(), 
-                  "Illegal ConstantValue attribute in class file");
-    }
-  }
-}
-
-
-void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
-  constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS) {
-  // This code is for compatibility with earlier jdk's that do not
-  // have the "discovered" field in java.lang.ref.Reference.  For 1.5
-  // the check for the "discovered" field should issue a warning if
-  // the field is not found.  For 1.6 this code should be issue a
-  // fatal error if the "discovered" field is not found.
-  //
-  // Increment fac.nonstatic_oop_count so that the start of the
-  // next type of non-static oops leaves room for the fake oop.
-  // Do not increment next_nonstatic_oop_offset so that the
-  // fake oop is place after the java.lang.ref.Reference oop
-  // fields.
-  //
-  // Check the fields in java.lang.ref.Reference for the "discovered"
-  // field.  If it is not present, artifically create a field for it.
-  // This allows this VM to run on early JDK where the field is not
-  // present.
-
-  //
-  // Increment fac.nonstatic_oop_count so that the start of the 
-  // next type of non-static oops leaves room for the fake oop.
-  // Do not increment next_nonstatic_oop_offset so that the
-  // fake oop is place after the java.lang.ref.Reference oop
-  // fields.
-  //
-  // Check the fields in java.lang.ref.Reference for the "discovered"
-  // field.  If it is not present, artifically create a field for it.
-  // This allows this VM to run on early JDK where the field is not
-  // present.
-  int reference_sig_index = 0;
-  int reference_name_index = 0;
-  int reference_index = 0;
-  int extra = java_lang_ref_Reference::number_of_fake_oop_fields;
-  const int n = (*fields_ptr)()->length();
-  for (int i = 0; i < n; i += instanceKlass::next_offset ) {
-    int name_index = 
-    (*fields_ptr)()->ushort_at(i + instanceKlass::name_index_offset);
-    int sig_index  = 
-      (*fields_ptr)()->ushort_at(i + instanceKlass::signature_index_offset);
-    symbolOop f_name = cp->symbol_at(name_index);
-    symbolOop f_sig  = cp->symbol_at(sig_index);
-    if (f_sig == vmSymbols::reference_signature() && reference_index == 0) {
-      // Save the index for reference signature for later use.
-      // The fake discovered field does not entries in the
-      // constant pool so the index for its signature cannot
-      // be extracted from the constant pool.  It will need 
-      // later, however.  It's signature is vmSymbols::reference_signature()
-      // so same an index for that signature.
-      reference_sig_index = sig_index;
-      reference_name_index = name_index;
-      reference_index = i;
-    }
-    if (f_name == vmSymbols::reference_discovered_name() &&
-      f_sig == vmSymbols::reference_signature()) {
-      // The values below are fake but will force extra
-      // non-static oop fields and a corresponding non-static 
-      // oop map block to be allocated.
-      extra = 0;
-      break;
-    }
-  }
-  if (extra != 0) { 
-    fac_ptr->nonstatic_oop_count += extra;
-    // Add the additional entry to "fields" so that the klass
-    // contains the "discoverd" field and the field will be initialized
-    // in instances of the object.
-    int fields_with_fix_length = (*fields_ptr)()->length() + 
-      instanceKlass::next_offset;
-    typeArrayOop ff = oopFactory::new_permanent_shortArray(
-                                                fields_with_fix_length, CHECK);
-    typeArrayHandle fields_with_fix(THREAD, ff);
-
-    // Take everything from the original but the length.
-    for (int idx = 0; idx < (*fields_ptr)->length(); idx++) {
-      fields_with_fix->ushort_at_put(idx, (*fields_ptr)->ushort_at(idx));
-    }
-
-    // Add the fake field at the end.
-    int i = (*fields_ptr)->length();
-    // There is no name index for the fake "discovered" field nor 
-    // signature but a signature is needed so that the field will
-    // be properly initialized.  Use one found for
-    // one of the other reference fields. Be sure the index for the
-    // name is 0.  In fieldDescriptor::initialize() the index of the
-    // name is checked.  That check is by passed for the last nonstatic
-    // oop field in a java.lang.ref.Reference which is assumed to be
-    // this artificial "discovered" field.  An assertion checks that
-    // the name index is 0.
-    assert(reference_index != 0, "Missing signature for reference");
-
-    int j;
-    for (j = 0; j < instanceKlass::next_offset; j++) {
-      fields_with_fix->ushort_at_put(i + j, 
-	(*fields_ptr)->ushort_at(reference_index +j));
-    }
-    // Clear the public access flag and set the private access flag.
-    short flags;
-    flags = 
-      fields_with_fix->ushort_at(i + instanceKlass::access_flags_offset);
-    assert(!(flags & JVM_RECOGNIZED_FIELD_MODIFIERS), "Unexpected access flags set");
-    flags = flags & (~JVM_ACC_PUBLIC);
-    flags = flags | JVM_ACC_PRIVATE;
-    AccessFlags access_flags;
-    access_flags.set_flags(flags);
-    assert(!access_flags.is_public(), "Failed to clear public flag");
-    assert(access_flags.is_private(), "Failed to set private flag");
-    fields_with_fix->ushort_at_put(i + instanceKlass::access_flags_offset, 
-      flags);
-
-    assert(fields_with_fix->ushort_at(i + instanceKlass::name_index_offset) 
-      == reference_name_index, "The fake reference name is incorrect");
-    assert(fields_with_fix->ushort_at(i + instanceKlass::signature_index_offset)
-      == reference_sig_index, "The fake reference signature is incorrect");
-    // The type of the field is stored in the low_offset entry during
-    // parsing.
-    assert(fields_with_fix->ushort_at(i + instanceKlass::low_offset) ==
-      NONSTATIC_OOP, "The fake reference type is incorrect");
-
-    // "fields" is allocated in the permanent generation.  Disgard
-    // it and let it be collected.
-    (*fields_ptr) = fields_with_fix;
-  }
-  return;
-}
-
-
-void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr, 
-  FieldAllocationCount *fac_ptr, TRAPS) {
-  // Add fake fields for java.lang.Class instances
-  //
-  // This is not particularly nice. We should consider adding a
-  // private transient object field at the Java level to
-  // java.lang.Class. Alternatively we could add a subclass of
-  // instanceKlass which provides an accessor and size computer for
-  // this field, but that appears to be more code than this hack.
-  //
-  // NOTE that we wedge these in at the beginning rather than the
-  // end of the object because the Class layout changed between JDK
-  // 1.3 and JDK 1.4 with the new reflection implementation; some
-  // nonstatic oop fields were added at the Java level. The offsets
-  // of these fake fields can't change between these two JDK
-  // versions because when the offsets are computed at bootstrap
-  // time we don't know yet which version of the JDK we're running in.
-
-  // The values below are fake but will force two non-static oop fields and 
-  // a corresponding non-static oop map block to be allocated.
-  const int extra = java_lang_Class::number_of_fake_oop_fields;
-  fac_ptr->nonstatic_oop_count += extra;
-}
-
-
-void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_ptr) {
-  // Cause the extra fake fields in java.lang.Class to show up before
-  // the Java fields for layout compatibility between 1.3 and 1.4
-  // Incrementing next_nonstatic_oop_offset here advances the 
-  // location where the real java fields are placed.
-  const int extra = java_lang_Class::number_of_fake_oop_fields;
-  (*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
-}
-
-
-instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name, 
-                                                    Handle class_loader, 
-                                                    Handle protection_domain, 
-                                                    symbolHandle& parsed_name,
-                                                    TRAPS) {
-  // So that JVMTI can cache class file in the state before retransformable agents
-  // have modified it
-  unsigned char *cached_class_file_bytes = NULL;
-  jint cached_class_file_length;
-
-  ClassFileStream* cfs = stream();
-  // Timing
-  PerfTraceTime vmtimer(ClassLoader::perf_accumulated_time());
-
-  _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
-
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_CLASS_LOAD_HOOK)) {
-    unsigned char* ptr = cfs->buffer();
-    unsigned char* end_ptr = cfs->buffer() + cfs->length();
-
-    jvmpi::post_class_load_hook_event(&ptr, &end_ptr, jvmpi::jvmpi_alloc);
-
-    if (ptr != cfs->buffer()) {
-      cfs = new ClassFileStream(ptr, end_ptr - ptr, cfs->source());
-      set_stream(cfs);
-    }
-  }
-#endif // JVMPI_SUPPORT
-
-  if (JvmtiExport::should_post_class_file_load_hook()) {
-    unsigned char* ptr = cfs->buffer();
-    unsigned char* end_ptr = cfs->buffer() + cfs->length();
-
-    JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, 
-                                           &ptr, &end_ptr,
-                                           &cached_class_file_bytes, 
-                                           &cached_class_file_length);
-
-    if (ptr != cfs->buffer()) {
-      // JVMTI agent has modified class file data.
-      // Set new class file stream using JVMTI agent modified
-      // class file data.       
-      cfs = new ClassFileStream(ptr, end_ptr - ptr, cfs->source());
-      set_stream(cfs);
-    }
-  }
-
-
-  instanceKlassHandle nullHandle;
-
-  // Figure out whether we can skip format checking (matching classic VM behavior)
-  _need_verify = Verifier::should_verify_for(class_loader());
-  
-  // Set the verify flag in stream
-  cfs->set_verify(_need_verify);
-
-  // Save the class file name for easier error message printing.
-  _class_name = name.not_null()? name : vmSymbolHandles::unknown_class_name();
-
-  cfs->guarantee_more(8, CHECK_(nullHandle));  // magic, major, minor
-  // Magic value
-  u4 magic = cfs->get_u4_fast();
-  guarantee_property(magic == JAVA_CLASSFILE_MAGIC, 
-                     "Incompatible magic value %u in class file %s", 
-                     magic, CHECK_(nullHandle));
-
-  // Version numbers  
-  u2 minor_version = cfs->get_u2_fast();
-  u2 major_version = cfs->get_u2_fast();
-
-  // Check version numbers - we check this even with verifier off
-  if (!is_supported_version(major_version, minor_version)) {
-    if (name.is_null()) {
-      Exceptions::fthrow( 
-        THREAD_AND_LOCATION,
-        vmSymbolHandles::java_lang_UnsupportedClassVersionError(), 
-        "Unsupported major.minor version %u.%u",
-        major_version, 
-        minor_version);
-    } else {
-      ResourceMark rm(THREAD);
-      Exceptions::fthrow( 
-        THREAD_AND_LOCATION,
-        vmSymbolHandles::java_lang_UnsupportedClassVersionError(), 
-        "%s : Unsupported major.minor version %u.%u",
-        name->as_C_string(),
-        major_version, 
-        minor_version);
-    }
-    return nullHandle;
-  }
-
-  _major_version = major_version;
-  _minor_version = minor_version;
-
-
-  // Check if verification needs to be relaxed for this class file
-  // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
-  _relax_verify = Verifier::relax_verify_for(class_loader());
-
-  // Constant pool
-  constantPoolHandle cp = parse_constant_pool(CHECK_(nullHandle));
-  int cp_size = cp->length();
-
-  cfs->guarantee_more(8, CHECK_(nullHandle));  // flags, this_class, super_class, infs_len
-
-  // Access flags
-  AccessFlags access_flags;
-  jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS;
-
-  if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
-    // Set abstract bit for old class files for backward compatibility
-    flags |= JVM_ACC_ABSTRACT;
-  }
-  verify_legal_class_modifiers(flags, CHECK_(nullHandle));
-  access_flags.set_flags(flags);
-
-  // This class and superclass
-  instanceKlassHandle super_klass;
-  u2 this_class_index = cfs->get_u2_fast();
-  check_property(
-    valid_cp_range(this_class_index, cp_size) &&
-      cp->tag_at(this_class_index).is_unresolved_klass(), 
-    "Invalid this class index %u in constant pool in class file %s", 
-    this_class_index, CHECK_(nullHandle));
-
-  symbolHandle class_name (THREAD, cp->unresolved_klass_at(this_class_index));
-  assert(class_name.not_null(), "class_name can't be null");
-
-  // It's important to set parsed_name *before* resolving the super class.
-  // (it's used for cleanup by the caller if parsing fails)
-  parsed_name = class_name;
-
-  // Update _class_name which could be null previously to be class_name
-  _class_name = class_name;
-
-  // Don't need to check whether this class name is legal or not.
-  // It has been checked when constant pool is parsed.
-  // However, make sure it is not an array type.
-  if (_need_verify) {
-    guarantee_property(class_name->byte_at(0) != JVM_SIGNATURE_ARRAY, 
-                       "Bad class name in class file %s", 
-                       CHECK_(nullHandle));
-  }
-  
-  klassOop preserve_this_klass;   // for storing result across HandleMark
-
-  // release all handles when parsing is done
-  { HandleMark hm(THREAD);
-
-    // Checks if name in class file matches requested name
-    if (name.not_null() && class_name() != name()) {
-      ResourceMark rm(THREAD);
-      Exceptions::fthrow(
-        THREAD_AND_LOCATION,
-        vmSymbolHandles::java_lang_NoClassDefFoundError(), 
-        "%s (wrong name: %s)", 
-        name->as_C_string(), 
-        class_name->as_C_string()
-      );
-      return nullHandle;
-    }
-
-    if (TraceClassLoadingPreorder) {
-      tty->print("[Loading %s", name()->as_klass_external_name());
-      if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
-      tty->print_cr("]");
-    }
-
-    u2 super_class_index = cfs->get_u2_fast();
-    if (super_class_index == 0) {
-      check_property(class_name() == vmSymbols::java_lang_Object(),
-                     "Invalid superclass index %u in class file %s", 
-                     super_class_index,
-                     CHECK_(nullHandle));
-    } else {
-      check_property(valid_cp_range(super_class_index, cp_size) &&
-                     cp->tag_at(super_class_index).is_unresolved_klass(), 
-                     "Invalid superclass index %u in class file %s", 
-                     super_class_index,
-                     CHECK_(nullHandle));
-      // The class name should be legal because it is checked when parsing constant pool.
-      // However, make sure it is not an array type.
-      if (_need_verify) {
-        guarantee_property(cp->unresolved_klass_at(super_class_index)->byte_at(0) != JVM_SIGNATURE_ARRAY, 
-                          "Bad superclass name in class file %s", CHECK_(nullHandle));
-      }
-    }
-
-    // Interfaces
-    u2 itfs_len = cfs->get_u2_fast();
-    objArrayHandle local_interfaces;
-    if (itfs_len == 0) {
-      local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
-    } else {
-      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, &vmtimer, _class_name, CHECK_(nullHandle));
-    }
-
-    // Fields (offsets are filled in later)
-    struct FieldAllocationCount fac = {0,0,0,0,0,0,0,0,0,0};
-    objArrayHandle fields_annotations;
-    typeArrayHandle fields = parse_fields(cp, access_flags.is_interface(), &fac, &fields_annotations, CHECK_(nullHandle));
-    // Methods
-    bool has_final_method = false;
-    AccessFlags promoted_flags;
-    promoted_flags.set_flags(0);
-    // These need to be oop pointers because they are allocated lazily
-    // inside parse_methods inside a nested HandleMark
-    objArrayOop methods_annotations_oop = NULL;
-    objArrayOop methods_parameter_annotations_oop = NULL;
-    objArrayOop methods_default_annotations_oop = NULL;
-    objArrayHandle methods = parse_methods(cp, access_flags.is_interface(), 
-                                           &promoted_flags,
-                                           &has_final_method,
-                                           &methods_annotations_oop,
-                                           &methods_parameter_annotations_oop,
-                                           &methods_default_annotations_oop,
-                                           CHECK_(nullHandle));
-
-    objArrayHandle methods_annotations(THREAD, methods_annotations_oop);
-    objArrayHandle methods_parameter_annotations(THREAD, methods_parameter_annotations_oop);
-    objArrayHandle methods_default_annotations(THREAD, methods_default_annotations_oop);
-
-    // We check super class after class file is parsed and format is checked
-    if (super_class_index > 0) {
-      symbolHandle sk (THREAD, cp->klass_name_at(super_class_index));
-      if (access_flags.is_interface()) {
-        // Before attempting to resolve the superclass, check for class format
-        // errors not checked yet.
-        guarantee_property(sk() == vmSymbols::java_lang_Object(),
-                           "Interfaces must have java.lang.Object as superclass in class file %s",
-                           CHECK_(nullHandle));
-      }
-      klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
-                                                           sk, 
-                                                           class_loader, 
-                                                           protection_domain, 
-                                                           true,
-                                                           CHECK_(nullHandle));
-      KlassHandle kh (THREAD, k);
-      super_klass = instanceKlassHandle(THREAD, kh());
-      if (super_klass->is_interface()) {
-        ResourceMark rm(THREAD);
-        Exceptions::fthrow(
-          THREAD_AND_LOCATION,
-          vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
-          "class %s has interface %s as super class",
-          class_name->as_klass_external_name(),
-          super_klass->external_name()
-        );
-        return nullHandle;
-      }
-      // Make sure super class is not final
-      if (super_klass->is_final()) {
-        THROW_MSG_(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class", nullHandle);
-      }
-    }
-
-    // Compute the transitive list of all unique interfaces implemented by this class
-    objArrayHandle transitive_interfaces = compute_transitive_interfaces(super_klass, local_interfaces, CHECK_(nullHandle));
-
-    // sort methods
-    typeArrayHandle method_ordering = sort_methods(methods,
-                                                   methods_annotations,
-                                                   methods_parameter_annotations,
-                                                   methods_default_annotations,
-                                                   CHECK_(nullHandle));
-
-    // promote flags from parse_methods() to the klass' flags
-    access_flags.add_promoted_flags(promoted_flags.as_int());
-
-    // Size of Java vtable (in words)
-    int vtable_size = 0;    
-    int itable_size = 0;
-    int num_miranda_methods = 0;
-
-    klassVtable::compute_vtable_size_and_num_mirandas(vtable_size, 
-                                                      num_miranda_methods, 
-                                                      super_klass(),
-                                                      methods(),
-                                                      access_flags,
-                                                      class_loader(),
-                                                      class_name(), 
-                                                      local_interfaces());  
-       
-    // Size of Java itable (in words)
-    itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);  
-    
-    // Field size and offset computation
-    int nonstatic_field_size = super_klass() == NULL ? 0 : super_klass->nonstatic_field_size();
-#ifndef PRODUCT
-    int orig_nonstatic_field_size = 0;
-#endif
-    int static_field_size = 0;
-    int next_static_oop_offset;
-    int next_static_double_offset;
-    int next_static_word_offset;
-    int next_static_short_offset;
-    int next_static_byte_offset;
-    int next_static_type_offset;
-    int next_nonstatic_oop_offset;
-    int next_nonstatic_double_offset;
-    int next_nonstatic_word_offset;
-    int next_nonstatic_short_offset;
-    int next_nonstatic_byte_offset;
-    int next_nonstatic_type_offset;
-    int first_nonstatic_oop_offset;
-    int first_nonstatic_field_offset;
-    int next_nonstatic_field_offset;
-
-    // Calculate the starting byte offsets
-    next_static_oop_offset      = (instanceKlass::header_size() + 
-		 		  align_object_offset(vtable_size) + 
-				  align_object_offset(itable_size)) * wordSize;
-    next_static_double_offset   = next_static_oop_offset + 
-			 	  (fac.static_oop_count * oopSize);
-    if ( fac.static_double_count && 
-	 (Universe::field_type_should_be_aligned(T_DOUBLE) || 
- 	  Universe::field_type_should_be_aligned(T_LONG)) ) {
-      next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
-    }
-
-    next_static_word_offset     = next_static_double_offset + 
-				  (fac.static_double_count * BytesPerLong);
-    next_static_short_offset    = next_static_word_offset + 
-				  (fac.static_word_count * BytesPerInt);
-    next_static_byte_offset     = next_static_short_offset + 
-				  (fac.static_short_count * BytesPerShort);
-    next_static_type_offset     = align_size_up((next_static_byte_offset +
-			          fac.static_byte_count ), wordSize );
-    static_field_size 	        = (next_static_type_offset - 
-			          next_static_oop_offset) / wordSize;
-    first_nonstatic_field_offset = (instanceOopDesc::header_size() + 
-				    nonstatic_field_size) * wordSize;
-    next_nonstatic_field_offset = first_nonstatic_field_offset;
-
-    // Add fake fields for java.lang.Class instances (also see below)
-    if (class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
-      java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle));
-    }
-
-    // Add a fake "discovered" field if it is not present 
-    // for compatibility with earlier jdk's.
-    if (class_name() == vmSymbols::java_lang_ref_Reference() 
-      && class_loader.is_null()) {
-      java_lang_ref_Reference_fix_pre(&fields, cp, &fac, CHECK_(nullHandle));
-    }
-    // end of "discovered" field compactibility fix
-
-    int nonstatic_double_count = fac.nonstatic_double_count;
-    int nonstatic_word_count   = fac.nonstatic_word_count;
-    int nonstatic_short_count  = fac.nonstatic_short_count;
-    int nonstatic_byte_count   = fac.nonstatic_byte_count;
-    int nonstatic_oop_count    = fac.nonstatic_oop_count;
-
-    // Prepare list of oops for oop maps generation.
-    u2* nonstatic_oop_offsets;
-    u2* nonstatic_oop_length;
-    int nonstatic_oop_map_count = 0;
-
-    nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  nonstatic_oop_count+1);
-    nonstatic_oop_length  = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  nonstatic_oop_count+1);
-
-    // Add fake fields for java.lang.Class instances (also see above).
-    // FieldsAllocationStyle and CompactFields values will be reset to default.
-    if(class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
-      java_lang_Class_fix_post(&next_nonstatic_field_offset);
-      nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
-      int fake_oop_count       = (( next_nonstatic_field_offset -
-                                    first_nonstatic_field_offset ) / oopSize);
-      nonstatic_oop_length [0] = (u2)fake_oop_count;
-      nonstatic_oop_map_count  = 1;
-      nonstatic_oop_count     -= fake_oop_count;
-      first_nonstatic_oop_offset = first_nonstatic_field_offset;
-    } else {
-      first_nonstatic_oop_offset = 0; // will be set for first oop field
-    }
-
-#ifndef PRODUCT
-    if( PrintCompactFieldsSavings ) {
-      next_nonstatic_double_offset = next_nonstatic_field_offset + 
-                                     (nonstatic_oop_count * oopSize);
-      if ( nonstatic_double_count > 0 ) {
-        next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong); 
-      }
-      next_nonstatic_word_offset  = next_nonstatic_double_offset + 
-                                    (nonstatic_double_count * BytesPerLong);
-      next_nonstatic_short_offset = next_nonstatic_word_offset + 
-                                    (nonstatic_word_count * BytesPerInt);
-      next_nonstatic_byte_offset  = next_nonstatic_short_offset + 
-                                    (nonstatic_short_count * BytesPerShort);
-      next_nonstatic_type_offset  = align_size_up((next_nonstatic_byte_offset +
-                                    nonstatic_byte_count ), wordSize );
-      orig_nonstatic_field_size   = nonstatic_field_size + 
-        ((next_nonstatic_type_offset - first_nonstatic_field_offset)/wordSize);
-    }
-#endif
-    bool compact_fields   = CompactFields;
-    int  allocation_style = FieldsAllocationStyle;
-    if( allocation_style < 0 || allocation_style > 1 ) { // Out of range?
-      assert(false, "0 <= FieldsAllocationStyle <= 1");
-      allocation_style = 1; // Optimistic
-    }
-
-    // The next classes have predefined hard-coded fields offsets
-    // (see in JavaClasses::compute_hard_coded_offsets()).
-    // Use default fields allocation order for them.
-    if( (allocation_style != 0 || compact_fields ) && class_loader.is_null() &&
-        (class_name() == vmSymbols::java_lang_AssertionStatusDirectives() ||
-         class_name() == vmSymbols::java_lang_Class() ||
-         class_name() == vmSymbols::java_lang_ClassLoader() ||
-         class_name() == vmSymbols::java_lang_ref_Reference() ||
-         class_name() == vmSymbols::java_lang_ref_SoftReference() ||
-         class_name() == vmSymbols::java_lang_StackTraceElement() ||
-         class_name() == vmSymbols::java_lang_String() ||
-         class_name() == vmSymbols::java_lang_Throwable()) ) {
-      allocation_style = 0;     // Allocate oops first
-      compact_fields   = false; // Don't compact fields
-    }
-
-    if( allocation_style == 0 ) {
-      // Fields order: oops, longs/doubles, ints, shorts/chars, bytes
-      next_nonstatic_oop_offset    = next_nonstatic_field_offset;
-      next_nonstatic_double_offset = next_nonstatic_oop_offset + 
-			 	     (nonstatic_oop_count * oopSize);
-    } else if( allocation_style == 1 ) {
-      // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
-      next_nonstatic_double_offset = next_nonstatic_field_offset;
-    } else {
-      ShouldNotReachHere();
-    }
-
-    int nonstatic_oop_space_count   = 0;
-    int nonstatic_word_space_count  = 0;
-    int nonstatic_short_space_count = 0;
-    int nonstatic_byte_space_count  = 0;
-    int nonstatic_oop_space_offset;
-    int nonstatic_word_space_offset;
-    int nonstatic_short_space_offset;
-    int nonstatic_byte_space_offset;
-
-    if( nonstatic_double_count > 0 ) {
-      int offset = next_nonstatic_double_offset;
-      next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
-      if( compact_fields && offset != next_nonstatic_double_offset ) {
-        // Allocate available fields into the gap before double field.
-        int length = next_nonstatic_double_offset - offset;
-        assert(length == BytesPerInt, "");
-        nonstatic_word_space_offset = offset;
-        if( nonstatic_word_count > 0 ) {
-          nonstatic_word_count      -= 1;
-          nonstatic_word_space_count = 1; // Only one will fit
-          length -= BytesPerInt;
-          offset += BytesPerInt;
-        }
-        nonstatic_short_space_offset = offset;
-        while( length >= BytesPerShort && nonstatic_short_count > 0 ) {
-          nonstatic_short_count       -= 1;
-          nonstatic_short_space_count += 1;
-          length -= BytesPerShort;
-          offset += BytesPerShort;
-        }
-        nonstatic_byte_space_offset = offset;
-        while( length > 0 && nonstatic_byte_count > 0 ) {
-          nonstatic_byte_count       -= 1;
-          nonstatic_byte_space_count += 1;
-          length -= 1;
-        }
-        // Allocate oop field in the gap if there are no other fields for that.
-        nonstatic_oop_space_offset = offset;
-        if( length >= oopSize && nonstatic_oop_count > 0 &&  
-            allocation_style != 0 ) { // when oop fields not first
-          nonstatic_oop_count      -= 1;
-          nonstatic_oop_space_count = 1; // Only one will fit
-          length -= oopSize;
-          offset += oopSize;
-        }
-      }
-    }
-
-    next_nonstatic_word_offset  = next_nonstatic_double_offset + 
-                                  (nonstatic_double_count * BytesPerLong);
-    next_nonstatic_short_offset = next_nonstatic_word_offset + 
-                                  (nonstatic_word_count * BytesPerInt);
-    next_nonstatic_byte_offset  = next_nonstatic_short_offset + 
-                                  (nonstatic_short_count * BytesPerShort);
-
-    int notaligned_offset;
-    if( allocation_style == 0 ) {
-      notaligned_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
-    } else { // allocation_style == 1 
-      next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
-      if( nonstatic_oop_count > 0 ) {
-        notaligned_offset = next_nonstatic_oop_offset;
-        next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize);
-      }
-      notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize);
-    }
-    next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
-    nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
-                                      - first_nonstatic_field_offset)/wordSize);
-
-    // Iterate over fields again and compute correct offsets.
-    // The field allocation type was temporarily stored in the offset slot.
-    // oop fields are located before non-oop fields (static and non-static).
-    int len = fields->length();
-    for (int i = 0; i < len; i += instanceKlass::next_offset) {
-      int real_offset;
-      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i+4);
-      switch (atype) {
-        case STATIC_OOP:
-          real_offset = next_static_oop_offset;
-          next_static_oop_offset += oopSize;
-          break;
-        case STATIC_BYTE:
-          real_offset = next_static_byte_offset;
-          next_static_byte_offset += 1;
-          break;
-        case STATIC_SHORT:
-          real_offset = next_static_short_offset;
-          next_static_short_offset += BytesPerShort;
-          break;
-        case STATIC_WORD:
-          real_offset = next_static_word_offset;
-          next_static_word_offset += BytesPerInt;
-          break;
-        case STATIC_ALIGNED_DOUBLE:
-        case STATIC_DOUBLE:
-          real_offset = next_static_double_offset;
-          next_static_double_offset += BytesPerLong;
-          break;
-        case NONSTATIC_OOP:
-          if( nonstatic_oop_space_count > 0 ) {
-            real_offset = nonstatic_oop_space_offset;
-            nonstatic_oop_space_offset += oopSize;
-            nonstatic_oop_space_count  -= 1;
-          } else {
-            real_offset = next_nonstatic_oop_offset;
-            next_nonstatic_oop_offset += oopSize;
-          }
-          // Update oop maps
-          if( nonstatic_oop_map_count > 0 &&
-              nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == 
-              (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
-            // Extend current oop map
-            nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
-          } else {
-            // Create new oop map
-            nonstatic_oop_offsets[nonstatic_oop_map_count] = (u2)real_offset;
-            nonstatic_oop_length [nonstatic_oop_map_count] = 1;
-            nonstatic_oop_map_count += 1;
-            if( first_nonstatic_oop_offset == 0 ) { // Undefined
-              first_nonstatic_oop_offset = real_offset;
-            }
-          }
-          break;
-        case NONSTATIC_BYTE:
-          if( nonstatic_byte_space_count > 0 ) {
-            real_offset = nonstatic_byte_space_offset;
-            nonstatic_byte_space_offset += 1;
-            nonstatic_byte_space_count  -= 1;
-          } else {
-            real_offset = next_nonstatic_byte_offset;
-            next_nonstatic_byte_offset += 1;
-          }
-          break;
-        case NONSTATIC_SHORT:
-          if( nonstatic_short_space_count > 0 ) {
-            real_offset = nonstatic_short_space_offset;
-            nonstatic_short_space_offset += BytesPerShort;
-            nonstatic_short_space_count  -= 1;
-          } else {
-            real_offset = next_nonstatic_short_offset;
-            next_nonstatic_short_offset += BytesPerShort;
-          }
-          break;
-        case NONSTATIC_WORD:
-          if( nonstatic_word_space_count > 0 ) {
-            real_offset = nonstatic_word_space_offset;
-            nonstatic_word_space_offset += BytesPerInt;
-            nonstatic_word_space_count  -= 1;
-          } else {
-            real_offset = next_nonstatic_word_offset;
-            next_nonstatic_word_offset += BytesPerInt;
-          }
-          break;
-        case NONSTATIC_ALIGNED_DOUBLE:
-        case NONSTATIC_DOUBLE:
-          real_offset = next_nonstatic_double_offset;
-          next_nonstatic_double_offset += BytesPerLong;
-          break;
-        default:
-          ShouldNotReachHere();
-      }
-      fields->short_at_put(i+4, extract_low_short_from_int(real_offset) );
-      fields->short_at_put(i+5, extract_high_short_from_int(real_offset) ); 
-    }
-
-    // Size of instances
-    int instance_size;
-
-    instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
-
-    assert(instance_size == align_object_size(instanceOopDesc::header_size() + nonstatic_field_size), "consistent layout helper value");
-
-    // Size of non-static oop map blocks (in words) allocated at end of klass
-    int nonstatic_oop_map_size = compute_oop_map_size(super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset);
-
-    // Compute reference type
-    ReferenceType rt;
-    if (super_klass() == NULL) {
-      rt = REF_NONE;
-    } else {
-      rt = super_klass->reference_type();
-    }
-
-    // We can now create the basic klassOop for this klass    
-    klassOop ik = oopFactory::new_instanceKlass(
-                                    vtable_size, itable_size, 
-                                    static_field_size, nonstatic_oop_map_size, 
-                                    rt, CHECK_(nullHandle));
-    instanceKlassHandle this_klass (THREAD, ik); 
-
-    assert(this_klass->static_field_size() == static_field_size && 
-           this_klass->nonstatic_oop_map_size() == nonstatic_oop_map_size, "sanity check");
-    
-    // Fill in information already parsed
-    this_klass->set_access_flags(access_flags);
-    jint lh = Klass::instance_layout_helper(instance_size, false);
-    this_klass->set_layout_helper(lh);
-    assert(this_klass->oop_is_instance(), "layout is correct");
-    assert(this_klass->size_helper() == instance_size, "correct size_helper");
-    // Not yet: supers are done below to support the new subtype-checking fields
-    //this_klass->set_super(super_klass());  
-    this_klass->set_class_loader(class_loader());    
-    this_klass->set_nonstatic_field_size(nonstatic_field_size);
-    this_klass->set_static_oop_field_size(fac.static_oop_count);       
-    cp->set_pool_holder(this_klass());
-    this_klass->set_constants(cp());
-    this_klass->set_local_interfaces(local_interfaces());
-    this_klass->set_fields(fields());
-    this_klass->set_methods(methods());
-    if (has_final_method) {
-      this_klass->set_has_final_method();
-    }
-    this_klass->set_method_ordering(method_ordering());
-    this_klass->set_initial_method_idnum(methods->length());
-    this_klass->set_name(cp->klass_name_at(this_class_index));
-    this_klass->set_protection_domain(protection_domain());
-    this_klass->set_fields_annotations(fields_annotations());
-    this_klass->set_methods_annotations(methods_annotations());
-    this_klass->set_methods_parameter_annotations(methods_parameter_annotations());
-    this_klass->set_methods_default_annotations(methods_default_annotations());
-
-    this_klass->set_minor_version(minor_version);
-    this_klass->set_major_version(major_version);
-
-    if (cached_class_file_bytes != NULL) {
-      // JVMTI: we have an instanceKlass now, tell it about the cached bytes
-      this_klass->set_cached_class_file(cached_class_file_bytes, 
-                                        cached_class_file_length);
-    }
-      
-    // Miranda methods
-    if ((num_miranda_methods > 0) || 
-	// if this class introduced new miranda methods or
-	(super_klass.not_null() && (super_klass->has_miranda_methods()))
-	// super class exists and this class inherited miranda methods
-	) {
-      this_klass->set_has_miranda_methods(); // then set a flag
-    }
-
-    // Additional attributes
-    parse_classfile_attributes(cp, this_klass, CHECK_(nullHandle));
-
-    // Make sure this is the end of class file stream
-    guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
-
-    // Initialize static fields
-    this_klass->do_local_static_fields(&initialize_static_field, CHECK_(nullHandle));
-
-    // VerifyOops believes that once this has been set, the object is completely loaded.
-    // Compute transitive closure of interfaces this class implements
-    this_klass->set_transitive_interfaces(transitive_interfaces());    
-
-    // Fill in information needed to compute superclasses.
-    this_klass->initialize_supers(super_klass(), CHECK_(nullHandle));
-
-    // Initialize itable offset tables
-    klassItable::setup_itable_offset_table(this_klass);
-
-    // Do final class setup
-    fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_length);
-
-    set_precomputed_flags(this_klass);
-
-    // reinitialize modifiers, using the InnerClasses attribute
-    int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle));
-    this_klass->set_modifier_flags(computed_modifiers);
-
-    // check if this class can access its super class
-    check_super_class_access(this_klass, CHECK_(nullHandle));
-
-    // check if this class can access its superinterfaces
-    check_super_interface_access(this_klass, CHECK_(nullHandle));
-
-    // check if this class overrides any final method
-    check_final_method_override(this_klass, CHECK_(nullHandle));
-
-    // check that if this class is an interface then it doesn't have static methods
-    if (this_klass->is_interface()) {
-      check_illegal_static_method(this_klass, CHECK_(nullHandle));
-    }
-
-    ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), 
-                                             false /* not shared class */);
-	  
-    if (TraceClassLoading) {
-      // print in a single call to reduce interleaving of output
-      if (cfs->source() != NULL) {
-        tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
-                   cfs->source());
-      } else if (class_loader.is_null()) {
-        if (THREAD->is_Java_thread()) {
-          klassOop caller = ((JavaThread*)THREAD)->security_get_caller_class(1);
-          tty->print("[Loaded %s by instance of %s]\n",
-                     this_klass->external_name(),
-                     instanceKlass::cast(caller)->external_name());
-        } else {
-          tty->print("[Loaded %s]\n", this_klass->external_name());
-        }
-      } else {
-        ResourceMark rm;
-        tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
-                   instanceKlass::cast(class_loader->klass())->external_name());
-      }
-    }
-
-    if (TraceClassResolution) {
-      // print out the superclass.
-      const char * from = Klass::cast(this_klass())->external_name();
-      if (this_klass->java_super() != NULL) {
-        tty->print("RESOLVE %s %s\n", from, instanceKlass::cast(this_klass->java_super())->external_name());
-      }
-      // print out each of the interface classes referred to by this class.
-      objArrayHandle local_interfaces(THREAD, this_klass->local_interfaces());
-      if (!local_interfaces.is_null()) {
-        int length = local_interfaces->length();
-        for (int i = 0; i < length; i++) {
-          klassOop k = klassOop(local_interfaces->obj_at(i)); 
-          instanceKlass* to_class = instanceKlass::cast(k);
-          const char * to = to_class->external_name();
-          tty->print("RESOLVE %s %s\n", from, to);
-        }
-      }
-    }
-
-#ifndef PRODUCT
-    if( PrintCompactFieldsSavings ) {
-      if( nonstatic_field_size < orig_nonstatic_field_size ) {
-        tty->print("[Saved %d of %3d words in %s]\n", 
-                 orig_nonstatic_field_size - nonstatic_field_size,
-                 orig_nonstatic_field_size, this_klass->external_name());
-      } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
-        tty->print("[Wasted %d over %3d words in %s]\n", 
-                 nonstatic_field_size - orig_nonstatic_field_size,
-                 orig_nonstatic_field_size, this_klass->external_name());
-      }
-    }
-#endif
-
-    // preserve result across HandleMark  
-    preserve_this_klass = this_klass();    
-  }
-
-  // Create new handle outside HandleMark
-  instanceKlassHandle this_klass (THREAD, preserve_this_klass);
-  debug_only(this_klass->as_klassOop()->verify();)
-
-  return this_klass;
-}
-
-
-int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_map_count, int first_nonstatic_oop_offset) {
-  int map_size = super.is_null() ? 0 : super->nonstatic_oop_map_size();
-  if (nonstatic_oop_map_count > 0) {
-    // We have oops to add to map
-    if (map_size == 0) {
-      map_size = nonstatic_oop_map_count;
-    } else {
-      // Check whether we should add a new map block or whether the last one can be extended
-      OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
-      OopMapBlock* last_map = first_map + map_size - 1;
-
-      int next_offset = last_map->offset() + (last_map->length() * oopSize);
-      if (next_offset == first_nonstatic_oop_offset) {
-        // There is no gap bettwen superklass's last oop field and first 
-        // local oop field, merge maps.
-        nonstatic_oop_map_count -= 1;
-      } else {
-        // Superklass didn't end with a oop field, add extra maps
-        assert(next_offset<first_nonstatic_oop_offset, "just checking");
-      }
-      map_size += nonstatic_oop_map_count;
-    }
-  }
-  return map_size;
-}
-
-
-void ClassFileParser::fill_oop_maps(instanceKlassHandle k, 
-                        int nonstatic_oop_map_count, 
-                        u2* nonstatic_oop_offsets, u2* nonstatic_oop_length) {
-  OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
-  OopMapBlock* last_oop_map = this_oop_map + k->nonstatic_oop_map_size();
-  instanceKlass* super = k->superklass();
-  if (super != NULL) {
-    int super_oop_map_size     = super->nonstatic_oop_map_size();
-    OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
-    // Copy maps from superklass
-    while (super_oop_map_size-- > 0) {
-      *this_oop_map++ = *super_oop_map++;
-    }
-  }
-  if (nonstatic_oop_map_count > 0) {
-    if (this_oop_map + nonstatic_oop_map_count > last_oop_map) {
-      // Calculated in compute_oop_map_size() number of oop maps is less then 
-      // collected oop maps since there is no gap between superklass's last oop 
-      // field and first local oop field. Extend the last oop map copied 
-      // from the superklass instead of creating new one.
-      nonstatic_oop_map_count--;
-      nonstatic_oop_offsets++;
-      this_oop_map--;
-      this_oop_map->set_length(this_oop_map->length() + *nonstatic_oop_length++);
-      this_oop_map++;
-    }
-    assert((this_oop_map + nonstatic_oop_map_count) == last_oop_map, "just checking");
-    // Add new map blocks, fill them
-    while (nonstatic_oop_map_count-- > 0) {
-      this_oop_map->set_offset(*nonstatic_oop_offsets++);
-      this_oop_map->set_length(*nonstatic_oop_length++);
-      this_oop_map++;
-    }
-  }
-}
-
-
-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) {
-  klassOop super = k->super();
-
-  // Check if this klass has an empty finalize method (i.e. one with return bytecode only),
-  // in which case we don't have to register objects as finalizable
-  if (!_has_empty_finalizer) {
-    if (_has_finalizer ||
-        (super != NULL && super->klass_part()->has_finalizer())) {
-      k->set_has_finalizer();
-    }
-  }
-
-#ifdef ASSERT
-  bool f = false;
-  methodOop m = k->lookup_method(vmSymbols::finalize_method_name(),
-                                 vmSymbols::void_method_signature());
-  if (m != NULL && !m->is_empty_method()) {
-    f = true;
-  }
-  assert(f == k->has_finalizer(), "inconsistent has_finalizer");
-#endif
-
-  // Check if this klass supports the java.lang.Cloneable interface
-  if (SystemDictionary::cloneable_klass_loaded()) {
-    if (k->is_subtype_of(SystemDictionary::cloneable_klass())) {
-      k->set_is_cloneable();
-    }
-  }
-
-  // Check if this klass has a vanilla default constructor
-  if (super == NULL) {
-    // java.lang.Object has empty default constructor
-    k->set_has_vanilla_constructor();
-  } else {
-    if (Klass::cast(super)->has_vanilla_constructor() &&
-        _has_vanilla_constructor) {
-      k->set_has_vanilla_constructor();
-    }
-#ifdef ASSERT
-    bool v = false;
-    if (Klass::cast(super)->has_vanilla_constructor()) {
-      methodOop constructor = k->find_method(vmSymbols::object_initializer_name(
-), vmSymbols::void_method_signature());
-      if (constructor != NULL && constructor->is_vanilla_constructor()) {
-        v = true;
-      }
-    }
-    assert(v == k->has_vanilla_constructor(), "inconsistent has_vanilla_constructor");
-#endif
-  }
-
-  // If it cannot be fast-path allocated, set a bit in the layout helper.
-  // See documentation of instanceKlass::can_be_fastpath_allocated().
-  assert(k->size_helper() > 0, "layout_helper is initialized");
-  if ((!RegisterFinalizersAtInit && k->has_finalizer())
-      || k->is_abstract() || k->is_interface()
-      || (k->name() == vmSymbols::java_lang_Class()
-          && k->class_loader() == NULL)
-      || k->size_helper() >= FastAllocateSizeLimit) {
-    // Forbid fast-path allocation.
-    jint lh = Klass::instance_layout_helper(k->size_helper(), true);
-    k->set_layout_helper(lh);
-  }
-}
-
-
-// utility method for appending and array with check for duplicates
-
-void append_interfaces(objArrayHandle result, int& index, objArrayOop ifs) {
-  // iterate over new interfaces
-  for (int i = 0; i < ifs->length(); i++) {
-    oop e = ifs->obj_at(i);
-    assert(e->is_klass() && instanceKlass::cast(klassOop(e))->is_interface(), "just checking");
-    // check for duplicates
-    bool duplicate = false;
-    for (int j = 0; j < index; j++) {
-      if (result->obj_at(j) == e) {
-        duplicate = true;
-        break;
-      }
-    }
-    // add new interface
-    if (!duplicate) {
-      result->obj_at_put(index++, e);
-    }
-  }
-}
-
-objArrayHandle ClassFileParser::compute_transitive_interfaces(instanceKlassHandle super, objArrayHandle local_ifs, TRAPS) {
-  // Compute maximum size for transitive interfaces
-  int max_transitive_size = 0;
-  int super_size = 0;
-  // Add superclass transitive interfaces size
-  if (super.not_null()) {
-    super_size = super->transitive_interfaces()->length();
-    max_transitive_size += super_size;
-  }
-  // Add local interfaces' super interfaces  
-  int local_size = local_ifs->length();
-  for (int i = 0; i < local_size; i++) {
-    klassOop l = klassOop(local_ifs->obj_at(i));
-    max_transitive_size += instanceKlass::cast(l)->transitive_interfaces()->length();
-  }
-  // Finally add local interfaces
-  max_transitive_size += local_size;
-  // Construct array
-  objArrayHandle result;
-  if (max_transitive_size == 0) {
-    // no interfaces, use canonicalized array
-    result = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
-  } else if (max_transitive_size == super_size) {
-    // no new local interfaces added, share superklass' transitive interface array
-    result = objArrayHandle(THREAD, super->transitive_interfaces());
-  } else if (max_transitive_size == local_size) {
-    // only local interfaces added, share local interface array
-    result = local_ifs;
-  } else {
-    objArrayHandle nullHandle;
-    objArrayOop new_objarray = oopFactory::new_system_objArray(max_transitive_size, CHECK_(nullHandle));
-    result = objArrayHandle(THREAD, new_objarray);
-    int index = 0;
-    // Copy down from superclass
-    if (super.not_null()) {
-      append_interfaces(result, index, super->transitive_interfaces());
-    }    
-    // Copy down from local interfaces' superinterfaces
-    for (int i = 0; i < local_ifs->length(); i++) {
-      klassOop l = klassOop(local_ifs->obj_at(i));
-      append_interfaces(result, index, instanceKlass::cast(l)->transitive_interfaces());
-    }
-    // Finally add local interfaces
-    append_interfaces(result, index, local_ifs());
-
-    // Check if duplicates were removed
-    if (index != max_transitive_size) {
-      assert(index < max_transitive_size, "just checking");
-      objArrayOop new_result = oopFactory::new_system_objArray(index, CHECK_(nullHandle));
-      for (int i = 0; i < index; i++) {
-        oop e = result->obj_at(i);
-        assert(e != NULL, "just checking");
-        new_result->obj_at_put(i, e);
-      }
-      result = objArrayHandle(THREAD, new_result);
-    }
-  }
-  return result;  
-}
-
-
-void ClassFileParser::check_super_class_access(instanceKlassHandle this_klass, TRAPS) {
-  klassOop super = this_klass->super();
-  if ((super != NULL) &&
-      (!Reflection::verify_class_access(this_klass->as_klassOop(), super, false))) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(  
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_IllegalAccessError(),
-      "class %s cannot access its superclass %s",
-      this_klass->external_name(),
-      instanceKlass::cast(super)->external_name()
-    );
-    return;
-  }
-}
-
-
-void ClassFileParser::check_super_interface_access(instanceKlassHandle this_klass, TRAPS) {
-  objArrayHandle local_interfaces (THREAD, this_klass->local_interfaces());
-  int lng = local_interfaces->length();
-  for (int i = lng - 1; i >= 0; i--) {
-    klassOop k = klassOop(local_interfaces->obj_at(i)); 
-    assert (k != NULL && Klass::cast(k)->is_interface(), "invalid interface");
-    if (!Reflection::verify_class_access(this_klass->as_klassOop(), k, false)) {
-      ResourceMark rm(THREAD);
-      Exceptions::fthrow(  
-        THREAD_AND_LOCATION,
-        vmSymbolHandles::java_lang_IllegalAccessError(),
-        "class %s cannot access its superinterface %s",
-        this_klass->external_name(),
-        instanceKlass::cast(k)->external_name()
-      );
-      return;
-    }
-  }
-}
-
-
-void ClassFileParser::check_final_method_override(instanceKlassHandle this_klass, TRAPS) {
-  objArrayHandle methods (THREAD, this_klass->methods());
-  int num_methods = methods->length();
-  
-  // go thru each method and check if it overrides a final method
-  for (int index = 0; index < num_methods; index++) {
-    methodOop m = (methodOop)methods->obj_at(index);
-
-    // skip private, static and <init> methods
-    if ((!m->is_private()) &&
-        (!m->is_static()) &&
-        (m->name() != vmSymbols::object_initializer_name())) {
-	
-      symbolOop name = m->name();
-      symbolOop signature = m->signature();
-      klassOop k = this_klass->super();
-      methodOop super_m = NULL;
-      while (k != NULL) {
-        // skip supers that don't have final methods.
-        if (k->klass_part()->has_final_method()) {
-          // lookup a matching method in the super class hierarchy
-          super_m = instanceKlass::cast(k)->lookup_method(name, signature); 
-          if (super_m == NULL) {
-            break; // didn't find any match; get out
-          }
-  
-          if (super_m->is_final() &&
-              // matching method in super is final
-              (Reflection::verify_field_access(this_klass->as_klassOop(), 
-                                               super_m->method_holder(),
-                                               super_m->method_holder(),
-                                               super_m->access_flags(), false))
-            // this class can access super final method and therefore override
-            ) {
-            ResourceMark rm(THREAD);
-            Exceptions::fthrow(  
-              THREAD_AND_LOCATION,
-              vmSymbolHandles::java_lang_VerifyError(),
-              "class %s overrides final method %s.%s",
-              this_klass->external_name(),
-              name->as_C_string(),
-              signature->as_C_string()
-            );
-            return;
-          }
-
-          // continue to look from super_m's holder's super.
-          k = instanceKlass::cast(super_m->method_holder())->super();
-          continue;
-        }
-
-        k = k->klass_part()->super();
-      }
-    }
-  }
-}
-
-
-// assumes that this_klass is an interface
-void ClassFileParser::check_illegal_static_method(instanceKlassHandle this_klass, TRAPS) {
-  assert(this_klass->is_interface(), "not an interface");
-  objArrayHandle methods (THREAD, this_klass->methods());
-  int num_methods = methods->length();
-
-  for (int index = 0; index < num_methods; index++) {
-    methodOop m = (methodOop)methods->obj_at(index);
-    // if m is static and not the init method, throw a verify error
-    if ((m->is_static()) && (m->name() != vmSymbols::class_initializer_name())) {
-      ResourceMark rm(THREAD);
-      Exceptions::fthrow(  
-        THREAD_AND_LOCATION,
-        vmSymbolHandles::java_lang_VerifyError(),
-        "Illegal static method %s in interface %s",
-        m->name()->as_C_string(),
-        this_klass->external_name()
-      );
-      return;
-    }
-  }
-}
-
-// utility methods for format checking 
-
-void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) {
-  if (!_need_verify) { return; }
-
-  const bool is_interface  = (flags & JVM_ACC_INTERFACE)  != 0;
-  const bool is_abstract   = (flags & JVM_ACC_ABSTRACT)   != 0;
-  const bool is_final      = (flags & JVM_ACC_FINAL)      != 0;
-  const bool is_super      = (flags & JVM_ACC_SUPER)      != 0;
-  const bool is_enum       = (flags & JVM_ACC_ENUM)       != 0;
-  const bool is_annotation = (flags & JVM_ACC_ANNOTATION) != 0;
-  const bool major_gte_15  = _major_version >= JAVA_1_5_VERSION;
-
-  if ((is_abstract && is_final) ||
-      (is_interface && !is_abstract) ||
-      (is_interface && major_gte_15 && (is_super || is_enum)) ||
-      (!is_interface && major_gte_15 && is_annotation)) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Illegal class modifiers in class %s: 0x%X",
-      _class_name->as_C_string(), flags
-    );
-    return;
-  }
-}
-
-bool ClassFileParser::has_illegal_visibility(jint flags) {
-  const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
-  const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
-  const bool is_private   = (flags & JVM_ACC_PRIVATE)   != 0;
-
-  return ((is_public && is_protected) ||
-          (is_public && is_private) ||
-          (is_protected && is_private));
-}
-
-bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
-  return (major >= JAVA_MIN_SUPPORTED_VERSION) && 
-         (major <= JAVA_MAX_SUPPORTED_VERSION) && 
-         ((major != JAVA_MAX_SUPPORTED_VERSION) || 
-          (minor <= JAVA_MAX_SUPPORTED_MINOR_VERSION));
-}
-
-void ClassFileParser::verify_legal_field_modifiers(
-    jint flags, bool is_interface, TRAPS) {
-  if (!_need_verify) { return; }
-
-  const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
-  const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
-  const bool is_private   = (flags & JVM_ACC_PRIVATE)   != 0;
-  const bool is_static    = (flags & JVM_ACC_STATIC)    != 0;
-  const bool is_final     = (flags & JVM_ACC_FINAL)     != 0;
-  const bool is_volatile  = (flags & JVM_ACC_VOLATILE)  != 0;
-  const bool is_transient = (flags & JVM_ACC_TRANSIENT) != 0;
-  const bool is_enum      = (flags & JVM_ACC_ENUM)      != 0;
-  const bool major_gte_15 = _major_version >= JAVA_1_5_VERSION;
-
-  bool is_illegal = false;
-
-  if (is_interface) {
-    if (!is_public || !is_static || !is_final || is_private || 
-        is_protected || is_volatile || is_transient || 
-        (major_gte_15 && is_enum)) {
-      is_illegal = true;
-    }
-  } else { // not interface
-    if (has_illegal_visibility(flags) || (is_final && is_volatile)) {
-      is_illegal = true;
-    }
-  }
-
-  if (is_illegal) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Illegal field modifiers in class %s: 0x%X",
-      _class_name->as_C_string(), flags);
-    return;
-  }
-}
-
-void ClassFileParser::verify_legal_method_modifiers(
-    jint flags, bool is_interface, symbolHandle name, TRAPS) {
-  if (!_need_verify) { return; }
-
-  const bool is_public       = (flags & JVM_ACC_PUBLIC)       != 0;
-  const bool is_private      = (flags & JVM_ACC_PRIVATE)      != 0;
-  const bool is_static       = (flags & JVM_ACC_STATIC)       != 0;
-  const bool is_final        = (flags & JVM_ACC_FINAL)        != 0;
-  const bool is_native       = (flags & JVM_ACC_NATIVE)       != 0;
-  const bool is_abstract     = (flags & JVM_ACC_ABSTRACT)     != 0;
-  const bool is_bridge       = (flags & JVM_ACC_BRIDGE)       != 0;
-  const bool is_strict       = (flags & JVM_ACC_STRICT)       != 0;
-  const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
-  const bool major_gte_15    = _major_version >= JAVA_1_5_VERSION;
-  const bool is_initializer  = (name == vmSymbols::object_initializer_name());
-
-  bool is_illegal = false;
-
-  if (is_interface) {
-    if (!is_abstract || !is_public || is_static || is_final || 
-        is_native || (major_gte_15 && (is_synchronized || is_strict))) {
-      is_illegal = true;
-    }
-  } else { // not interface
-    if (is_initializer) {
-      if (is_static || is_final || is_synchronized || is_native || 
-          is_abstract || (major_gte_15 && is_bridge)) {
-        is_illegal = true;
-      }
-    } else { // not initializer
-      if (is_abstract) {
-        if ((is_final || is_native || is_private || is_static || 
-            (major_gte_15 && (is_synchronized || is_strict)))) {
-          is_illegal = true;
-        }
-      }
-      if (has_illegal_visibility(flags)) {
-        is_illegal = true;
-      }
-    }
-  }
-
-  if (is_illegal) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Method %s in class %s has illegal modifiers: 0x%X", 
-      name->as_C_string(), _class_name->as_C_string(), flags);
-    return;
-  }
-}
-
-void ClassFileParser::verify_legal_utf8(const unsigned char* buffer, int length, TRAPS) {
-  assert(_need_verify, "only called when _need_verify is true");
-  int i = 0;
-  int count = length >> 2;
-  for (int k=0; k<count; k++) {
-    unsigned char b0 = buffer[i];
-    unsigned char b1 = buffer[i+1];
-    unsigned char b2 = buffer[i+2];
-    unsigned char b3 = buffer[i+3];
-    // For an unsigned char v,
-    // (v | v - 1) is < 128 (highest bit 0) for 0 < v < 128;
-    // (v | v - 1) is >= 128 (highest bit 1) for v == 0 or v >= 128.
-    unsigned char res = b0 | b0 - 1 |
-                        b1 | b1 - 1 |
-                        b2 | b2 - 1 |
-                        b3 | b3 - 1;
-    if (res >= 128) break;
-    i += 4;
-  }
-  for(; i < length; i++) {
-    unsigned short c;
-    // no embedded zeros
-    guarantee_property((buffer[i] != 0), "Illegal UTF8 string in constant pool in class file %s", CHECK);
-    if(buffer[i] < 128) {
-      continue;
-    }
-    if ((i + 5) < length) { // see if it's legal supplementary character
-      if (UTF8::is_supplementary_character(&buffer[i])) {
-        c = UTF8::get_supplementary_character(&buffer[i]);
-        i += 5;
-        continue;
-      } 
-    }
-    switch (buffer[i] >> 4) {
-      default: break;
-      case 0x8: case 0x9: case 0xA: case 0xB: case 0xF:
-        classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
-      case 0xC: case 0xD:  // 110xxxxx  10xxxxxx
-        c = (buffer[i] & 0x1F) << 6;
-        i++;
-        if ((i < length) && ((buffer[i] & 0xC0) == 0x80)) {
-          c += buffer[i] & 0x3F;
-          if (_major_version <= 47 || c == 0 || c >= 0x80) {
-            // for classes with major > 47, c must a null or a character in its shortest form
-            break;
-          }
-        } 
-        classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
-      case 0xE:  // 1110xxxx 10xxxxxx 10xxxxxx
-        c = (buffer[i] & 0xF) << 12;
-        i += 2;
-        if ((i < length) && ((buffer[i-1] & 0xC0) == 0x80) && ((buffer[i] & 0xC0) == 0x80)) {
-          c += ((buffer[i-1] & 0x3F) << 6) + (buffer[i] & 0x3F);
-          if (_major_version <= 47 || c >= 0x800) {
-            // for classes with major > 47, c must be in its shortest form
-            break;
-          }
-        }
-        classfile_parse_error("Illegal UTF8 string in constant pool in class file %s", CHECK);
-    }  // end of switch
-  } // end of for
-}
-
-// Checks if name is a legal class name.
-void ClassFileParser::verify_legal_class_name(symbolHandle name, TRAPS) {
-  if (!_need_verify || _relax_verify) { return; }
-
-  char buf[fixed_buffer_size];
-  char* bytes = name->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
-  unsigned int length = name->utf8_length();
-  bool legal = false;
-
-  if (length > 0) {
-    char* p;
-    if (bytes[0] == JVM_SIGNATURE_ARRAY) {
-      p = skip_over_field_signature(bytes, false, length, CHECK);
-      legal = (p != NULL) && ((p - bytes) == (int)length);
-    } else if (_major_version < JAVA_1_5_VERSION) {
-      if (bytes[0] != '<') {
-        p = skip_over_field_name(bytes, true, length);
-        legal = (p != NULL) && ((p - bytes) == (int)length);
-      }
-    } else {
-      // 4900761: relax the constraints based on JSR202 spec
-      // Class names may be drawn from the entire Unicode character set.
-      // Identifiers between '/' must be unqualified names.
-      // The utf8 string has been verified when parsing cpool entries.
-      legal = verify_unqualified_name(bytes, length, LegalClass);  
-    }
-  } 
-  if (!legal) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Illegal class name \"%s\" in class file %s", bytes,
-      _class_name->as_C_string()
-    );
-    return;
-  }
-}
-
-// Checks if name is a legal field name.
-void ClassFileParser::verify_legal_field_name(symbolHandle name, TRAPS) {
-  if (!_need_verify || _relax_verify) { return; }
-
-  char buf[fixed_buffer_size];
-  char* bytes = name->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
-  unsigned int length = name->utf8_length();
-  bool legal = false;
-
-  if (length > 0) {
-    if (_major_version < JAVA_1_5_VERSION) {
-      if (bytes[0] != '<') { 
-        char* p = skip_over_field_name(bytes, false, length);
-        legal = (p != NULL) && ((p - bytes) == (int)length);
-      }
-    } else {
-      // 4881221: relax the constraints based on JSR202 spec
-      legal = verify_unqualified_name(bytes, length, LegalField);
-    }
-  }
-
-  if (!legal) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Illegal field name \"%s\" in class %s", bytes,
-      _class_name->as_C_string()
-    );
-    return;
-  }
-}
-
-// Checks if name is a legal method name.
-void ClassFileParser::verify_legal_method_name(symbolHandle name, TRAPS) {
-  if (!_need_verify || _relax_verify) { return; }
-
-  assert(!name.is_null(), "method name is null");
-  char buf[fixed_buffer_size];
-  char* bytes = name->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
-  unsigned int length = name->utf8_length();
-  bool legal = false;
-
-  if (length > 0) {
-    if (bytes[0] == '<') {
-      if (name == vmSymbols::object_initializer_name() || name == vmSymbols::class_initializer_name()) {
-        legal = true;
-      }
-    } else if (_major_version < JAVA_1_5_VERSION) {
-      char* p;
-      p = skip_over_field_name(bytes, false, length);
-      legal = (p != NULL) && ((p - bytes) == (int)length);
-    } else {
-      // 4881221: relax the constraints based on JSR202 spec
-      legal = verify_unqualified_name(bytes, length, LegalMethod);
-    }
-  }
-
-  if (!legal) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Illegal method name \"%s\" in class %s", bytes,
-      _class_name->as_C_string()
-    );
-    return;
-  }
-}
-
-
-// Checks if signature is a legal field signature.
-void ClassFileParser::verify_legal_field_signature(symbolHandle name, symbolHandle signature, TRAPS) {
-  if (!_need_verify) { return; }
-
-  char buf[fixed_buffer_size];
-  char* bytes = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
-  unsigned int length = signature->utf8_length();
-  char* p = skip_over_field_signature(bytes, false, length, CHECK);
-
-  if (p == NULL || (p - bytes) != (int)length) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbolHandles::java_lang_ClassFormatError(),
-      "Field \"%s\" in class %s has illegal signature \"%s\"", 
-      name->as_C_string(), _class_name->as_C_string(), bytes
-    );
-    return;
-  }
-}
-
-// Checks if signature is a legal method signature.
-// Returns number of parameters
-int ClassFileParser::verify_legal_method_signature(symbolHandle name, symbolHandle signature, TRAPS) {
-  if (!_need_verify) {
-    // make sure caller's args_size will be less than 0 even for non-static
-    // method so it will be recomputed in compute_size_of_parameters().
-    return -2;
-  }
-
-  unsigned int args_size = 0;
-  char buf[fixed_buffer_size];
-  char* p = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
-  unsigned int length = signature->utf8_length();
-  char* nextp;
-
-  // The first character must be a '('
-  if ((length > 0) && (*p++ == JVM_SIGNATURE_FUNC)) {
-    length--;
-    // Skip over legal field signatures
-    nextp = skip_over_field_signature(p, false, length, CHECK_0);
-    while ((length > 0) && (nextp != NULL)) {
-      args_size++;
-      if (p[0] == 'J' || p[0] == 'D') {
-        args_size++;
-      }
-      length -= nextp - p;
-      p = nextp;
-      nextp = skip_over_field_signature(p, false, length, CHECK_0);
-    }
-    // The first non-signature thing better be a ')'
-    if ((length > 0) && (*p++ == JVM_SIGNATURE_ENDFUNC)) {
-      length--;
-      if (name->utf8_length() > 0 && name->byte_at(0) == '<') {
-        // All internal methods must return void
-        if ((length == 1) && (p[0] == JVM_SIGNATURE_VOID)) {
-          return args_size;
-        }
-      } else {
-        // Now we better just have a return value
-        nextp = skip_over_field_signature(p, true, length, CHECK_0);
-        if (nextp && ((int)length == (nextp - p))) {
-          return args_size;
-        }
-      }
-    }
-  }
-  // Report error
-  ResourceMark rm(THREAD);
-  Exceptions::fthrow(
-    THREAD_AND_LOCATION,
-    vmSymbolHandles::java_lang_ClassFormatError(),
-    "Method \"%s\" in class %s has illegal signature \"%s\"", 
-    name->as_C_string(),  _class_name->as_C_string(), p
-  );
-  return 0;
-}
-
-
-// Unqualified names may not contain the characters '.', ';', or '/'.
-// Method names also may not contain the characters '<' or '>', unless <init> or <clinit>.
-// Note that method names may not be <init> or <clinit> in this method.
-// Because these names have been checked as special cases before calling this method
-// in verify_legal_method_name.
-bool ClassFileParser::verify_unqualified_name(char* name, unsigned int length, int type) {
-  jchar ch;
-
-  for (char* p = name; p != name + length; ) {
-    ch = *p;
-    if (ch < 128) {
-      p++;
-      if (ch == '.' || ch == ';') {
-        return false;   // do not permit '.' or ';'
-      }
-      if (type != LegalClass && ch == '/') {
-        return false;   // do not permit '/' unless it's class name
-      }
-      if (type == LegalMethod && (ch == '<' || ch == '>')) {
-        return false;   // do not permit '<' or '>' in method names
-      }
-    } else {
-      char* tmp_p = UTF8::next(p, &ch);
-      p = tmp_p;
-    }
-  }
-  return true;
-}
-
-
-// Take pointer to a string. Skip over the longest part of the string that could 
-// be taken as a fieldname. Allow '/' if slash_ok is true.
-// Return a pointer to just past the fieldname. 
-// Return NULL if no fieldname at all was found, or in the case of slash_ok 
-// being true, we saw consecutive slashes (meaning we were looking for a 
-// qualified path but found something that was badly-formed).
-char* ClassFileParser::skip_over_field_name(char* name, bool slash_ok, unsigned int length) {
-  char* p;
-  jchar ch;                     
-  jboolean last_is_slash = false;            
-  jboolean not_first_ch = false; 
-
-  for (p = name; p != name + length; not_first_ch = true) {
-    char* old_p = p;
-    ch = *p;
-    if (ch < 128) {
-      p++;
-      // quick check for ascii
-      if ((ch >= 'a' && ch <= 'z') ||
-          (ch >= 'A' && ch <= 'Z') ||
-          (ch == '_' || ch == '$') ||
-          (not_first_ch && ch >= '0' && ch <= '9')) {
-        last_is_slash = false;
-        continue;
-      }
-      if (slash_ok && ch == '/') {
-        if (last_is_slash) {
-          return NULL;  // Don't permit consecutive slashes
-        }
-        last_is_slash = true;
-        continue;
-      }
-    } else {
-      jint unicode_ch;
-      char* tmp_p = UTF8::next_character(p, &unicode_ch);
-      p = tmp_p;
-      last_is_slash = false;
-      // Check if ch is Java identifier start or is Java identifier part
-      // 4672820: call java.lang.Character methods directly without generating separate tables.
-      EXCEPTION_MARK;
-      instanceKlassHandle klass (THREAD, SystemDictionary::char_klass());
-
-      // return value
-      JavaValue result(T_BOOLEAN);
-      // Set up the arguments to isJavaIdentifierStart and isJavaIdentifierPart
-      JavaCallArguments args;
-      args.push_int(unicode_ch);
-
-      // public static boolean isJavaIdentifierStart(char ch);
-      JavaCalls::call_static(&result,
-                             klass,
-                             vmSymbolHandles::isJavaIdentifierStart_name(), 
-                             vmSymbolHandles::int_bool_signature(),
-                             &args,
-                             THREAD);
-         
-      if (HAS_PENDING_EXCEPTION) {      
-        CLEAR_PENDING_EXCEPTION;
-        return 0;
-      }
-      if (result.get_jboolean()) {
-        continue;
-      }
-        
-      if (not_first_ch) {
-        // public static boolean isJavaIdentifierPart(char ch);
-        JavaCalls::call_static(&result,
-                               klass,
-                               vmSymbolHandles::isJavaIdentifierPart_name(), 
-                               vmSymbolHandles::int_bool_signature(),
-                               &args,
-                               THREAD);
-     
-        if (HAS_PENDING_EXCEPTION) {    
-          CLEAR_PENDING_EXCEPTION;
-          return 0;
-        }
-
-        if (result.get_jboolean()) {
-          continue;
-        }
-      }
-    }
-    return (not_first_ch) ? old_p : NULL;
-  }
-  return (not_first_ch) ? p : NULL;
-}
-
-
-// Take pointer to a string. Skip over the longest part of the string that could
-// be taken as a field signature. Allow "void" if void_ok.
-// Return a pointer to just past the signature. 
-// Return NULL if no legal signature is found.
-char* ClassFileParser::skip_over_field_signature(char* signature, 
-                                                 bool void_ok, 
-                                                 unsigned int length,
-                                                 TRAPS) {
-  unsigned int array_dim = 0;
-  while (length > 0) {
-    switch (signature[0]) {
-      case JVM_SIGNATURE_VOID: if (!void_ok) { return NULL; }
-      case JVM_SIGNATURE_BOOLEAN:
-      case JVM_SIGNATURE_BYTE:
-      case JVM_SIGNATURE_CHAR:
-      case JVM_SIGNATURE_SHORT:
-      case JVM_SIGNATURE_INT:
-      case JVM_SIGNATURE_FLOAT:
-      case JVM_SIGNATURE_LONG:
-      case JVM_SIGNATURE_DOUBLE:
-        return signature + 1;
-      case JVM_SIGNATURE_CLASS: {
-        if (_major_version < JAVA_1_5_VERSION) {
-          // Skip over the class name if one is there
-          char* p = skip_over_field_name(signature + 1, true, --length);
-        
-          // The next character better be a semicolon
-          if (p && (p - signature) > 1 && p[0] == ';') {
-            return p + 1;
-          }
-        } else {
-          // 4900761: For class version > 48, any unicode is allowed in class name.
-          length--; 
-          signature++; 
-          while (length > 0 && signature[0] != ';') {
-            if (signature[0] == '.') {
-              classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
-            }
-            length--; 
-            signature++; 
-          }            
-          if (signature[0] == ';') { return signature + 1; }
-        }
-            
-        return NULL;
-      }
-      case JVM_SIGNATURE_ARRAY:
-        array_dim++;
-        if (array_dim > 255) {
-          // 4277370: array descriptor is valid only if it represents 255 or fewer dimensions.
-          classfile_parse_error("Array type descriptor has more than 255 dimensions in class file %s", CHECK_0);
-        }
-        // The rest of what's there better be a legal signature
-        signature++;
-        length--;
-        void_ok = false;
-        break;
-
-      default:
-        return NULL;
-    }
-  }
-  return NULL;
-}
-
--- a/hotspot/src/share/vm/runtime/classFileParser.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,221 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)classFileParser.hpp	1.84 07/05/05 17:06:45 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// Parser for for .class files
-//
-// The bytes describing the class file structure is read from a Stream object
-
-class ClassFileParser VALUE_OBJ_CLASS_SPEC {
- private:
-  bool _need_verify;
-  bool _relax_verify;  
-  u2   _major_version;
-  u2   _minor_version;
-  symbolHandle _class_name;
-
-  bool _has_finalizer;
-  bool _has_empty_finalizer;
-  bool _has_vanilla_constructor;
-
-  enum { fixed_buffer_size = 128 };
-  u_char _fixed_buffer[fixed_buffer_size];
-
-  ClassFileStream* _stream;              // Actual input stream
-
-  enum { LegalClass, LegalField, LegalMethod }; // used to verify unqualified names
-
-  // Accessors
-  ClassFileStream* stream()                        { return _stream; }
-  void set_stream(ClassFileStream* st)             { _stream = st; }
-
-  // Constant pool parsing
-  void parse_constant_pool_entries(constantPoolHandle cp, int length, TRAPS);
-
-  constantPoolHandle parse_constant_pool(TRAPS);
-
-  // Interface parsing
-  objArrayHandle parse_interfaces(constantPoolHandle cp,
-                                  int length,
-                                  Handle class_loader, 
-                                  Handle protection_domain,
-                                  PerfTraceTime* vmtimer,
-                                  symbolHandle class_name,
-                                  TRAPS);
-
-  // Field parsing
-  void parse_field_attributes(constantPoolHandle cp, u2 attributes_count,
-                              bool is_static, u2 signature_index, 
-                              u2* constantvalue_index_addr,
-                              bool* is_synthetic_addr, 
-                              u2* generic_signature_index_addr,
-                              typeArrayHandle* field_annotations, TRAPS);
-  typeArrayHandle parse_fields(constantPoolHandle cp, bool is_interface, 
-                               struct FieldAllocationCount *fac,
-                               objArrayHandle* fields_annotations, TRAPS);
-
-  // Method parsing
-  methodHandle parse_method(constantPoolHandle cp, bool is_interface, 
-                            AccessFlags* promoted_flags,
-                            typeArrayHandle* method_annotations,
-                            typeArrayHandle* method_parameter_annotations,
-                            typeArrayHandle* method_default_annotations,
-                            TRAPS);
-  objArrayHandle parse_methods (constantPoolHandle cp, bool is_interface, 
-                                AccessFlags* promoted_flags,
-                                bool* has_final_method,
-                                objArrayOop* methods_annotations_oop,
-                                objArrayOop* methods_parameter_annotations_oop,
-                                objArrayOop* methods_default_annotations_oop,
-                                TRAPS);
-  typeArrayHandle sort_methods (objArrayHandle methods,
-                                objArrayHandle methods_annotations,
-                                objArrayHandle methods_parameter_annotations,
-                                objArrayHandle methods_default_annotations,
-                                TRAPS);
-  typeArrayHandle parse_exception_table(u4 code_length, u4 exception_table_length, 
-                                        constantPoolHandle cp, TRAPS);
-  u_char* parse_linenumber_table(u4 code_attribute_length, u4 code_length,
-                                 int* compressed_linenumber_table_size, TRAPS);
-  u2* parse_localvariable_table(u4 code_length, u2 max_locals, u4 code_attribute_length,
-                                constantPoolHandle cp, u2* localvariable_table_length,
-                                bool isLVTT, TRAPS);
-  u2* parse_checked_exceptions(u2* checked_exceptions_length, u4 method_attribute_length,
-                               constantPoolHandle cp, TRAPS);
-  void parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
-                        u1* u1_array, u2* u2_array, constantPoolHandle cp, TRAPS);
-  typeArrayOop parse_stackmap_table(u4 code_attribute_length, TRAPS);
-
-  // Classfile attribute parsing
-  void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
-  void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, 
-                                                instanceKlassHandle k, int length, TRAPS);
-  u2   parse_classfile_inner_classes_attribute(constantPoolHandle cp, 
-                                               instanceKlassHandle k, TRAPS);
-  void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
-  void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
-  void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
-  
-  // Annotations handling
-  typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
-                                       int runtime_visible_annotations_length,
-                                       u1* runtime_invisible_annotations,
-                                       int runtime_invisible_annotations_length, TRAPS);
-
-  // Final setup
-  int  compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count, 
-                            int first_nonstatic_oop_offset);
-  void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count, 
-                     u2* nonstatic_oop_offsets, u2* nonstatic_oop_length);
-  void set_precomputed_flags(instanceKlassHandle k);
-  objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, 
-                                               objArrayHandle local_ifs, TRAPS);
-
-  // Special handling for certain classes.
-  // Add the "discovered" field to java.lang.ref.Reference if
-  // it does not exist.
-  void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr, 
-    constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS);
-  // Adjust the field allocation counts for java.lang.Class to add
-  // fake fields.
-  void java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
-    FieldAllocationCount *fac_ptr, TRAPS);
-  // Adjust the next_nonstatic_oop_offset to place the fake fields
-  // before any Java fields.
-  void java_lang_Class_fix_post(int* next_nonstatic_oop_offset);
-
-  // Format checker methods
-  void classfile_parse_error(const char* msg, TRAPS);
-  void classfile_parse_error(const char* msg, int index, TRAPS);
-  void classfile_parse_error(const char* msg, const char *name, TRAPS);
-  void classfile_parse_error(const char* msg, int index, const char *name, TRAPS);
-  inline void guarantee_property(bool b, const char* msg, TRAPS) {
-    if (!b) { classfile_parse_error(msg, CHECK); }
-  }
-
-  inline void assert_property(bool b, const char* msg, TRAPS) {
-#ifdef ASSERT
-    if (!b) { fatal(msg); }
-#endif
-  }
-
-  inline void check_property(bool property, const char* msg, int index, TRAPS) {
-    if (_need_verify) {
-      guarantee_property(property, msg, index, CHECK);
-    } else {
-      assert_property(property, msg, CHECK);
-    }
-  }
-  inline void guarantee_property(bool b, const char* msg, int index, TRAPS) {
-    if (!b) { classfile_parse_error(msg, index, CHECK); }
-  }
-  inline void guarantee_property(bool b, const char* msg, const char *name, TRAPS) {
-    if (!b) { classfile_parse_error(msg, name, CHECK); }
-  }
-  inline void guarantee_property(bool b, const char* msg, int index, const char *name, TRAPS) {
-    if (!b) { classfile_parse_error(msg, index, name, CHECK); }
-  }
-
-  bool is_supported_version(u2 major, u2 minor);
-  bool has_illegal_visibility(jint flags);
-
-  void verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS);
-  void verify_legal_utf8(const unsigned char* buffer, int length, TRAPS);
-  void verify_legal_class_name(symbolHandle name, TRAPS);
-  void verify_legal_field_name(symbolHandle name, TRAPS);
-  void verify_legal_method_name(symbolHandle name, TRAPS);
-  void verify_legal_field_signature(symbolHandle fieldname, symbolHandle signature, TRAPS);
-  int  verify_legal_method_signature(symbolHandle methodname, symbolHandle signature, TRAPS);
-  void verify_legal_class_modifiers(jint flags, TRAPS);
-  void verify_legal_field_modifiers(jint flags, bool is_interface, TRAPS);
-  void verify_legal_method_modifiers(jint flags, bool is_interface, symbolHandle name, TRAPS);
-  bool verify_unqualified_name(char* name, unsigned int length, int type);
-  char* skip_over_field_name(char* name, bool slash_ok, unsigned int length);
-  char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS);
-
- public:
-  // Constructor
-  ClassFileParser(ClassFileStream* st) { set_stream(st); }
-
-  // Parse .class file and return new klassOop. The klassOop is not hooked up
-  // to the system dictionary or any other structures, so a .class file can 
-  // be loaded several times if desired. 
-  // The system dictionary hookup is done by the caller.
-  //
-  // "parsed_name" is updated by this method, and is the name found
-  // while parsing the stream.
-  instanceKlassHandle parseClassFile(symbolHandle name, 
-                                     Handle class_loader, 
-                                     Handle protection_domain, 
-                                     symbolHandle& parsed_name,
-                                     TRAPS);
-
-  // Verifier checks
-  static void check_super_class_access(instanceKlassHandle this_klass, TRAPS);
-  static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS);
-  static void check_final_method_override(instanceKlassHandle this_klass, TRAPS);
-  static void check_illegal_static_method(instanceKlassHandle this_klass, TRAPS);
-};
--- a/hotspot/src/share/vm/runtime/classFileStream.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)classFileStream.cpp	1.40 07/05/05 17:06:44 JVM"
-#endif
-/*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_classFileStream.cpp.incl"
-
-void ClassFileStream::truncated_file_error(TRAPS) {
-  THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
-}
-
-ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) {
-  _buffer_start = buffer;
-  _buffer_end   = buffer + length;
-  _current      = buffer;
-  _source       = source;
-  _need_verify  = false;
-}
-
-u1 ClassFileStream::get_u1(TRAPS) {
-  if (_need_verify) {
-    check_truncated_file(_current + 1 > _buffer_end, CHECK_0);
-  } else {
-    assert(_current + 1 <= _buffer_end, "buffer overflow");
-  }
-  return *_current++;
-}
-
-u2 ClassFileStream::get_u2(TRAPS) {
-  if (_need_verify) {
-    check_truncated_file(_current + 2 > _buffer_end, CHECK_0);
-  } else {
-    assert(_current + 2 <= _buffer_end, "buffer overflow");
-  }
-  u1* tmp = _current;
-  _current += 2;
-  return Bytes::get_Java_u2(tmp);
-}
-
-u4 ClassFileStream::get_u4(TRAPS) {
-  if (_need_verify) {
-    check_truncated_file(_current + 4 > _buffer_end, CHECK_0);
-  } else {
-    assert(_current + 4 <= _buffer_end, "buffer overflow");
-  }
-  u1* tmp = _current;
-  _current += 4;
-  return Bytes::get_Java_u4(tmp);
-}
-
-u8 ClassFileStream::get_u8(TRAPS) {
-  if (_need_verify) {
-    check_truncated_file(_current + 8 > _buffer_end, CHECK_0);
-  } else {
-    assert(_current + 8 <= _buffer_end, "buffer overflow");
-  }
-  u1* tmp = _current;
-  _current += 8;
-  return Bytes::get_Java_u8(tmp);
-}
-
-void ClassFileStream::skip_u1(int length, TRAPS) {
-  if (_need_verify) {
-    check_truncated_file(_current + length > _buffer_end, CHECK);
-  } 
-  _current += length;
-}
-
-void ClassFileStream::skip_u2(int length, TRAPS) {
-  if (_need_verify) {
-    check_truncated_file(_current + length * 2 > _buffer_end, CHECK);
-  } 
-  _current += length * 2;
-}
--- a/hotspot/src/share/vm/runtime/classFileStream.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)classFileStream.hpp	1.32 07/05/05 17:06:44 JVM"
-#endif
-/*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// Input stream for reading .class file
-//
-// The entire input stream is present in a buffer allocated by the caller.
-// The caller is responsible for deallocating the buffer and for using
-// ResourceMarks appropriately when constructing streams.
-
-class ClassFileStream: public ResourceObj {
- private:
-  u1*   _buffer_start; // Buffer bottom
-  u1*   _buffer_end;   // Buffer top (one past last element)
-  u1*   _current;      // Current buffer position
-  char* _source;       // Source of stream (directory name, ZIP/JAR archive name)
-  bool  _need_verify;  // True if verification is on for the class file
-
-  void truncated_file_error(TRAPS);
- public:
-  // Constructor
-  ClassFileStream(u1* buffer, int length, char* source);
-
-  // Buffer access
-  u1* buffer() const           { return _buffer_start; }
-  int length() const           { return _buffer_end - _buffer_start; }
-  u1* current() const          { return _current; }
-  void set_current(u1* pos)    { _current = pos; }
-  char* source() const         { return _source; }
-  void set_verify(bool flag)   { _need_verify = flag; }
-
-  void check_truncated_file(bool b, TRAPS) {
-    if (b) {
-      truncated_file_error(THREAD);
-    }
-  }
-
-  void guarantee_more(int size, TRAPS) {
-    check_truncated_file(_current + size > _buffer_end, CHECK);
-  }
-
-  // Read u1 from stream
-  u1 get_u1(TRAPS);
-  u1 get_u1_fast() {
-    return *_current++;
-  }
-
-  // Read u2 from stream
-  u2 get_u2(TRAPS);
-  u2 get_u2_fast() {
-    u2 res = Bytes::get_Java_u2(_current);
-    _current += 2;
-    return res;
-  }
-
-  // Read u4 from stream
-  u4 get_u4(TRAPS);
-  u4 get_u4_fast() {
-    u4 res = Bytes::get_Java_u4(_current);
-    _current += 4;
-    return res;
-  }
-
-  // Read u8 from stream
-  u8 get_u8(TRAPS);
-  u8 get_u8_fast() {
-    u8 res = Bytes::get_Java_u8(_current);
-    _current += 8;
-    return res;
-  }
-
-  // Get direct pointer into stream at current position. 
-  // Returns NULL if length elements are not remaining. The caller is 
-  // responsible for calling skip below if buffer contents is used.
-  u1* get_u1_buffer() {
-    return _current;
-  }
-
-  u2* get_u2_buffer() {
-    return (u2*) _current;
-  }
-
-  // Skip length u1 or u2 elements from stream
-  void skip_u1(int length, TRAPS);
-  void skip_u1_fast(int length) {
-    _current += length;
-  }
-
-  void skip_u2(int length, TRAPS);
-  void skip_u2_fast(int length) {
-    _current += 2 * length;
-  }
-
-  // Tells whether eos is reached
-  bool at_eos() const          { return _current == _buffer_end; }
-};
--- a/hotspot/src/share/vm/runtime/classLoader.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1260 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)classLoader.cpp	1.186 07/05/05 17:06:44 JVM"
-#endif
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_classLoader.cpp.incl"
-
-
-// Entry points in zip.dll for loading zip/jar file entries
-
-typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
-typedef void (JNICALL *ZipClose_t)(jzfile *zip);
-typedef jzentry* (JNICALL *FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
-typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
-typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
-typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
-
-static ZipOpen_t         ZipOpen            = NULL;
-static ZipClose_t        ZipClose           = NULL;
-static FindEntry_t       FindEntry          = NULL;
-static ReadEntry_t       ReadEntry          = NULL;
-static ReadMappedEntry_t ReadMappedEntry    = NULL;
-static GetNextEntry_t    GetNextEntry       = NULL;
-static canonicalize_fn_t CanonicalizeEntry  = NULL;
-
-// Globals
-
-PerfCounter*    ClassLoader::_perf_accumulated_time = NULL;
-PerfCounter*    ClassLoader::_perf_classes_inited = NULL;
-PerfCounter*    ClassLoader::_perf_class_init_time = NULL;
-PerfCounter*    ClassLoader::_perf_class_verify_time = NULL;
-PerfCounter*    ClassLoader::_perf_classes_linked = NULL;
-PerfCounter*    ClassLoader::_perf_class_link_time = NULL;
-PerfCounter*    ClassLoader::_sync_systemLoaderLockContentionRate = NULL;
-PerfCounter*    ClassLoader::_sync_nonSystemLoaderLockContentionRate = NULL;
-PerfCounter*    ClassLoader::_sync_JVMFindLoadedClassLockFreeCounter = NULL;
-PerfCounter*    ClassLoader::_sync_JVMDefineClassLockFreeCounter = NULL;
-PerfCounter*    ClassLoader::_sync_JNIDefineClassLockFreeCounter = NULL;
-PerfCounter*    ClassLoader::_unsafe_defineClassCallCounter = NULL;
-PerfCounter*    ClassLoader::_isUnsyncloadClass = NULL;
-PerfCounter*    ClassLoader::_load_instance_class_failCounter = NULL;
-
-ClassPathEntry* ClassLoader::_first_entry         = NULL;
-ClassPathEntry* ClassLoader::_last_entry          = NULL;
-PackageHashtable* ClassLoader::_package_hash_table = NULL;
-
-// helper routines
-bool string_starts_with(const char* str, const char* str_to_find) {
-  size_t str_len = strlen(str);
-  size_t str_to_find_len = strlen(str_to_find);
-  if (str_to_find_len > str_len) {
-    return false;
-  }
-  return (strncmp(str, str_to_find, str_to_find_len) == 0);
-}
-
-bool string_ends_with(const char* str, const char* str_to_find) {
-  size_t str_len = strlen(str);
-  size_t str_to_find_len = strlen(str_to_find);
-  if (str_to_find_len > str_len) {
-    return false;
-  }
-  return (strncmp(str + (str_len - str_to_find_len), str_to_find, str_to_find_len) == 0);
-}
-
-
-MetaIndex::MetaIndex(char** meta_package_names, int num_meta_package_names) {
-  if (num_meta_package_names == 0) {
-    _meta_package_names = NULL;
-    _num_meta_package_names = 0;
-  } else {
-    _meta_package_names = NEW_C_HEAP_ARRAY(char*, num_meta_package_names);
-    _num_meta_package_names = num_meta_package_names;
-    memcpy(_meta_package_names, meta_package_names, num_meta_package_names * sizeof(char*));
-  }
-}
-
-
-MetaIndex::~MetaIndex() {
-  FREE_C_HEAP_ARRAY(char*, _meta_package_names);
-}
-
-
-bool MetaIndex::may_contain(const char* class_name) {
-  if ( _num_meta_package_names == 0) {
-    return false;
-  }
-  size_t class_name_len = strlen(class_name);
-  for (int i = 0; i < _num_meta_package_names; i++) {
-    char* pkg = _meta_package_names[i];
-    size_t pkg_len = strlen(pkg);
-    size_t min_len = MIN2(class_name_len, pkg_len);
-    if (!strncmp(class_name, pkg, min_len)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-ClassPathEntry::ClassPathEntry() {
-  set_next(NULL);
-}
-
-
-bool ClassPathEntry::is_lazy() {
-  return false;
-}
-
-ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
-  _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1);
-  strcpy(_dir, dir);
-}
-
-
-ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
-  // construct full path name
-  char path[JVM_MAXPATHLEN];
-  if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
-    return NULL;
-  }
-  // check if file exists
-  struct stat st;
-  if (os::stat(path, &st) == 0) {
-    // found file, open it
-    int file_handle = hpi::open(path, 0, 0);
-    if (file_handle != -1) {
-      // read contents into resource array
-      u1* buffer = NEW_RESOURCE_ARRAY(u1, st.st_size);
-      size_t num_read = os::read(file_handle, (char*) buffer, st.st_size);
-      // close file
-      hpi::close(file_handle);
-      // construct ClassFileStream
-      if (num_read == (size_t)st.st_size) {
-        return new ClassFileStream(buffer, st.st_size, _dir);    // Resource allocated
-      }
-    }
-  }
-  return NULL;
-}
-
-
-ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
-  _zip = zip;
-  _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1);
-  strcpy(_zip_name, zip_name);
-}
-
-ClassPathZipEntry::~ClassPathZipEntry() {
-  if (ZipClose != NULL) {
-    (*ZipClose)(_zip);
-  }
-  FREE_C_HEAP_ARRAY(char, _zip_name);
-}
-
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
-  // enable call to C land
-  JavaThread* thread = JavaThread::current();
-  ThreadToNativeFromVM ttn(thread);
-  // check whether zip archive contains name
-  jint filesize, name_len;
-  jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
-  if (entry == NULL) return NULL;
-  u1* buffer;
-  char name_buf[128];
-  char* filename;
-  if (name_len < 128) {
-    filename = name_buf;
-  } else {
-    filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
-  }
-
-  // file found, get pointer to class in mmaped jar file.
-  if (ReadMappedEntry == NULL ||
-      !(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
-      // mmaped access not available, perhaps due to compression,
-      // read contents into resource array
-      buffer     = NEW_RESOURCE_ARRAY(u1, filesize);
-      if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
-  }
-  // return result
-  return new ClassFileStream(buffer, filesize, _zip_name);    // Resource allocated
-}
-
-// invoke function for each entry in the zip file
-void ClassPathZipEntry::contents_do(void f(const char* name, void* context), void* context) {
-  JavaThread* thread = JavaThread::current();
-  HandleMark  handle_mark(thread);
-  ThreadToNativeFromVM ttn(thread);  
-  for (int n = 0; ; n++) {
-    jzentry * ze = ((*GetNextEntry)(_zip, n));
-    if (ze == NULL) break;
-    (*f)(ze->name, context);
-  }
-}
-
-LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
-  _path = strdup(path);
-  _st = st;
-  _meta_index = NULL;
-  _resolved_entry = NULL;
-}
-
-bool LazyClassPathEntry::is_jar_file() {
-  return ((_st.st_mode & S_IFREG) == S_IFREG);
-}
-
-ClassPathEntry* LazyClassPathEntry::resolve_entry() {
-  if (_resolved_entry != NULL) {
-    return (ClassPathEntry*) _resolved_entry;
-  }
-  ClassPathEntry* new_entry = NULL;
-  ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
-  assert(new_entry != NULL, "earlier code should have caught this");
-  {
-    ThreadCritical tc;
-    if (_resolved_entry == NULL) {
-      _resolved_entry = new_entry;
-      return new_entry;
-    }
-  }
-  assert(_resolved_entry != NULL, "bug in MT-safe resolution logic");
-  delete new_entry;
-  return (ClassPathEntry*) _resolved_entry;
-}
-
-ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
-  if (_meta_index != NULL &&
-      !_meta_index->may_contain(name)) {
-    return NULL;
-  }
-  return resolve_entry()->open_stream(name);
-}
-
-bool LazyClassPathEntry::is_lazy() {
-  return true;
-}
-
-static void print_meta_index(LazyClassPathEntry* entry, 
-                             GrowableArray<char*>& meta_packages) {
-  tty->print("[Meta index for %s=", entry->name());
-  for (int i = 0; i < meta_packages.length(); i++) {
-    if (i > 0) tty->print(" ");
-    tty->print(meta_packages.at(i));
-  }
-  tty->print_cr("]");
-}
-
-
-void ClassLoader::setup_meta_index() {
-  // Set up meta index which allows us to open boot jars lazily if
-  // class data sharing is enabled
-  const char* known_version = "% VERSION 2";
-  char* meta_index_path = Arguments::get_meta_index_path();
-  char* meta_index_dir  = Arguments::get_meta_index_dir();
-  FILE* file = fopen(meta_index_path, "r");
-  int line_no = 0;
-  if (file != NULL) {
-    ResourceMark rm;
-    LazyClassPathEntry* cur_entry = NULL;
-    GrowableArray<char*> boot_class_path_packages(10);
-    char package_name[256];
-    bool skipCurrentJar = false;
-    while (fgets(package_name, sizeof(package_name), file) != NULL) {
-      ++line_no;
-      // Remove trailing newline
-      package_name[strlen(package_name) - 1] = '\0';
-      switch(package_name[0]) {
-        case '%':
-        {
-          if ((line_no == 1) && (strcmp(package_name, known_version) != 0)) {
-            if (TraceClassLoading && Verbose) {  
-              tty->print("[Unsupported meta index version]");
-            }
-            fclose(file);
-            return;
-          }
-        }
-
-        // These directives indicate jar files which contain only
-        // classes, only non-classfile resources, or a combination of
-        // the two. See src/share/classes/sun/misc/MetaIndex.java and
-        // make/tools/MetaIndex/BuildMetaIndex.java in the J2SE
-        // workspace.
-        case '#':
-        case '!':
-        case '@':
-        {
-          // Hand off current packages to current lazy entry (if any)
-          if ((cur_entry != NULL) &&
-              (boot_class_path_packages.length() > 0)) {
-            if (TraceClassLoading && Verbose) {  
-              print_meta_index(cur_entry, boot_class_path_packages);
-            }
-            MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
-                                             boot_class_path_packages.length());
-            cur_entry->set_meta_index(index);
-          }         
-          cur_entry = NULL;
-          boot_class_path_packages.clear();
-
-          // Find lazy entry corresponding to this jar file
-          for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
-            if (entry->is_lazy() &&
-                string_starts_with(entry->name(), meta_index_dir) &&
-                string_ends_with(entry->name(), &package_name[2])) {
-              cur_entry = (LazyClassPathEntry*) entry;
-              break;
-            }
-          }
-   
-          // If the first character is '@', it indicates the following jar
-          // file is a resource only jar file in which case, we should skip
-          // reading the subsequent entries since the resource loading is
-          // totally handled by J2SE side.
-          if (package_name[0] == '@') {
-            if (cur_entry != NULL) {
-              cur_entry->set_meta_index(new MetaIndex(NULL, 0));
-            }
-            cur_entry = NULL;
-            skipCurrentJar = true;
-          } else {
-            skipCurrentJar = false;
-          }
-  
-          break;
-        }
-
-        default:
-        {
-          if (!skipCurrentJar && cur_entry != NULL) {
-            char* new_name = strdup(package_name);
-            boot_class_path_packages.append(new_name);
-          }
-        }
-      }
-    }
-    // Hand off current packages to current lazy entry (if any)
-    if ((cur_entry != NULL) &&
-        (boot_class_path_packages.length() > 0)) {
-      if (TraceClassLoading && Verbose) {  
-        print_meta_index(cur_entry, boot_class_path_packages);
-      }
-      MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
-                                       boot_class_path_packages.length());
-      cur_entry->set_meta_index(index);
-    }          
-    fclose(file);
-  }
-}
-
-void ClassLoader::setup_bootstrap_search_path() {
-  assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
-  char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
-  if (TraceClassLoading && Verbose) {  
-    tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
-  }
-
-  int len = (int)strlen(sys_class_path);
-  int end = 0;
-
-  // Iterate over class path entries
-  for (int start = 0; start < len; start = end) {
-    while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
-      end++;
-    }
-    char* path = NEW_C_HEAP_ARRAY(char, end-start+1);
-    strncpy(path, &sys_class_path[start], end-start);
-    path[end-start] = '\0';
-    update_class_path_entry_list(path);
-    FREE_C_HEAP_ARRAY(char, path);
-    while (sys_class_path[end] == os::path_separator()[0]) {
-      end++;
-    }
-  }
-}
-
-void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
-  JavaThread* thread = JavaThread::current();
-  if (lazy) {
-    *new_entry = new LazyClassPathEntry(path, st);
-    return;
-  }
-  if ((st.st_mode & S_IFREG) == S_IFREG) {
-    // Regular file, should be a zip file
-    // Canonicalized filename
-    char canonical_path[JVM_MAXPATHLEN];
-    if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
-      // This matches the classic VM
-      EXCEPTION_MARK;
-      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");          
-    }
-    char* error_msg = NULL;
-    jzfile* zip;
-    {
-      // enable call to C land
-      ThreadToNativeFromVM ttn(thread);
-      HandleMark hm(thread);
-      zip = (*ZipOpen)(canonical_path, &error_msg);
-    }
-    if (zip != NULL && error_msg == NULL) {
-      *new_entry = new ClassPathZipEntry(zip, path);
-      if (TraceClassLoading) {
-        tty->print_cr("[Opened %s]", path);
-      }
-    } else { 
-      ResourceMark rm(thread);
-      char *msg;
-      if (error_msg == NULL) {
-        msg = NEW_RESOURCE_ARRAY(char, strlen(path) + 128); ;
-        jio_snprintf(msg, strlen(path) + 127, "error in opening JAR file %s", path);
-      } else {
-        int len = (int)(strlen(path) + strlen(error_msg) + 128);
-        msg = NEW_RESOURCE_ARRAY(char, len); ;
-        jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
-      }
-      EXCEPTION_MARK;
-      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);          
-    } 
-  } else {
-    // Directory
-    *new_entry = new ClassPathDirEntry(path);
-    if (TraceClassLoading) {
-      tty->print_cr("[Path %s]", path);
-    }
-  }      
-}
-
-
-// Create a class path zip entry for a given path (return NULL if not found
-// or zip/JAR file cannot be opened)
-ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path) {
-  // check for a regular file
-  struct stat st;
-  if (os::stat(path, &st) == 0) {
-    if ((st.st_mode & S_IFREG) == S_IFREG) {	        
-      char orig_path[JVM_MAXPATHLEN];
-      char canonical_path[JVM_MAXPATHLEN];
-      
-      strcpy(orig_path, path);
-      if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) {
-        char* error_msg = NULL;
-	jzfile* zip;
-	{
-	  // enable call to C land
-	  JavaThread* thread = JavaThread::current();
-	  ThreadToNativeFromVM ttn(thread);
-	  HandleMark hm(thread);
-	  zip = (*ZipOpen)(canonical_path, &error_msg);
-	}
-	if (zip != NULL && error_msg == NULL) {
-	  // create using canonical path
-          return new ClassPathZipEntry(zip, canonical_path);
-	}
-      }
-    }
-  }
-  return NULL;
-}
-
-// returns true if entry already on class path
-bool ClassLoader::contains_entry(ClassPathEntry *entry) {
-  ClassPathEntry* e = _first_entry;
-  while (e != NULL) {
-    // assume zip entries have been canonicalized
-    if (strcmp(entry->name(), e->name()) == 0) {   
-      return true;
-    }
-    e = e->next();
-  }
-  return false;
-}
-
-void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
-  if (new_entry != NULL) {
-    if (_last_entry == NULL) {
-      _first_entry = _last_entry = new_entry;
-    } else {
-      _last_entry->set_next(new_entry);
-      _last_entry = new_entry;
-    }
-  }
-}
-
-void ClassLoader::update_class_path_entry_list(const char *path) {
-  struct stat st;
-  if (os::stat((char *)path, &st) == 0) {
-    // File or directory found
-    ClassPathEntry* new_entry = NULL;
-    create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
-    // Add new entry to linked list 
-    add_to_list(new_entry);
-  }
-}
-
-void ClassLoader::load_zip_library() {
-  assert(ZipOpen == NULL, "should not load zip library twice");
-  // First make sure native library is loaded
-  os::native_java_library();
-  // Load zip library
-  char path[JVM_MAXPATHLEN];
-  char ebuf[1024];
-  hpi::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "zip");
-  void* handle = hpi::dll_load(path, ebuf, sizeof ebuf);
-  if (handle == NULL) {
-    vm_exit_during_initialization("Unable to load ZIP library", path);
-  }
-  // Lookup zip entry points
-  ZipOpen      = CAST_TO_FN_PTR(ZipOpen_t, hpi::dll_lookup(handle, "ZIP_Open"));
-  ZipClose     = CAST_TO_FN_PTR(ZipClose_t, hpi::dll_lookup(handle, "ZIP_Close"));
-  FindEntry    = CAST_TO_FN_PTR(FindEntry_t, hpi::dll_lookup(handle, "ZIP_FindEntry"));
-  ReadEntry    = CAST_TO_FN_PTR(ReadEntry_t, hpi::dll_lookup(handle, "ZIP_ReadEntry"));
-  ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, hpi::dll_lookup(handle, "ZIP_ReadMappedEntry"));
-  GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, hpi::dll_lookup(handle, "ZIP_GetNextEntry"));
-
-  // ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL
-  if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL || GetNextEntry == NULL) {
-    vm_exit_during_initialization("Corrupted ZIP library", path);
-  }
-
-  // Lookup canonicalize entry in libjava.dll  
-  void *javalib_handle = os::native_java_library();
-  CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, hpi::dll_lookup(javalib_handle, "Canonicalize"));
-  // This lookup only works on 1.3. Do not check for non-null here
-}
-
-// PackageInfo data exists in order to support the java.lang.Package
-// class.  A Package object provides information about a java package
-// (version, vendor, etc.) which originates in the manifest of the jar
-// file supplying the package.  For application classes, the ClassLoader
-// object takes care of this.
-
-// For system (boot) classes, the Java code in the Package class needs
-// to be able to identify which source jar file contained the boot
-// class, so that it can extract the manifest from it.  This table
-// identifies java packages with jar files in the boot classpath.
-
-// Because the boot classpath cannot change, the classpath index is
-// sufficient to identify the source jar file or directory.  (Since
-// directories have no manifests, the directory name is not required,
-// but is available.)
-
-// When using sharing -- the pathnames of entries in the boot classpath
-// may not be the same at runtime as they were when the archive was
-// created (NFS, Samba, etc.).  The actual files and directories named
-// in the classpath must be the same files, in the same order, even
-// though the exact name is not the same.
-
-class PackageInfo: public BasicHashtableEntry {
-public:
-  const char* _pkgname;       // Package name
-  int _classpath_index;	      // Index of directory or JAR file loaded from
-
-  PackageInfo* next() {
-    return (PackageInfo*)BasicHashtableEntry::next();
-  }
-
-  const char* pkgname()           { return _pkgname; }
-  void set_pkgname(char* pkgname) { _pkgname = pkgname; }
-
-  const char* filename() {
-    return ClassLoader::classpath_entry(_classpath_index)->name();
-  }
-
-  void set_index(int index) {
-    _classpath_index = index;
-  }
-};
-
-
-class PackageHashtable : public BasicHashtable {
-private:
-  inline unsigned int compute_hash(const char *s, int n) {
-    unsigned int val = 0;
-    while (--n >= 0) {
-      val = *s++ + 31 * val;
-    }
-    return val;
-  }
-
-  PackageInfo* bucket(int index) {
-    return (PackageInfo*)BasicHashtable::bucket(index);
-  }
-
-  PackageInfo* get_entry(int index, unsigned int hash,
-                         const char* pkgname, size_t n) {
-    for (PackageInfo* pp = bucket(index); pp != NULL; pp = pp->next()) {
-      if (pp->hash() == hash &&
-          strncmp(pkgname, pp->pkgname(), n) == 0 &&
-          pp->pkgname()[n] == '\0') {
-        return pp;
-      }
-    }
-    return NULL;
-  }
-
-public:
-  PackageHashtable(int table_size)
-    : BasicHashtable(table_size, sizeof(PackageInfo)) {}
-
-  PackageHashtable(int table_size, HashtableBucket* t, int number_of_entries)
-    : BasicHashtable(table_size, sizeof(PackageInfo), t, number_of_entries) {}
-
-  PackageInfo* get_entry(const char* pkgname, int n) {
-    unsigned int hash = compute_hash(pkgname, n);
-    return get_entry(hash_to_index(hash), hash, pkgname, n);
-  }
-
-  PackageInfo* new_entry(char* pkgname, int n) {
-    unsigned int hash = compute_hash(pkgname, n);
-    PackageInfo* pp;
-    pp = (PackageInfo*)BasicHashtable::new_entry(hash);
-    pp->set_pkgname(pkgname);
-    return pp;
-  }
-
-  void add_entry(PackageInfo* pp) {
-    int index = hash_to_index(pp->hash());
-    BasicHashtable::add_entry(index, pp);
-  }
-
-  void copy_pkgnames(const char** packages) {
-    int n = 0;
-    for (int i = 0; i < table_size(); ++i) {
-      for (PackageInfo* pp = bucket(i); pp != NULL; pp = pp->next()) {
-        packages[n++] = pp->pkgname();
-      }
-    }
-    assert(n == number_of_entries(), "just checking");
-  }
-
-  void copy_table(char** top, char* end, PackageHashtable* table);
-};
-
-
-void PackageHashtable::copy_table(char** top, char* end,
-                                  PackageHashtable* table) {
-  // Copy (relocate) the table to the shared space.
-  BasicHashtable::copy_table(top, end);
-
-  // Calculate the space needed for the package name strings.
-  int i;
-  int n = 0;
-  for (i = 0; i < table_size(); ++i) {
-    for (PackageInfo* pp = table->bucket(i);
-                      pp != NULL;
-                      pp = pp->next()) {
-      n += (int)(strlen(pp->pkgname()) + 1);
-    }
-  }
-  if (*top + n + sizeof(intptr_t) >= end) {
-    warning("\nThe shared miscellaneous data space is not large "
-            "enough to \npreload requested classes.  Use "
-            "-XX:SharedMiscDataSize= to increase \nthe initial "
-            "size of the miscellaneous data space.\n");
-    exit(2);
-  }
-
-  // Copy the table data (the strings) to the shared space.
-  n = align_size_up(n, sizeof(HeapWord));
-  *(intptr_t*)(*top) = n;
-  *top += sizeof(intptr_t);
-
-  for (i = 0; i < table_size(); ++i) {
-    for (PackageInfo* pp = table->bucket(i);
-                      pp != NULL;
-                      pp = pp->next()) {
-      int n1 = (int)(strlen(pp->pkgname()) + 1);
-      pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
-      *top += n1;
-    }
-  }
-  *top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
-}
-
-
-void ClassLoader::copy_package_info_buckets(char** top, char* end) {
-  _package_hash_table->copy_buckets(top, end);
-}
-
-void ClassLoader::copy_package_info_table(char** top, char* end) {
-  _package_hash_table->copy_table(top, end, _package_hash_table);
-}
-
-
-PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
-  const char *cp = strrchr(pkgname, '/');
-  if (cp != NULL) {
-    // Package prefix found
-    int n = cp - pkgname + 1;
-    return _package_hash_table->get_entry(pkgname, n);
-  }
-  return NULL;
-}
-
-
-bool ClassLoader::add_package(const char *pkgname, int classpath_index, TRAPS) {
-  assert(pkgname != NULL, "just checking");
-  // Bootstrap loader no longer holds system loader lock obj serializing
-  // load_instance_class and thereby add_package
-  {
-    MutexLocker ml(PackageTable_lock, THREAD);
-    // First check for previously loaded entry
-    PackageInfo* pp = lookup_package(pkgname);
-    if (pp != NULL) {
-      // Existing entry found, check source of package
-      pp->set_index(classpath_index);
-      return true;
-    }
-
-    const char *cp = strrchr(pkgname, '/');
-    if (cp != NULL) {
-      // Package prefix found
-      int n = cp - pkgname + 1;
-
-      char* new_pkgname = NEW_C_HEAP_ARRAY(char, n + 1);
-      if (new_pkgname == NULL) {
-        return false;
-      }
-  
-      memcpy(new_pkgname, pkgname, n);
-      new_pkgname[n] = '\0';
-      pp = _package_hash_table->new_entry(new_pkgname, n);
-      pp->set_index(classpath_index);
-      
-      // Insert into hash table
-      _package_hash_table->add_entry(pp);
-    }
-    return true;
-  }
-}
-
-
-oop ClassLoader::get_system_package(const char* name, TRAPS) {
-  PackageInfo* pp;
-  {
-    MutexLocker ml(PackageTable_lock, THREAD);
-    pp = lookup_package(name);
-  }
-  if (pp == NULL) {
-    return NULL;
-  } else {
-    Handle p = java_lang_String::create_from_str(pp->filename(), THREAD);
-    return p();
-  }
-}
-
-
-objArrayOop ClassLoader::get_system_packages(TRAPS) {
-  ResourceMark rm(THREAD);
-  int nof_entries;
-  const char** packages;
-  {
-    MutexLocker ml(PackageTable_lock, THREAD);
-    // Allocate resource char* array containing package names
-    nof_entries = _package_hash_table->number_of_entries();
-    if ((packages = NEW_RESOURCE_ARRAY(const char*, nof_entries)) == NULL) {
-      return NULL;
-    }
-    _package_hash_table->copy_pkgnames(packages);
-  }
-  // Allocate objArray and fill with java.lang.String
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
-                                           nof_entries, CHECK_0);
-  objArrayHandle result(THREAD, r);
-  for (int i = 0; i < nof_entries; i++) {
-    Handle str = java_lang_String::create_from_str(packages[i], CHECK_0);
-    result->obj_at_put(i, str());
-  }
-
-  return result();
-}
-
-
-instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) {
-  VTuneClassLoadMarker clm;
-  ResourceMark rm(THREAD);
-  EventMark m("loading class " INTPTR_FORMAT, (address)h_name());
-  ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
-
-  stringStream st;
-  // st.print() uses too much stack space while handling a StackOverflowError
-  // st.print("%s.class", h_name->as_utf8());
-  st.print_raw(h_name->as_utf8());
-  st.print_raw(".class");
-  char* name = st.as_string();
-
-  // Lookup stream for parsing .class file
-  ClassFileStream* stream = NULL;
-  int classpath_index = 0;
-  {
-    PerfTraceTime vmtimer(perf_accumulated_time());
-    ClassPathEntry* e = _first_entry;
-    while (e != NULL) {
-      stream = e->open_stream(name);
-      if (stream != NULL) {
-        break;
-      }
-      e = e->next();
-      ++classpath_index;
-    }
-  }
-
-  instanceKlassHandle h(THREAD, klassOop(NULL));
-  if (stream != NULL) {
-
-    // class file found, parse it
-    ClassFileParser parser(stream);
-    Handle class_loader;
-    Handle protection_domain;
-    symbolHandle parsed_name;
-    instanceKlassHandle result = parser.parseClassFile(h_name, 
-                                                       class_loader, 
-                                                       protection_domain, 
-                                                       parsed_name,
-                                                       CHECK_(h));
-
-    // add to package table
-    if (add_package(name, classpath_index, THREAD)) {
-      h = result;
-    }
-  }
-
-  return h;
-}
-
-
-void ClassLoader::create_package_info_table(HashtableBucket *t, int length,
-                                            int number_of_entries) {
-  assert(_package_hash_table == NULL, "One package info table allowed.");
-  assert(length == package_hash_table_size * sizeof(HashtableBucket),
-         "bad shared package info size.");
-  _package_hash_table = new PackageHashtable(package_hash_table_size, t,
-                                             number_of_entries);
-}
-
-
-void ClassLoader::create_package_info_table() {
-    assert(_package_hash_table == NULL, "shouldn't have one yet");
-    _package_hash_table = new PackageHashtable(package_hash_table_size);
-}
-
-
-// Initialize the class loader's access to methods in libzip.  Parse and
-// process the boot classpath into a list ClassPathEntry objects.  Once
-// this list has been created, it must not change (see class PackageInfo).
-
-void ClassLoader::initialize() {
-  assert(_package_hash_table == NULL, "should have been initialized by now.");
-  EXCEPTION_MARK;
-
-  if (UsePerfData) {
-    // jvmstat performance counters
-    NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time"); 
-    NEWPERFTICKCOUNTER(_perf_class_init_time, SUN_CLS, "classInitTime");
-    NEWPERFTICKCOUNTER(_perf_class_verify_time, SUN_CLS, "classVerifyTime");
-    NEWPERFTICKCOUNTER(_perf_class_link_time, SUN_CLS, "classLinkedTime");
-
-    NEWPERFEVENTCOUNTER(_perf_classes_inited, SUN_CLS, "initializedClasses");
-    NEWPERFEVENTCOUNTER(_perf_classes_linked, SUN_CLS, "linkedClasses");
-
-    // The following performance counters are added for measuring the impact
-    // of the bug fix of 6365597. They are mainly focused on finding out
-    // the behavior of system & user-defined classloader lock, whether 
-    // ClassLoader.loadClass/findClass is being called synchronized or not.
-    // Also two additional counters are created to see whether 'UnsyncloadClass'
-    // flag is being set or not and how many times load_instance_class call
-    // fails with linkageError etc.
-    NEWPERFEVENTCOUNTER(_sync_systemLoaderLockContentionRate, SUN_CLS, 
-			"systemLoaderLockContentionRate");    
-    NEWPERFEVENTCOUNTER(_sync_nonSystemLoaderLockContentionRate, SUN_CLS,
-			"nonSystemLoaderLockContentionRate");
-    NEWPERFEVENTCOUNTER(_sync_JVMFindLoadedClassLockFreeCounter, SUN_CLS,
-			"jvmFindLoadedClassNoLockCalls");
-    NEWPERFEVENTCOUNTER(_sync_JVMDefineClassLockFreeCounter, SUN_CLS,
-			"jvmDefineClassNoLockCalls");
-
-    NEWPERFEVENTCOUNTER(_sync_JNIDefineClassLockFreeCounter, SUN_CLS,
-			"jniDefineClassNoLockCalls");
-    
-    NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS,
-			"unsafeDefineClassCalls");
-    
-    NEWPERFEVENTCOUNTER(_isUnsyncloadClass, SUN_CLS, "isUnsyncloadClassSet");
-    NEWPERFEVENTCOUNTER(_load_instance_class_failCounter, SUN_CLS,
-			"loadInstanceClassFailRate");
-    
-    // increment the isUnsyncloadClass counter if UnsyncloadClass is set.
-    if (UnsyncloadClass) {
-      _isUnsyncloadClass->inc();
-    }
-  }
-
-  // lookup zip library entry points
-  load_zip_library();
-  // initialize search path
-  setup_bootstrap_search_path();
-  if (LazyBootClassLoader) {
-    // set up meta index which makes boot classpath initialization lazier
-    setup_meta_index();
-  }
-}
-
-
-jlong ClassLoader::classloader_time_ms() {
-  return UsePerfData ?
-    Management::ticks_to_ms(_perf_accumulated_time->get_value()) : -1;
-}
-
-jlong ClassLoader::class_init_count() {
-  return UsePerfData ? _perf_classes_inited->get_value() : -1;
-}
-
-jlong ClassLoader::class_init_time_ms() {
-  return UsePerfData ? 
-    Management::ticks_to_ms(_perf_class_init_time->get_value()) : -1;
-}
-
-jlong ClassLoader::class_verify_time_ms() {
-  return UsePerfData ? 
-    Management::ticks_to_ms(_perf_class_verify_time->get_value()) : -1;
-}
-
-jlong ClassLoader::class_link_count() {
-  return UsePerfData ? _perf_classes_linked->get_value() : -1;
-}
-
-jlong ClassLoader::class_link_time_ms() {
-  return UsePerfData ? 
-    Management::ticks_to_ms(_perf_class_link_time->get_value()) : -1;
-}
-
-int ClassLoader::compute_Object_vtable() {
-  // hardwired for JDK1.2 -- would need to duplicate class file parsing
-  // code to determine actual value from file
-  // Would be value '11' if finals were in vtable
-  int JDK_1_2_Object_vtable_size = 5;
-  return JDK_1_2_Object_vtable_size * vtableEntry::size();
-}
-
-
-void classLoader_init() {
-  ClassLoader::initialize();
-}
-
-
-bool ClassLoader::get_canonical_path(char* orig, char* out, int len) {
-  assert(orig != NULL && out != NULL && len > 0, "bad arguments");        
-  if (CanonicalizeEntry != NULL) {
-    JNIEnv* env = JavaThread::current()->jni_environment();
-    if ((CanonicalizeEntry)(env, hpi::native_path(orig), out, len) < 0) {    
-      return false;  
-    }    
-  } else {
-    // On JDK 1.2.2 the Canonicalize does not exist, so just do nothing
-    strncpy(out, orig, len);
-    out[len - 1] = '\0';    
-  }
-  return true;
-}
-
-#ifndef PRODUCT
-
-void ClassLoader::verify() {
-  _package_hash_table->verify();
-}
-
-
-// CompileTheWorld
-//
-// Iterates over all class path entries and forces compilation of all methods
-// in all classes found. Currently, only zip/jar archives are searched.
-// 
-// The classes are loaded by the Java level bootstrap class loader, and the
-// initializer is called. If DelayCompilationDuringStartup is true (default),
-// the interpreter will run the initialization code. Note that forcing 
-// initialization in this way could potentially lead to initialization order
-// problems, in which case we could just force the initialization bit to be set.
-
-
-// We need to iterate over the contents of a zip/jar file, so we replicate the
-// jzcell and jzfile definitions from zip_util.h but rename jzfile to real_jzfile,
-// since jzfile already has a void* definition.
-//
-// Note that this is only used in debug mode.
-//
-// HotSpot integration note:
-// Matches zip_util.h 1.14 99/06/01 from jdk1.3 beta H build
-
-
-// JDK 1.3 version
-typedef struct real_jzentry13 { 	/* Zip file entry */
-    char *name;	  	  	/* entry name */
-    jint time;            	/* modification time */
-    jint size;	  	  	/* size of uncompressed data */
-    jint csize;  	  	/* size of compressed data (zero if uncompressed) */
-    jint crc;		  	/* crc of uncompressed data */
-    char *comment;	  	/* optional zip file comment */
-    jbyte *extra;	  	/* optional extra data */
-    jint pos;	  	  	/* position of LOC header (if negative) or data */
-} real_jzentry13;
-
-typedef struct real_jzfile13 {  /* Zip file */
-    char *name;	  	        /* zip file name */
-    jint refs;		        /* number of active references */
-    jint fd;		        /* open file descriptor */
-    void *lock;		        /* read lock */
-    char *comment; 	        /* zip file comment */
-    char *msg;		        /* zip error message */
-    void *entries;          	/* array of hash cells */
-    jint total;	  	        /* total number of entries */
-    unsigned short *table;      /* Hash chain heads: indexes into entries */
-    jint tablelen;	        /* number of hash eads */
-    real_jzfile13 *next;        /* next zip file in search list */
-    jzentry *cache;             /* we cache the most recently freed jzentry */
-    /* Information on metadata names in META-INF directory */
-    char **metanames;           /* array of meta names (may have null names) */
-    jint metacount;	        /* number of slots in metanames array */
-    /* If there are any per-entry comments, they are in the comments array */
-    char **comments;
-} real_jzfile13;
-
-// JDK 1.2 version
-typedef struct real_jzentry12 {  /* Zip file entry */
-    char *name;                  /* entry name */
-    jint time;                   /* modification time */
-    jint size;                   /* size of uncompressed data */
-    jint csize;                  /* size of compressed data (zero if uncompressed) */
-    jint crc;                    /* crc of uncompressed data */
-    char *comment;               /* optional zip file comment */
-    jbyte *extra;                /* optional extra data */
-    jint pos;                    /* position of LOC header (if negative) or data */
-    struct real_jzentry12 *next; /* next entry in hash table */
-} real_jzentry12;
-
-typedef struct real_jzfile12 {  /* Zip file */
-    char *name;                 /* zip file name */
-    jint refs;                  /* number of active references */
-    jint fd;                    /* open file descriptor */
-    void *lock;                 /* read lock */
-    char *comment;              /* zip file comment */
-    char *msg;                  /* zip error message */
-    real_jzentry12 *entries;    /* array of zip entries */
-    jint total;                 /* total number of entries */
-    real_jzentry12 **table;     /* hash table of entries */
-    jint tablelen;              /* number of buckets */
-    jzfile *next;               /* next zip file in search list */
-} real_jzfile12;
-
-
-void ClassPathDirEntry::compile_the_world(Handle loader, TRAPS) {
-  // For now we only compile all methods in all classes in zip/jar files
-  tty->print_cr("CompileTheWorld : Skipped classes in %s", _dir);
-  tty->cr();
-}
-
-
-bool ClassPathDirEntry::is_rt_jar() {
-  return false;
-}
-
-void ClassPathZipEntry::compile_the_world(Handle loader, TRAPS) {
-  if (JDK_Version::is_jdk12x_version()) {
-    compile_the_world12(loader, THREAD);
-  } else {
-    compile_the_world13(loader, THREAD);
-  }
-  if (HAS_PENDING_EXCEPTION) {
-    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
-      CLEAR_PENDING_EXCEPTION;
-      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
-      size_t used = Universe::heap()->permanent_used();
-      size_t capacity = Universe::heap()->permanent_capacity();
-      tty->print_cr("Permanent generation used %dK of %dK", used/K, capacity/K);
-      tty->print_cr("Increase size by setting e.g. -XX:MaxPermSize=%dK\n", capacity*2/K);
-    } else {
-      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
-    }
-  }
-}
-
-// Version that works for JDK 1.3.x
-void ClassPathZipEntry::compile_the_world13(Handle loader, TRAPS) {
-  real_jzfile13* zip = (real_jzfile13*) _zip;
-  tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
-  tty->cr();
-  // Iterate over all entries in zip file
-  for (int n = 0; ; n++) {
-    real_jzentry13 * ze = (real_jzentry13 *)((*GetNextEntry)(_zip, n));
-    if (ze == NULL) break;
-    ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
-  }
-}
-
-
-// Version that works for JDK 1.2.x
-void ClassPathZipEntry::compile_the_world12(Handle loader, TRAPS) {
-  real_jzfile12* zip = (real_jzfile12*) _zip;
-  tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
-  tty->cr();
-  // Iterate over all entries in zip file
-  for (int n = 0; ; n++) {
-    real_jzentry12 * ze = (real_jzentry12 *)((*GetNextEntry)(_zip, n));
-    if (ze == NULL) break;
-    ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
-  }
-}
-
-bool ClassPathZipEntry::is_rt_jar() {
-  if (JDK_Version::is_jdk12x_version()) {
-    return is_rt_jar12();
-  } else {
-    return is_rt_jar13();
-  }
-}
-
-// JDK 1.3 version
-bool ClassPathZipEntry::is_rt_jar13() {
-  real_jzfile13* zip = (real_jzfile13*) _zip;
-  int len = (int)strlen(zip->name);
-  // Check whether zip name ends in "rt.jar"
-  // This will match other archives named rt.jar as well, but this is
-  // only used for debugging.
-  return (len >= 6) && (strcasecmp(zip->name + len - 6, "rt.jar") == 0);
-}
-
-// JDK 1.2 version
-bool ClassPathZipEntry::is_rt_jar12() {
-  real_jzfile12* zip = (real_jzfile12*) _zip;
-  int len = (int)strlen(zip->name);
-  // Check whether zip name ends in "rt.jar"
-  // This will match other archives named rt.jar as well, but this is
-  // only used for debugging.
-  return (len >= 6) && (strcasecmp(zip->name + len - 6, "rt.jar") == 0);
-}
-
-void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
-  resolve_entry()->compile_the_world(loader, CHECK);
-}
-
-bool LazyClassPathEntry::is_rt_jar() {
-  return resolve_entry()->is_rt_jar();
-}
-
-void ClassLoader::compile_the_world() {
-  EXCEPTION_MARK;
-  HandleMark hm(THREAD);
-  ResourceMark rm(THREAD);
-  // Make sure we don't run with background compilation
-  BackgroundCompilation = false;
-  // Find bootstrap loader
-  Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
-  // Iterate over all bootstrap class path entries
-  ClassPathEntry* e = _first_entry;
-  while (e != NULL) {
-    // We stop at rt.jar, unless it is the first bootstrap path entry
-    if (e->is_rt_jar() && e != _first_entry) break;
-    e->compile_the_world(system_class_loader, CATCH);
-    e = e->next();
-  }
-  tty->print_cr("CompileTheWorld : Done");
-  {
-    // Print statistics as if before normal exit:
-    extern void print_statistics();
-    print_statistics();
-  }
-  vm_exit(0);
-}
-
-int ClassLoader::_compile_the_world_counter = 0;
-
-void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
-  int len = (int)strlen(name);
-  if (len > 6 && strcmp(".class", name + len - 6) == 0) {
-    // We have a .class file
-    char buffer[2048];
-    strncpy(buffer, name, len - 6);
-    buffer[len-6] = 0;
-    // If the file has a period after removing .class, it's not really a
-    // valid class file.  The class loader will check everything else.
-    if (strchr(buffer, '.') == NULL) {
-      _compile_the_world_counter++;
-      if (_compile_the_world_counter >= CompileTheWorldStartAt && _compile_the_world_counter <= CompileTheWorldStopAt) {
-        // Construct name without extension
-        symbolHandle sym = oopFactory::new_symbol_handle(buffer, CHECK);
-        // Use loader to load and initialize class
-        klassOop ik = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD);
-        instanceKlassHandle k (THREAD, ik);
-        if (k.not_null() && !HAS_PENDING_EXCEPTION) {
-          k->initialize(THREAD);
-        }
-        bool exception_occurred = HAS_PENDING_EXCEPTION;
-        CLEAR_PENDING_EXCEPTION;
-        if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) {
-          // If something went wrong (e.g. ExceptionInInitializerError) we skip this class
-          tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer);
-        } else {
-          tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer);
-          // Preload all classes to get around uncommon traps
-          if (CompileTheWorldPreloadClasses) {
-            constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD);
-            if (HAS_PENDING_EXCEPTION) {
-              // If something went wrong in preloading we just ignore it
-              CLEAR_PENDING_EXCEPTION;
-              tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer);
-            }
-          }
-          // Iterate over all methods in class
-          for (int n = 0; n < k->methods()->length(); n++) {
-            methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
-            if (CompilationPolicy::canBeCompiled(m)) {
-              // Force compilation           
-              CompileBroker::compile_method(m, InvocationEntryBci,
-                                            methodHandle(), 0, "CTW", THREAD);
-              if (HAS_PENDING_EXCEPTION) {
-                CLEAR_PENDING_EXCEPTION;
-                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
-              }
-  	    if (TieredCompilation) {
-  	      // Clobber the first compile and force second tier compilation
-  	      m->clear_code();
-  	      CompileBroker::compile_method(m, InvocationEntryBci,
-                                            methodHandle(), 0, "CTW", THREAD);
-  	      if (HAS_PENDING_EXCEPTION) {
-  		CLEAR_PENDING_EXCEPTION;
-  		tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
-  	      }
-  	    }
-            }
-          }
-        }
-      }
-    }
-  }
-}
-
-#endif //PRODUCT
--- a/hotspot/src/share/vm/runtime/classLoader.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,305 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)classLoader.hpp	1.64 07/05/05 17:06:45 JVM"
-#endif 
-/*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// The VM class loader.
-#include <sys/stat.h>
-
-
-// Meta-index (optional, to be able to skip opening boot classpath jar files)
-class MetaIndex: public CHeapObj {
- private:
-  char** _meta_package_names;
-  int    _num_meta_package_names;
- public:
-  MetaIndex(char** meta_package_names, int num_meta_package_names);
-  ~MetaIndex();
-  bool may_contain(const char* class_name);
-};
-
-
-// Class path entry (directory or zip file)
-
-class ClassPathEntry: public CHeapObj {
- private:
-  ClassPathEntry* _next;
- public:
-  // Next entry in class path
-  ClassPathEntry* next()              { return _next; }
-  void set_next(ClassPathEntry* next) { _next = next; }
-  virtual bool is_jar_file() = 0;
-  virtual const char* name() = 0;
-  virtual bool is_lazy();
-  // Constructor
-  ClassPathEntry();
-  // Attempt to locate file_name through this class path entry.
-  // Returns a class file parsing stream if successfull.
-  virtual ClassFileStream* open_stream(const char* name) = 0;
-  // Debugging
-  NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
-  NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
-};
-
-
-class ClassPathDirEntry: public ClassPathEntry {
- private:
-  char* _dir;           // Name of directory
- public:
-  bool is_jar_file()  { return false;  }
-  const char* name()  { return _dir; }
-  ClassPathDirEntry(char* dir);
-  ClassFileStream* open_stream(const char* name);
-  // Debugging
-  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
-  NOT_PRODUCT(bool is_rt_jar();)
-};
-
-
-// Type definitions for zip file and zip file entry
-typedef void* jzfile;
-typedef struct {
-  char *name;	  	  	/* entry name */
-  jlong time;            	/* modification time */
-  jlong size;	  	  	/* size of uncompressed data */
-  jlong csize;  	  	/* size of compressed data (zero if uncompressed) */
-  jint crc;		  	/* crc of uncompressed data */
-  char *comment;	  	/* optional zip file comment */
-  jbyte *extra;	  		/* optional extra data */
-  jlong pos;	  	  	/* position of LOC header (if negative) or data */
-} jzentry;
-
-
-class ClassPathZipEntry: public ClassPathEntry {
- private:
-  jzfile* _zip;        // The zip archive
-  char*   _zip_name;   // Name of zip archive
- public:
-  bool is_jar_file()  { return true;  }
-  const char* name()  { return _zip_name; }
-  ClassPathZipEntry(jzfile* zip, const char* zip_name);
-  ~ClassPathZipEntry();
-  ClassFileStream* open_stream(const char* name);
-  void contents_do(void f(const char* name, void* context), void* context);
-  // Debugging
-  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
-  NOT_PRODUCT(void compile_the_world12(Handle loader, TRAPS);) // JDK 1.2 version
-  NOT_PRODUCT(void compile_the_world13(Handle loader, TRAPS);) // JDK 1.3 version
-  NOT_PRODUCT(bool is_rt_jar();)
-  NOT_PRODUCT(bool is_rt_jar12();)
-  NOT_PRODUCT(bool is_rt_jar13();)
-};
-
-
-// For lazier loading of boot class path entries
-class LazyClassPathEntry: public ClassPathEntry {
- private:
-  char* _path; // dir or file
-  struct stat _st;
-  MetaIndex* _meta_index;
-  volatile ClassPathEntry* _resolved_entry;
-  ClassPathEntry* resolve_entry();
- public:
-  bool is_jar_file();
-  const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, struct stat st);
-  ClassFileStream* open_stream(const char* name);
-  void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
-  virtual bool is_lazy();
-  // Debugging
-  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
-  NOT_PRODUCT(bool is_rt_jar();)
-};
-
-class PackageHashtable;
-class PackageInfo;
-class HashtableBucket;
-
-class ClassLoader: AllStatic {
- public:
-  enum SomeConstants {
-    package_hash_table_size = 31  // Number of buckets
-  };
- private:
-  friend class LazyClassPathEntry;
-  
-  // Performance counters
-  static PerfCounter* _perf_accumulated_time;
-  static PerfCounter* _perf_classes_inited;
-  static PerfCounter* _perf_class_init_time;
-  static PerfCounter* _perf_class_verify_time;
-  static PerfCounter* _perf_classes_linked;
-  static PerfCounter* _perf_class_link_time;
-  
-  static PerfCounter* _sync_systemLoaderLockContentionRate;
-  static PerfCounter* _sync_nonSystemLoaderLockContentionRate;
-  static PerfCounter* _sync_JVMFindLoadedClassLockFreeCounter;
-  static PerfCounter* _sync_JVMDefineClassLockFreeCounter;
-  static PerfCounter* _sync_JNIDefineClassLockFreeCounter;
-  
-  static PerfCounter* _unsafe_defineClassCallCounter;
-  static PerfCounter* _isUnsyncloadClass;
-  static PerfCounter* _load_instance_class_failCounter;
-
-  // First entry in linked list of ClassPathEntry instances
-  static ClassPathEntry* _first_entry;
-  // Last entry in linked list of ClassPathEntry instances
-  static ClassPathEntry* _last_entry;
-  // Hash table used to keep track of loaded packages
-  static PackageHashtable* _package_hash_table;
-  static const char* _shared_archive;
-
-  // Hash function
-  static unsigned int hash(const char *s, int n);
-  // Returns the package file name corresponding to the specified package 
-  // or class name, or null if not found.
-  static PackageInfo* lookup_package(const char *pkgname);
-  // Adds a new package entry for the specified class or package name and
-  // corresponding directory or jar file name.
-  static bool add_package(const char *pkgname, int classpath_index, TRAPS);
-
-  // Initialization
-  static void setup_meta_index();
-  static void setup_bootstrap_search_path();
-  static void load_zip_library();
-  static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
-  static void update_class_path_entry_list(const char *path);
-
-  // Canonicalizes path names, so strcmp will work properly. This is mainly
-  // to avoid confusing the zip library
-  static bool get_canonical_path(char* orig, char* out, int len);
- public:
-  // Timing
-  static PerfCounter* perf_accumulated_time()  { return _perf_accumulated_time; }
-  static PerfCounter* perf_classes_inited()    { return _perf_classes_inited; }
-  static PerfCounter* perf_class_init_time()   { return _perf_class_init_time; }
-  static PerfCounter* perf_class_verify_time() { return _perf_class_verify_time; }
-  static PerfCounter* perf_classes_linked()    { return _perf_classes_linked; }
-  static PerfCounter* perf_class_link_time() { return _perf_class_link_time; }
-
-  // Record how often system loader lock object is contended
-  static PerfCounter* sync_systemLoaderLockContentionRate() {
-    return _sync_systemLoaderLockContentionRate;
-  }
-
-  // Record how often non system loader lock object is contended
-  static PerfCounter* sync_nonSystemLoaderLockContentionRate() {
-    return _sync_nonSystemLoaderLockContentionRate;
-  }
-
-  // Record how many calls to JVM_FindLoadedClass w/o holding a lock
-  static PerfCounter* sync_JVMFindLoadedClassLockFreeCounter() {
-    return _sync_JVMFindLoadedClassLockFreeCounter;
-  }
-  
-  // Record how many calls to JVM_DefineClass w/o holding a lock
-  static PerfCounter* sync_JVMDefineClassLockFreeCounter() {
-    return _sync_JVMDefineClassLockFreeCounter;
-  }
-
-  // Record how many calls to jni_DefineClass w/o holding a lock
-  static PerfCounter* sync_JNIDefineClassLockFreeCounter() {
-    return _sync_JNIDefineClassLockFreeCounter;
-  }
-
-  // Record how many calls to Unsafe_DefineClass
-  static PerfCounter* unsafe_defineClassCallCounter() {
-    return _unsafe_defineClassCallCounter;
-  }
-
-  // Record how many times SystemDictionary::load_instance_class call
-  // fails with linkageError when Unsyncloadclass flag is set.
-  static PerfCounter* load_instance_class_failCounter() {
-    return _load_instance_class_failCounter;
-  }
-  
-  // Load individual .class file
-  static instanceKlassHandle load_classfile(symbolHandle h_name, TRAPS);  
-
-  // If the specified package has been loaded by the system, then returns
-  // the name of the directory or ZIP file that the package was loaded from.
-  // Returns null if the package was not loaded.
-  // Note: The specified name can either be the name of a class or package.
-  // If a package name is specified, then it must be "/"-separator and also
-  // end with a trailing "/".
-  static oop get_system_package(const char* name, TRAPS);
-
-  // Returns an array of Java strings representing all of the currently
-  // loaded system packages.
-  // Note: The package names returned are "/"-separated and end with a
-  // trailing "/".
-  static objArrayOop get_system_packages(TRAPS);
-
-  // Initialization
-  static void initialize();
-  static void create_package_info_table();
-  static void create_package_info_table(HashtableBucket *t, int length,
-                                        int number_of_entries);
-  static int compute_Object_vtable();
-
-  static ClassPathEntry* classpath_entry(int n) {
-    ClassPathEntry* e = ClassLoader::_first_entry;
-    while (--n >= 0) {
-      assert(e != NULL, "Not that many classpath entries.");
-      e = e->next();
-    }
-    return e;
-  }
-
-  // Sharing dump and restore
-  static void copy_package_info_buckets(char** top, char* end);
-  static void copy_package_info_table(char** top, char* end);
-
-  // VM monitoring and management support
-  static jlong classloader_time_ms();
-  static jlong class_method_total_size();
-  static jlong class_init_count();
-  static jlong class_init_time_ms();
-  static jlong class_verify_time_ms();
-  static jlong class_link_count();
-  static jlong class_link_time_ms();
-
-  // indicates if class path already contains a entry (exact match by name)
-  static bool contains_entry(ClassPathEntry* entry);
-
-  // adds a class path list
-  static void add_to_list(ClassPathEntry* new_entry);
-
-  // creates a class path zip entry (returns NULL if JAR file cannot be opened)
-  static ClassPathZipEntry* create_class_path_zip_entry(const char *apath);   
-
-  // Debugging
-  static void verify()              PRODUCT_RETURN;
-
-  // Force compilation of all methods in all classes in bootstrap class path (stress test)
-#ifndef PRODUCT
- private:
-  static int _compile_the_world_counter;
- public:
-  static void compile_the_world();
-  static void compile_the_world_in(char* name, Handle loader, TRAPS);
-  static int  compile_the_world_counter() { return _compile_the_world_counter; }
-#endif //PRODUCT
-};
--- a/hotspot/src/share/vm/runtime/concurrentGCThread.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,314 +0,0 @@
-/*
- * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// CopyrightVersion 1.2
-
-# include "incls/_precompiled.incl"
-# include "incls/_concurrentGCThread.cpp.incl"
-
-bool ConcurrentGCThread::_should_terminate    = false;
-bool ConcurrentGCThread::_has_terminated      = false;
-int  ConcurrentGCThread::_CGC_flag            = CGC_nil;
-
-SuspendibleThreadSet ConcurrentGCThread::_sts;
-
-ConcurrentGCThread::ConcurrentGCThread() {
-  _sts.initialize();
-};
-
-void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) {
-  MutexLockerEx x(Heap_lock,
-                  Mutex::_no_safepoint_check_flag);
-  // warning("CGC: about to try stopping world");
-  SafepointSynchronize::begin();
-  // warning("CGC: successfully stopped world");
-  op->do_void();
-  SafepointSynchronize::end();
-  // warning("CGC: successfully restarted world");
-}
-
-void ConcurrentGCThread::safepoint_synchronize() {
-  _sts.suspend_all();
-}
-
-void ConcurrentGCThread::safepoint_desynchronize() {
-  _sts.resume_all();
-}
-
-void ConcurrentGCThread::create_and_start() {
-  if (os::create_thread(this, os::cgc_thread)) {
-    // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
-    // should be just less than that of VMThread.
-    os::set_priority(this, NearMaxPriority);
-    if (!_should_terminate && !DisableStartThread) {
-      os::start_thread(this);
-    }
-  }
-}
-
-void ConcurrentGCThread::initialize_in_thread() {
-  this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
-  this->set_active_handles(JNIHandleBlock::allocate_block());
-  // From this time Thread::current() should be working.
-  assert(this == Thread::current(), "just checking");
-}
-
-void ConcurrentGCThread::wait_for_universe_init() {
-  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  while (!is_init_completed() && !_should_terminate) {
-    CGC_lock->wait(Mutex::_no_safepoint_check_flag, 200);
-  }
-}
-
-void ConcurrentGCThread::terminate() {
-  // Signal that it is terminated
-  {
-    MutexLockerEx mu(Terminator_lock,
-                     Mutex::_no_safepoint_check_flag);
-    _has_terminated = true;
-    Terminator_lock->notify();
-  }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
-}
-
-
-void SuspendibleThreadSet::initialize_work() {
-  MutexLocker x(STS_init_lock);
-  if (!_initialized) {
-    _m             = new Monitor(Mutex::leaf,
-				 "SuspendibleThreadSetLock", true);
-    _async         = 0;
-    _async_stop    = false;
-    _async_stopped = 0;
-    _initialized   = true;
-  }
-}
-
-void SuspendibleThreadSet::join() {
-  initialize();
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
-  _async++;
-  assert(_async > 0, "Huh.");
-}
-
-void SuspendibleThreadSet::leave() {
-  assert(_initialized, "Must be initialized.");
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  _async--;
-  assert(_async >= 0, "Huh.");
-  if (_async_stop) _m->notify_all();
-}
-
-void SuspendibleThreadSet::yield(const char* id) {
-  assert(_initialized, "Must be initialized.");
-  if (_async_stop) {
-    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-    if (_async_stop) {
-      _async_stopped++;
-      assert(_async_stopped > 0, "Huh.");
-      if (_async_stopped == _async) {
-	if (ConcGCYieldTimeout > 0) {
-	  double now = os::elapsedTime();
-	  guarantee((now - _suspend_all_start) * 1000.0 <
-		    (double)ConcGCYieldTimeout,
-		    "Long delay; whodunit?");
-	}
-      }
-      _m->notify_all();
-      while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
-      _async_stopped--;
-      assert(_async >= 0, "Huh");
-      _m->notify_all();
-    }
-  }
-}
-
-void SuspendibleThreadSet::suspend_all() {
-  initialize();  // If necessary.
-  if (ConcGCYieldTimeout > 0) {
-    _suspend_all_start = os::elapsedTime();
-  }
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  assert(!_async_stop, "Only one at a time.");
-  _async_stop = true;
-  while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag);
-}
-
-void SuspendibleThreadSet::resume_all() {
-  assert(_initialized, "Must be initialized.");
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  assert(_async_stopped == _async, "Huh.");
-  _async_stop = false;
-  _m->notify_all();
-}
-
-static void _sltLoop(JavaThread* thread, TRAPS) {
-  SurrogateLockerThread* slt = (SurrogateLockerThread*)thread;
-  slt->loop();
-}
-
-SurrogateLockerThread::SurrogateLockerThread() :
-  JavaThread(&_sltLoop),
-  _monitor(Mutex::nonleaf, "SLTMonitor"),
-  _buffer(empty)
-{}
-
-SurrogateLockerThread* SurrogateLockerThread::make(TRAPS) {
-  klassOop k =
-    SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(),
-                                      true, CHECK_NULL);
-  instanceKlassHandle klass (THREAD, k);
-  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);
-
-  const char thread_name[] = "Surrogate Locker Thread (CMS)";
-  Handle string = java_lang_String::create_from_str(thread_name, CHECK_NULL);
-
-  // Initialize thread_oop to put it into the system threadGroup
-  Handle thread_group (THREAD, Universe::system_thread_group());
-  JavaValue result(T_VOID);
-  JavaCalls::call_special(&result, thread_oop,
-			  klass,
-			  vmSymbolHandles::object_initializer_name(),
-			  vmSymbolHandles::threadgroup_string_void_signature(),
-			  thread_group,
-			  string,
-			  CHECK_NULL);
-
-  SurrogateLockerThread* res;
-  {
-    MutexLocker mu(Threads_lock);
-    res = new SurrogateLockerThread();
-
-    // At this point it may be possible that no osthread was created for the
-    // JavaThread due to lack of memory. We would have to throw an exception
-    // in that case. However, since this must work and we do not allow
-    // exceptions anyway, check and abort if this fails.
-    if (res == NULL || res->osthread() == NULL) {
-      vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
-    }
-    java_lang_Thread::set_thread(thread_oop(), res);
-    java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
-    java_lang_Thread::set_daemon(thread_oop());
-
-    res->set_threadObj(thread_oop());
-    Threads::add(res);
-    Thread::start(res);
-  }
-  os::yield(); // This seems to help with initial start-up of SLT
-  return res;
-}
-
-void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
-  MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
-  assert(_buffer == empty, "Should be empty");
-  assert(msg != empty, "empty message");
-  _buffer = msg;
-  while (_buffer != empty) {
-    _monitor.notify();
-    _monitor.wait(Mutex::_no_safepoint_check_flag);
-  }
-}
-
-// ======= Surrogate Locker Thread =============
-
-void SurrogateLockerThread::loop() {
-  BasicLock pll_basic_lock;
-  SLT_msg_type msg;
-  debug_only(unsigned int owned = 0;)
-
-  while (/* !isTerminated() */ 1) {
-    {
-      MutexLocker x(&_monitor);
-      // Since we are a JavaThread, we can't be here at a safepoint.
-      assert(!SafepointSynchronize::is_at_safepoint(),
-             "SLT is a JavaThread");
-      // wait for msg buffer to become non-empty
-      while (_buffer == empty) {
-        _monitor.notify();
-        _monitor.wait();
-      }
-      msg = _buffer;
-    }
-    switch(msg) {
-      case acquirePLL: {
-        instanceRefKlass::acquire_pending_list_lock(&pll_basic_lock);
-        debug_only(owned++;)
-        break;
-      }
-      case releaseAndNotifyPLL: {
-        assert(owned > 0, "Don't have PLL");
-        instanceRefKlass::release_and_notify_pending_list_lock(&pll_basic_lock);
-        debug_only(owned--;)
-        break;
-      }
-      case empty:
-      default: {
-        guarantee(false,"Unexpected message in _buffer");
-        break;
-      }
-    }
-    {
-      MutexLocker x(&_monitor);
-      // Since we are a JavaThread, we can't be here at a safepoint.
-      assert(!SafepointSynchronize::is_at_safepoint(),
-             "SLT is a JavaThread");
-      _buffer = empty;
-      _monitor.notify();
-    }
-  }
-  assert(!_monitor.owned_by_self(), "Should unlock before exit.");
-}
-
-
-// ===== STS Access From Outside CGCT =====
-
-void ConcurrentGCThread::stsYield(const char* id) {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-	  "only a conc GC thread can call this" );
-  _sts.yield(id);
-}
-
-bool ConcurrentGCThread::stsShouldYield() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-	  "only a conc GC thread can call this" );
-  return _sts.should_yield();
-}
-
-void ConcurrentGCThread::stsJoin() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-	  "only a conc GC thread can call this" );
-  _sts.join();
-}
-
-void ConcurrentGCThread::stsLeave() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-	  "only a conc GC thread can call this" );
-  _sts.leave();
-}
--- a/hotspot/src/share/vm/runtime/concurrentGCThread.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,165 +0,0 @@
-/*
- * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class VoidClosure;
-
-// A SuspendibleThreadSet is (obviously) a set of threads that can be
-// suspended.  A thread can join and later leave the set, and periodically
-// yield.  If some thread (not in the set) requests, via suspend_all, that
-// the threads be suspended, then the requesting thread is blocked until
-// all the threads in the set have yielded or left the set.  (Threads may
-// not enter the set when an attempted suspension is in progress.)  The
-// suspending thread later calls resume_all, allowing the suspended threads
-// to continue.
-
-class SuspendibleThreadSet {
-  Monitor* _m;
-  int      _async;
-  bool     _async_stop;
-  int      _async_stopped;
-  bool     _initialized;
-  double   _suspend_all_start;
-
-  void initialize_work();
-
- public:
-  SuspendibleThreadSet() : _initialized(false) {}
-
-  // Add the current thread to the set.  May block if a suspension
-  // is in progress.    
-  void join();
-  // Removes the current thread from the set.
-  void leave();
-  // Returns "true" iff an suspension is in progress.
-  bool should_yield() { return _async_stop; }
-  // Suspends the current thread if a suspension is in progress (for
-  // the duration of the suspension.)
-  void yield(const char* id);
-  // Return when all threads in the set are suspended.
-  void suspend_all();
-  // Allow suspended threads to resume.
-  void resume_all();
-  // Redundant initializations okay.
-  void initialize() {
-    // Double-check dirty read idiom.
-    if (!_initialized) initialize_work();
-  }
-};
-
-
-class ConcurrentGCThread: public NamedThread {
-  friend class VMStructs;
-
-protected:
-  static bool _should_terminate;
-  static bool _has_terminated;
-
-  enum CGC_flag_type {
-    CGC_nil           = 0x0,
-    CGC_dont_suspend  = 0x1,
-    CGC_CGC_safepoint = 0x2,
-    CGC_VM_safepoint  = 0x4
-  };
-
-  static int _CGC_flag;
-
-  static bool CGC_flag_is_set(int b)       { return (_CGC_flag & b) != 0; }
-  static int set_CGC_flag(int b)           { return _CGC_flag |= b; }
-  static int reset_CGC_flag(int b)         { return _CGC_flag &= ~b; }
-
-  void stopWorldAndDo(VoidClosure* op);
-
-  // All instances share this one set.
-  static SuspendibleThreadSet _sts;
-
-  // Create and start the thread (setting it's priority high.)
-  void create_and_start();
-
-  // Do initialization steps in the thread: record stack base and size,
-  // init thread local storage, set JNI handle block.
-  void initialize_in_thread();
-
-  // Wait until Universe::is_fully_initialized();
-  void wait_for_universe_init();
-
-  // Record that the current thread is terminating, and will do more
-  // concurrent work.
-  void terminate();
-
-public:
-  // Constructor
-
-  ConcurrentGCThread();
-  ~ConcurrentGCThread() {} // Exists to call NamedThread destructor.
-  
-  // Tester
-  bool is_ConcurrentGC_thread() const          { return true;       }
-
-  static void safepoint_synchronize();
-  static void safepoint_desynchronize();
-
-  // All overridings should probably do _sts::yield, but we allow
-  // overriding for distinguished debugging messages.  Default is to do
-  // nothing.
-  virtual void yield() {}
-
-  bool should_yield() { return _sts.should_yield(); }
-
-  // they are prefixed by sts since there are already yield() and
-  // should_yield() (non-static) methods in this class and it was an
-  // easy way to differentiate them.
-  static void stsYield(const char* id);
-  static bool stsShouldYield();
-  static void stsJoin();
-  static void stsLeave();
-
-};
-
-// The SurrogateLockerThread is used by concurrent GC threads for
-// manipulating Java monitors, in particular, currently for
-// manipulating the pending_list_lock. XXX
-class SurrogateLockerThread: public JavaThread {
-  friend class VMStructs;
- public:
-  enum SLT_msg_type {
-    empty = 0,           // no message
-    acquirePLL,          // acquire pending list lock
-    releaseAndNotifyPLL  // notify and release pending list lock
-  };
- private:
-  // the following are shared with the CMSThread
-  SLT_msg_type  _buffer;  // communication buffer
-  Monitor       _monitor; // monitor controlling buffer
-  BasicLock     _basicLock; // used for PLL locking
-  
- public:
-  static SurrogateLockerThread* make(TRAPS);
-
-  SurrogateLockerThread();
-
-  void loop(); // main method
-
-  void manipulatePLL(SLT_msg_type msg);
-
-};
--- a/hotspot/src/share/vm/runtime/concurrentMarkSweepThread.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,353 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)concurrentMarkSweepThread.cpp	1.48 07/05/05 17:06:45 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_concurrentMarkSweepThread.cpp.incl"
-
-// ======= Concurrent Mark Sweep Thread ========
-
-// The CMS thread is created when Concurrent Mark Sweep is used in the
-// older of two generations in a generational memory system.
-
-ConcurrentMarkSweepThread*
-     ConcurrentMarkSweepThread::_cmst     = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
-bool ConcurrentMarkSweepThread::_should_terminate = false;
-int  ConcurrentMarkSweepThread::_CMS_flag         = CMS_nil;
-
-volatile jint ConcurrentMarkSweepThread::_pending_yields      = 0;
-volatile jint ConcurrentMarkSweepThread::_pending_decrements  = 0;
-
-volatile bool ConcurrentMarkSweepThread::_icms_enabled   = false;
-volatile bool ConcurrentMarkSweepThread::_should_run     = false;
-// When icms is enabled, the icms thread is stopped until explicitly
-// started.
-volatile bool ConcurrentMarkSweepThread::_should_stop    = true;
-
-SurrogateLockerThread*
-     ConcurrentMarkSweepThread::_slt = NULL;
-SurrogateLockerThread::SLT_msg_type
-     ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
-Monitor*
-     ConcurrentMarkSweepThread::_sltMonitor = NULL;
-
-ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
-  : ConcurrentGCThread() {
-  assert(UseConcMarkSweepGC,  "UseConcMarkSweepGC should be set");
-  assert(_cmst == NULL, "CMS thread already created");
-  _cmst = this;
-  assert(_collector == NULL, "Collector already set");
-  _collector = collector;
-
-  set_name("Concurrent Mark-Sweep GC Thread");
-
-  if (os::create_thread(this, os::cgc_thread)) {
-    // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
-    // should be just less than that of VMThread.
-    os::set_priority(this, NearMaxPriority);
-    if (!DisableStartThread) {
-      os::start_thread(this);
-    }
-  }
-  _sltMonitor = SLT_lock;
-  set_icms_enabled(CMSIncrementalMode);
-}
-
-void ConcurrentMarkSweepThread::run() {
-  assert(this == cmst(), "just checking");
-
-  this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
-  this->set_active_handles(JNIHandleBlock::allocate_block());
-  // From this time Thread::current() should be working.
-  assert(this == Thread::current(), "just checking");
-  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
-    warning("Couldn't bind CMS thread to processor %u", CPUForCMSThread);
-  }
-  // Wait until Universe::is_fully_initialized()
-  {
-    CMSLoopCountWarn loopX("CMS::run", "waiting for "
-                           "Universe::is_fully_initialized()", 2);
-    MutexLockerEx x(CGC_lock, true);
-    set_CMS_flag(CMS_cms_wants_token);
-    // Wait until Universe is initialized and all initialization is completed.
-    while (!is_init_completed() && !Universe::is_fully_initialized() &&
-           !_should_terminate) {
-      CGC_lock->wait(true, 200);
-      loopX.tick();
-    }
-    // Wait until the surrogate locker thread that will do
-    // pending list locking on our behalf has been created.
-    // We cannot start the SLT thread ourselves since we need
-    // to be a JavaThread to do so.
-    CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
-    while (_slt == NULL && !_should_terminate) {
-      CGC_lock->wait(true, 200);
-      loopY.tick();
-    }
-    clear_CMS_flag(CMS_cms_wants_token);
-  }
-
-  while (!_should_terminate) {
-    sleepBeforeNextCycle();
-    if (_should_terminate) break;
-    _collector->collect_in_background(false);  // !clear_all_soft_refs
-  }
-  assert(_should_terminate, "just checking");
-  // Check that the state of any protocol for synchronization
-  // between background (CMS) and foreground collector is "clean"
-  // (i.e. will not potentially block the foreground collector,
-  // requiring action by us).
-  verify_ok_to_terminate();
-  // Signal that it is terminated
-  {
-    MutexLockerEx mu(Terminator_lock,
-                     Mutex::_no_safepoint_check_flag);
-    assert(_cmst == this, "Weird!");
-    _cmst = NULL;
-    Terminator_lock->notify();
-  }
-  
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
-  assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
-           cms_thread_wants_cms_token()),
-         "Must renounce all worldly possessions and desires for nirvana");
-  _collector->verify_ok_to_terminate();
-}
-#endif
-
-// create and start a new ConcurrentMarkSweep Thread for given CMS generation
-ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
-  if (!_should_terminate) {
-    assert(cmst() == NULL, "start() called twice?");
-    ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
-    assert(cmst() == th, "Where did the just-created CMS thread go?");
-    return th;
-  }
-  return NULL;
-}
-
-void ConcurrentMarkSweepThread::stop() {
-  if (CMSIncrementalMode) {
-    // Disable incremental mode and wake up the thread so it notices the change.
-    disable_icms();
-    start_icms();
-  }
-  // it is ok to take late safepoints here, if needed
-  {
-    MutexLockerEx x(Terminator_lock);
-    _should_terminate = true;  
-  }
-  { // Now post a notify on CGC_lock so as to nudge
-    // CMS thread(s) that might be slumbering in
-    // sleepBeforeNextCycle.
-    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    CGC_lock->notify_all();
-  }
-  { // Now wait until (all) CMS thread(s) have exited
-    MutexLockerEx x(Terminator_lock);
-    while(cmst() != NULL) {
-      Terminator_lock->wait();
-    }
-  }  
-}
-
-void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
-  assert(tc != NULL, "Null ThreadClosure");
-  if (_cmst != NULL) {
-    tc->do_thread(_cmst);
-  }
-  assert(Universe::is_fully_initialized(), 
-         "Called too early, make sure heap is fully initialized");
-  if (_collector != NULL) {
-    AbstractWorkGang* gang = _collector->conc_workers();
-    if (gang != NULL) {
-      gang->threads_do(tc);
-    }
-  }
-}
-
-void ConcurrentMarkSweepThread::print_on(outputStream* st) const {
-  st->print("\"%s\" ", name());
-  Thread::print_on(st);
-  st->cr();
-}
-
-void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
-  if (_cmst != NULL) {
-    _cmst->print_on(st);
-  }
-  if (_collector != NULL) {
-    AbstractWorkGang* gang = _collector->conc_workers();
-    if (gang != NULL) {
-      gang->print_worker_threads_on(st);
-    }
-  }
-}
-
-void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
-  assert(UseConcMarkSweepGC, "just checking");
-
-  MutexLockerEx x(CGC_lock,
-                  Mutex::_no_safepoint_check_flag);
-  if (!is_cms_thread) {
-    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
-    CMSSynchronousYieldRequest yr;
-    while (CMS_flag_is_set(CMS_cms_has_token)) {
-      // indicate that we want to get the token
-      set_CMS_flag(CMS_vm_wants_token);
-      CGC_lock->wait(true);
-    }
-    // claim the token and proceed
-    clear_CMS_flag(CMS_vm_wants_token);
-    set_CMS_flag(CMS_vm_has_token);
-  } else {
-    assert(Thread::current()->is_ConcurrentGC_thread(),
-           "Not a CMS thread");
-    // The following barrier assumes there's only one CMS thread.
-    // This will need to be modified is there are more CMS threads than one.
-    while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
-      set_CMS_flag(CMS_cms_wants_token);
-      CGC_lock->wait(true);
-    }
-    // claim the token
-    clear_CMS_flag(CMS_cms_wants_token);
-    set_CMS_flag(CMS_cms_has_token);
-  }
-}
-
-void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
-  assert(UseConcMarkSweepGC, "just checking");
-
-  MutexLockerEx x(CGC_lock,
-                  Mutex::_no_safepoint_check_flag);
-  if (!is_cms_thread) {
-    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
-    assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
-    clear_CMS_flag(CMS_vm_has_token);
-    if (CMS_flag_is_set(CMS_cms_wants_token)) {
-      // wake-up a waiting CMS thread
-      CGC_lock->notify();
-    }
-    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
-           "Should have been cleared");
-  } else {
-    assert(Thread::current()->is_ConcurrentGC_thread(),
-           "Not a CMS thread");
-    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
-    clear_CMS_flag(CMS_cms_has_token);
-    if (CMS_flag_is_set(CMS_vm_wants_token)) {
-      // wake-up a waiting VM thread
-      CGC_lock->notify();
-    }
-    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
-           "Should have been cleared");
-  }
-}
-
-// Wait until the next synchronous GC or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
-  MutexLockerEx x(CGC_lock,
-                  Mutex::_no_safepoint_check_flag);
-  set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
-  clear_CMS_flag(CMS_cms_wants_token);
-  assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
-         "Should not be set");
-}
-
-void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
-  while (!_should_terminate) {
-    if (CMSIncrementalMode) {
-      icms_wait();
-      return;
-    } else {
-      // Wait until the next synchronous GC or a timeout, whichever is earlier
-      wait_on_cms_lock(CMSWaitDuration);
-    }
-    // Check if we should start a CMS collection cycle
-    if (_collector->shouldConcurrentCollect()) {
-      return;
-    }
-    // .. collection criterion not yet met, let's go back 
-    // and wait some more
-  }
-}
-
-// Incremental CMS
-void ConcurrentMarkSweepThread::start_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  trace_state("start_icms");
-  _should_run = true;
-  iCMS_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::stop_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  if (!_should_stop) {
-    trace_state("stop_icms");
-    _should_stop = true;
-    _should_run = false;
-    asynchronous_yield_request();
-    iCMS_lock->notify_all();
-  }
-}
-
-void ConcurrentMarkSweepThread::icms_wait() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  if (_should_stop && icms_enabled()) {
-    MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-    trace_state("pause_icms");
-    _collector->stats().stop_cms_timer();
-    while(!_should_run && icms_enabled()) {
-      iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-    _collector->stats().start_cms_timer();
-    _should_stop = false;
-    trace_state("pause_icms end");
-  }
-}
-
-// Note: this method, although exported by the ConcurrentMarkSweepThread,
-// which is a non-JavaThread, can only be called by a JavaThread.
-// Currently this is done at vm creation time (post-vm-init) by the
-// main/Primordial (Java)Thread.
-// XXX Consider changing this in the future to allow the CMS thread
-// itself to create this thread?
-void ConcurrentMarkSweepThread::makeSurrogateLockerThread(TRAPS) {
-  assert(UseConcMarkSweepGC, "SLT thread needed only for CMS GC");
-  assert(_slt == NULL, "SLT already created");
-  _slt = SurrogateLockerThread::make(THREAD);
-}
--- a/hotspot/src/share/vm/runtime/concurrentMarkSweepThread.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,232 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)concurrentMarkSweepThread.hpp	1.38 07/05/05 17:06:46 JVM"
-#endif
-/*
- * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class ConcurrentMarkSweepGeneration;
-class CMSCollector;
-
-// The Concurrent Mark Sweep GC Thread (could be several in the future).
-class ConcurrentMarkSweepThread: public ConcurrentGCThread {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepGeneration;   // XXX should remove friendship
-  friend class CMSCollector;
- public:
-  virtual void run();
-
- private:
-  static ConcurrentMarkSweepThread*     _cmst;
-  static CMSCollector*                  _collector;
-  static SurrogateLockerThread*         _slt;
-  static SurrogateLockerThread::SLT_msg_type _sltBuffer;
-  static Monitor*                       _sltMonitor;
-
-  ConcurrentMarkSweepThread*            _next;
-
-  static bool _should_terminate;
-
-  enum CMS_flag_type {
-    CMS_nil             = NoBits,
-    CMS_cms_wants_token = nth_bit(0),
-    CMS_cms_has_token   = nth_bit(1),
-    CMS_vm_wants_token  = nth_bit(2),
-    CMS_vm_has_token    = nth_bit(3)
-  };
-
-  static int _CMS_flag;
-
-  static bool CMS_flag_is_set(int b)        { return (_CMS_flag & b) != 0;   }
-  static bool set_CMS_flag(int b)           { return (_CMS_flag |= b) != 0;  }
-  static bool clear_CMS_flag(int b)         { return (_CMS_flag &= ~b) != 0; }
-  void sleepBeforeNextCycle();
-
-  // CMS thread should yield for a young gen collection, direct allocation,
-  // and iCMS activity.
-  static char _pad_1[64 - sizeof(jint)];    // prevent cache-line sharing
-  static volatile jint _pending_yields;
-  static volatile jint _pending_decrements; // decrements to _pending_yields
-  static char _pad_2[64 - sizeof(jint)];    // prevent cache-line sharing
-
-  // Tracing messages, enabled by CMSTraceThreadState.
-  static inline void trace_state(const char* desc);
-
-  static volatile bool _icms_enabled;	// iCMS enabled?
-  static volatile bool _should_run;	// iCMS may run
-  static volatile bool _should_stop;	// iCMS should stop
-
-  // debugging
-  void verify_ok_to_terminate() const PRODUCT_RETURN;
-
- public:
-  // Constructor
-  ConcurrentMarkSweepThread(CMSCollector* collector);
-
-  static void makeSurrogateLockerThread(TRAPS);
-  static SurrogateLockerThread* slt() { return _slt; }
-
-  // Tester
-  bool is_ConcurrentGC_thread() const { return true;       }
-
-  static void threads_do(ThreadClosure* tc);
-
-  // Printing
-  void print_on(outputStream* st) const;
-  void print() const 				      { print_on(tty); }
-  static void print_all_on(outputStream* st);
-  static void print_all() 			      { print_all_on(tty); }
-
-  // Returns the CMS Thread
-  static ConcurrentMarkSweepThread* cmst()    { return _cmst; }
-  static CMSCollector*         collector()    { return _collector;  }
-
-  // Create and start the CMS Thread, or stop it on shutdown
-  static ConcurrentMarkSweepThread* start(CMSCollector* collector);
-  static void stop();
-  static bool should_terminate() { return _should_terminate; }
-
-  // Synchronization using CMS token
-  static void synchronize(bool is_cms_thread);
-  static void desynchronize(bool is_cms_thread);
-  static bool vm_thread_has_cms_token() {
-    return CMS_flag_is_set(CMS_vm_has_token);
-  }
-  static bool cms_thread_has_cms_token() {
-    return CMS_flag_is_set(CMS_cms_has_token);
-  }
-  static bool vm_thread_wants_cms_token() {
-    return CMS_flag_is_set(CMS_vm_wants_token);
-  }
-  static bool cms_thread_wants_cms_token() {
-    return CMS_flag_is_set(CMS_cms_wants_token);
-  }
-
-  // Wait on CMS lock until the next synchronous GC 
-  // or given timeout, whichever is earlier.
-  void    wait_on_cms_lock(long t); // milliseconds
-
-  // The CMS thread will yield during the work portion of it's cycle
-  // only when requested to.  Both synchronous and asychronous requests
-  // are provided.  A synchronous request is used for young gen
-  // collections and direct allocations.  The requesting thread increments
-  // pending_yields at the beginning of an operation, and decrements it when
-  // the operation is completed.  The CMS thread yields when pending_yields
-  // is positive.  An asynchronous request is used by iCMS in the stop_icms()
-  // operation. A single yield satisfies the outstanding asynch yield requests.
-  // The requesting thread increments both pending_yields and pending_decrements.
-  // After yielding, the CMS thread decrements both by the amount in
-  // pending_decrements.
-  // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
-  // we cannot easily test that invariant, since the counters are manipulated via
-  // atomic instructions without explicit locking and we cannot read
-  // the two counters atomically together: one suggestion is to
-  // use (for example) 16-bit counters so as to be able to read the
-  // two counters atomically even on 32-bit platforms. Notice that
-  // the second assert in acknowledge_yield_request() does indeed
-  // check a form of the above invariant, albeit indirectly.
-
-  static void increment_pending_yields()   {
-    Atomic::inc(&_pending_yields);
-    assert(_pending_yields >= 0, "can't be negative");
-  }
-  static void decrement_pending_yields()   {
-    Atomic::dec(&_pending_yields);
-    assert(_pending_yields >= 0, "can't be negative");
-  }
-  static void asynchronous_yield_request() {
-    increment_pending_yields();
-    Atomic::inc(&_pending_decrements);
-    assert(_pending_decrements >= 0, "can't be negative");
-  }
-  static void acknowledge_yield_request() {
-    jint decrement = _pending_decrements;
-    if (decrement > 0) {
-      // Order important to preserve: _pending_yields >= _pending_decrements
-      Atomic::add(-decrement, &_pending_decrements);
-      Atomic::add(-decrement, &_pending_yields);
-      assert(_pending_decrements >= 0, "can't be negative");
-      assert(_pending_yields >= 0, "can't be negative");
-    }
-  }
-  static bool should_yield()   { return _pending_yields > 0; }
-
-  // CMS incremental mode.
-  static void start_icms(); // notify thread to start a quantum of work
-  static void stop_icms();  // request thread to stop working
-  void icms_wait();	    // if asked to stop, wait until notified to start
-
-  // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
-  // must also be enabled/disabled dynamically to allow foreground collections.
-  static inline void enable_icms()              { _icms_enabled = true; }
-  static inline void disable_icms()             { _icms_enabled = false; }
-  static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
-  static inline bool icms_enabled()             { return _icms_enabled; } 
-};
-
-inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
-  if (CMSTraceThreadState) {
-    char buf[128];
-    TimeStamp& ts = gclog_or_tty->time_stamp();
-    if (!ts.is_updated()) {
-      ts.update();
-    }
-    jio_snprintf(buf, sizeof(buf), " [%.3f:  CMSThread %s] ",
-		 ts.seconds(), desc);
-    buf[sizeof(buf) - 1] = '\0';
-    gclog_or_tty->print(buf);
-  }
-}
-
-// For scoped increment/decrement of yield requests
-class CMSSynchronousYieldRequest: public StackObj {
- public:
-  CMSSynchronousYieldRequest() {
-    ConcurrentMarkSweepThread::increment_pending_yields();
-  }
-  ~CMSSynchronousYieldRequest() {
-    ConcurrentMarkSweepThread::decrement_pending_yields();
-  }
-};
-
-// Used to emit a warning in case of unexpectedly excessive
-// looping (in "apparently endless loops") in CMS code.
-class CMSLoopCountWarn: public StackObj {
- private:
-  const char* _src;
-  const char* _msg;
-  const intx  _threshold;
-  intx        _ticks;
-
- public:
-  inline CMSLoopCountWarn(const char* src, const char* msg,
-                          const intx threshold) :
-    _src(src), _msg(msg), _threshold(threshold), _ticks(0) { }
-
-  inline void tick() {
-    _ticks++;
-    if (CMSLoopWarn && _ticks % _threshold == 0) {
-      warning("%s has looped %d times %s", _src, _ticks, _msg);
-    }
-  }
-};
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)deoptimization.cpp	1.281 07/05/05 17:06:45 JVM"
+#pragma ident "@(#)deoptimization.cpp	1.282 07/05/17 16:05:24 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1474,8 +1474,9 @@
   juint total = total_deoptimization_count();
   juint account = total;
   if (total != 0) {
+    ttyLocker ttyl;
+    if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
     tty->print_cr("Deoptimization traps recorded:");
-    ttyLocker ttyl;
     #define PRINT_STAT_LINE(name, r) \
       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
     PRINT_STAT_LINE("total", total);
@@ -1506,6 +1507,7 @@
       PRINT_STAT_LINE("unaccounted", account);
     }
     #undef PRINT_STAT_LINE
+    if (xtty != NULL)  xtty->tail("statistics");
   }
 }
 #else // COMPILER2
--- a/hotspot/src/share/vm/runtime/globals.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)globals.cpp	1.48 07/05/05 17:06:43 JVM"
+#pragma ident "@(#)globals.cpp	1.49 07/05/17 16:05:26 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -38,10 +38,6 @@
                  MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
                  MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
 
-#ifdef JVMPI_SUPPORT
-RUNTIME_JVMPI_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG)
-#endif // JVMPI_SUPPORT
-
 bool Flag::is_unlocker() const {
   return strcmp(name, "UnlockDiagnosticVMOptions") == 0;
 }
@@ -154,6 +150,7 @@
 
 #define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 product}", DEFAULT },
 #define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd product}", DEFAULT },
+#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 diagnostic}", DEFAULT },
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */ 
   #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */ 
@@ -168,14 +165,11 @@
 static Flag flagTable[] = {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
  RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT)
-#ifdef JVMPI_SUPPORT
- RUNTIME_JVMPI_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT)
-#endif // JVMPI_SUPPORT
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
- C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
+ C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
 #endif
  {0, NULL, NULL}
 };
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)globals.hpp	1.961 07/05/05 17:06:48 JVM"
+#pragma ident "@(#)globals.hpp	1.962 07/05/17 16:05:39 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -416,6 +416,9 @@
           "inline Object::hashCode() native that is known to be part "      \
           "of base library DLL")                                            \
                                                                             \
+  develop(bool, InlineObjectCopy, true,                                     \
+          "inline Object.clone and Arrays.copyOf[Range] intrinsics")        \
+                                                                            \
   develop(bool, InlineNatives, true,                                        \
           "inline natives that are known to be part of base library DLL")   \
                                                                             \
@@ -919,9 +922,15 @@
   product(bool, LazyBootClassLoader, true,                                  \
           "Enable/disable lazy opening of boot class path entries")         \
                                                                             \
+  diagnostic(bool, UseIncDec, true,                                         \
+          "Use INC, DEC instructions on x86")                               \
+                                                                            \
   product(bool, UseStoreImmI16, true,                                       \
           "Use store immediate 16-bits value instruction on x86")           \
                                                                             \
+  product(bool, UseAddressNop, false,                                       \
+          "Use '0F 1F [addr]' NOP instructions on x86 cpus")                \
+                                                                            \
   product(intx, FieldsAllocationStyle, 1,                                   \
           "0 - type based with oops first, 1 - with oops last")             \
                                                                             \
@@ -937,7 +946,7 @@
   product(intx, BiasedLockingStartupDelay, 4000,                            \
           "Number of milliseconds to wait before enabling biased locking")  \
                                                                             \
-  develop(bool, PrintBiasedLockingStatistics, false,                        \
+  diagnostic(bool, PrintBiasedLockingStatistics, false,                     \
           "Print statistics of biased locking in JVM")                      \
                                                                             \
   product(intx, BiasedLockingBulkRebiasThreshold, 20,                       \
@@ -1163,6 +1172,11 @@
           "A System.gc() request invokes a concurrent collection;"          \
           " (effective only when UseConcMarkSweepGC)")                      \
                                                                             \
+  product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false,        \
+          "A System.gc() request invokes a concurrent collection and"       \
+          " also unloads classes during such a concurrent gc cycle  "       \
+          " (effective only when UseConcMarkSweepGC)")                      \
+                                                                            \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")      	    \
                                                                             \
@@ -1369,9 +1383,6 @@
           "Whether concurrent precleaning enabled in perm gen"              \
           " (effective only when CMSPrecleaningEnabled is true)")           \
                                                                             \
-  product(bool, CMSPermGenSweepingEnabled, false,                           \
-          "Whether sweeping of perm gen is enabled")                        \
-                                                                            \
   product(bool, CMSPrecleaningEnabled, true,                                \
           "Whether concurrent precleaning enabled")                         \
                                                                             \
@@ -1992,9 +2003,18 @@
   notproduct(bool, TimeCompiler2, false,                                    \
           "detailed time the compiler (requires +TimeCompiler)")            \
                                                                             \
-  develop(bool, PrintInlining, false,                                       \
+  diagnostic(bool, PrintInlining, false,                                    \
           "prints inlining optimizations")                                  \
                                                                             \
+  diagnostic(bool, PrintIntrinsics, false,                                  \
+          "prints attempted and successful inlining of intrinsics")         \
+                                                                            \
+  diagnostic(ccstr, DisableIntrinsic, "",                                   \
+          "do not expand intrinsics whose (internal) names appear here")    \
+                                                                            \
+  develop(bool, StressReflectiveCode, false,                                \
+          "Use inexact types at allocations, etc., to test reflection")     \
+                                                                            \
   develop(bool, EagerInitialization, false,                                 \
           "Eagerly initialize classes if possible")                         \
                                                                             \
@@ -3151,29 +3171,4 @@
 
 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
-#ifdef JVMPI_SUPPORT
 
-#define RUNTIME_JVMPI_FLAGS(develop, product) \
-  develop(bool, TraceJVMPI, false,                                          \
-          "Trace JVMPI")                                                    \
-                                                                            \
-  product(bool, EnableJVMPIInstructionStartEvent, false,                    \
-          "Enable JVMPI_EVENT_INSTRUCTION_START events - slows down "       \
-          "interpretation")                                                 \
-                                                                            \
-  product(bool, JVMPICheckGCCompatibility, true,                            \
-          "If JVMPI is used, make sure that we are using a "                \
-          " JVMPI-compatible garbage collector ")                           \
-                                                                            \
-  product(bool, UseSuspendResumeThreadLists, true,                          \
-          "Enable SuspendThreadList and ResumeThreadList")                  \
-                                                                            \
-  /* flags for slow deprecation */                                          \
-                                                                            \
-  product(bool, UseUnsupportedDeprecatedJVMPI, false,                       \
-          "Flag to temporarily re-enable the, soon to be removed, "         \
-          "experimental interface JVMPI.")
-
-RUNTIME_JVMPI_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG)
-#endif // JVMPI_SUPPORT
-
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)globals_extension.hpp	1.16 07/05/05 17:06:48 JVM"
+#pragma ident "@(#)globals_extension.hpp	1.17 07/05/17 16:05:46 JVM"
 #endif
 /*
  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -60,6 +60,7 @@
 
 #define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */ 
   #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */ 
@@ -83,7 +84,7 @@
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER,
-          C2_NOTPRODUCT_FLAG_MEMBER)
+          C2_DIAGNOSTIC_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
 #endif
  NUM_CommandLineFlag
 } CommandLineFlag;
@@ -122,6 +123,7 @@
 
 #define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
 #define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */ 
   #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */ 
@@ -149,7 +151,7 @@
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER_WITH_TYPE, C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C2_PRODUCT_FLAG_MEMBER_WITH_TYPE,
-          C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+          C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
  NUM_CommandLineFlagWithType
 } CommandLineFlagWithType;
--- a/hotspot/src/share/vm/runtime/hpi.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/hpi.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)hpi.cpp	1.17 07/05/05 17:06:42 JVM"
+#pragma ident "@(#)hpi.cpp	1.18 07/05/17 16:05:48 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -43,15 +43,9 @@
   unimplemented_panic,
   unimplemented_monitorRegister,
   
-#ifdef JVMPI_SUPPORT
-  NULL, // jvmpi_monitor_contended_enter,
-  NULL, // jvmpi_monitor_contended_entered,
-  NULL  // jvmpi_monitor_contended_exit,
-#else // !JVMPI_SUPPORT
   NULL, // unused
   NULL, // unused
   NULL  // unused
-#endif // JVMPI_SUPPORT
 };
 
 GetInterfaceFunc        hpi::_get_interface = NULL;
--- a/hotspot/src/share/vm/runtime/init.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/init.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)init.cpp	1.120 07/05/05 17:06:49 JVM"
+#pragma ident "@(#)init.cpp	1.121 07/05/17 16:05:50 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -69,11 +69,7 @@
 void stubRoutines_init2(); // note: StubRoutines need 2-phase init 
 
 // Do not disable thread-local-storage, as it is important for some
-#ifdef JVMPI_SUPPORT
-// JNI/JVM/JVMPI/JVMTI functions and signal handlers to work properly
-#else // !JVMPI_SUPPORT
 // JNI/JVM/JVMTI functions and signal handlers to work properly
-#endif // JVMPI_SUPPORT
 // during VM shutdown
 void perfMemory_exit();
 void ostream_exit();
--- a/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)interfaceSupport.hpp	1.175 07/05/05 17:06:50 JVM"
+#pragma ident "@(#)interfaceSupport.hpp	1.176 07/05/17 16:05:52 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -569,25 +569,3 @@
 
 
 #define JVM_END } }
-#ifdef JVMPI_SUPPORT
-
-// Definitions for JVMPI
-
-#define JVMPI_ENTRY(result_type, header)                             \
-  result_type header {                                               \
-    Thread* THREAD = Thread::current();                              \
-    ThreadInVMfromUnknown __tiv;                                     \
-    HandleMark __hm;                                                 \
-    TRACE_CALL(result_type, header)                                  \
-    /* body */
-
-#define JVMPI_END  }
-
-#define JVMPI_RAW_ENTRY(result_type, header)                         \
-  result_type header {                                               \
-    TRACE_CALL(result_type, header)                                  \
-    Thread* THREAD = Thread::current();                              \
-    /* body */
-
-#define JVMPI_RAW_END  }
-#endif // JVMPI_SUPPORT
--- a/hotspot/src/share/vm/runtime/java.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/java.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)java.cpp	1.219 07/05/05 17:06:50 JVM"
+#pragma ident "@(#)java.cpp	1.220 07/05/17 16:06:10 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -183,20 +183,16 @@
 #ifdef COMPILER2
   if ((PrintOptoStatistics || LogVMOutput || LogCompilation) && UseCompiler) {
     FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintOptoStatistics);
-    Parse::print_statistics();
-    PhaseCCP::print_statistics();
-    PhaseRegAlloc::print_statistics();
-    Scheduling::print_statistics();
+    Compile::print_statistics();
+#ifndef COMPILER1
     Deoptimization::print_statistics();
     nmethod::print_statistics();
+#endif //COMPILER1
     SharedRuntime::print_statistics();
-    PhasePeephole::print_statistics();
-    PhaseIdealLoop::print_statistics();
-
     os::print_statistics();
   }
 
-  if (PrintLockStatistics) {
+  if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics) {
     OptoRuntime::print_named_counters();
   }
 
@@ -294,6 +290,14 @@
   if (CITime) {
     CompileBroker::print_times();
   }
+#ifdef COMPILER2
+  if (PrintPreciseBiasedLockingStatistics) {
+    OptoRuntime::print_named_counters();
+  }
+#endif
+  if (PrintBiasedLockingStatistics) {
+    BiasedLocking::print_counters();
+  }
 }
 
 #endif
@@ -347,11 +351,7 @@
   #define BEFORE_EXIT_DONE    2
   static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;
 
-#ifdef JVMPI_SUPPORT
-  // Note: don't use a Mutex to guard the entire before_exit(), as JVMPI and
-#else // !JVMPI_SUPPORT
   // Note: don't use a Mutex to guard the entire before_exit(), as
-#endif // JVMPI_SUPPORT
   // JVMTI post_thread_end_event and post_vm_death_event will run native code. 
   // A CAS or OSMutex would work just fine but then we need to manipulate 
   // thread state for Safepoint. Here we use Monitor wait() and notify_all() 
@@ -437,29 +437,8 @@
   // may be attached late and JVMTI must track phases of VM execution
   JvmtiExport::post_vm_death();
 
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::enabled()) {
-    // Notify the profiler this thread is about to die, so it can stop
-    // querying us - we don't want to get caught in a suspend-query-resume
-    // operation when we disengage JVMPI.
-    if (jvmpi::is_event_enabled(JVMPI_EVENT_THREAD_END)) {
-      jvmpi::post_thread_end_event(thread);
-    }
-
-    if (jvmpi::is_event_enabled(JVMPI_EVENT_JVM_SHUT_DOWN)) {
-      jvmpi::post_vm_death_event();
-    }
-
-    // Do not accept new JVMPI requests or post new events
-    jvmpi::disengage();
-  }
-#endif // JVMPI_SUPPORT
-
   // Terminate the signal thread
   // Note: we don't wait until it actually dies.
-#ifdef JVMPI_SUPPORT
-  // Do this after JVMPI is disengaged, see 4513141
-#endif // JVMPI_SUPPORT
   os::terminate_signal_thread();
 
   print_statistics();
--- a/hotspot/src/share/vm/runtime/javaAssertions.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,210 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)javaAssertions.cpp	1.14 07/05/05 17:06:50 JVM"
-#endif
-/*
- * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_javaAssertions.cpp.incl"
-
-bool				JavaAssertions::_userDefault = false;
-bool				JavaAssertions::_sysDefault = false;
-JavaAssertions::OptionList*	JavaAssertions::_classes = 0;
-JavaAssertions::OptionList*	JavaAssertions::_packages = 0;
-
-JavaAssertions::OptionList::OptionList(const char* name, bool enabled,
-  OptionList* next) {
-  assert(name != 0, "need a name");
-  _name = name;
-  _enabled = enabled;
-  _next = next;
-}
-
-int JavaAssertions::OptionList::count(OptionList* p) {
-  int rc;
-  for (rc = 0; p != 0; p = p->next(), ++rc) /* empty */;
-  return rc;
-}
-
-void JavaAssertions::addOption(const char* name, bool enable) {
-  assert(name != 0, "must have a name");
-
-  // Copy the name.  The storage needs to exist for the the lifetime of the vm;
-  // it is never freed, so will be leaked (along with other option strings -
-  // e.g., bootclasspath) if a process creates/destroys multiple VMs.
-  int len = (int)strlen(name);
-  char *name_copy = NEW_C_HEAP_ARRAY(char, len + 1);
-  strcpy(name_copy, name);
-
-  // Figure out which list the new item should go on.  Names that end in "..."
-  // go on the package tree list.
-  OptionList** head = &_classes;
-  if (len >= 3 && strcmp(name_copy + len - 3, "...") == 0) {
-    // Delete the "...".
-    len -= 3;
-    name_copy[len] = '\0';
-    head = &_packages;
-  }
-
-  // Convert class/package names to internal format.  Will have to convert back
-  // when copying to java in createJavaAssertionStatusDirectives, but that
-  // should happen only once.  Alternative would require that
-  // JVM_DesiredAssertionStatus pass the external_name() to
-  // JavaAssertion::enabled(), but that is done once per loaded class.
-  for (int i = 0; i < len; ++i) {
-    if (name_copy[i] == '.') name_copy[i] = '/';
-  }
-
-  if (TraceJavaAssertions) {
-    tty->print_cr("JavaAssertions: adding %s %s=%d",
-      head == &_classes ? "class" : "package",
-      name_copy[0] != '\0' ? name_copy : "'default'",
-      enable);
-  }
-
-  // Prepend a new item to the list.  Items added later take precedence, so
-  // prepending allows us to stop searching the list after the first match.
-  *head = new OptionList(name_copy, enable, *head);
-}
-
-oop JavaAssertions::createAssertionStatusDirectives(TRAPS) {
-  symbolHandle asd_sym = vmSymbolHandles::java_lang_AssertionStatusDirectives();
-  klassOop k = SystemDictionary::resolve_or_fail(asd_sym, true, CHECK_NULL);
-  instanceKlassHandle asd_klass (THREAD, k);
-  asd_klass->initialize(CHECK_NULL);
-  Handle h = asd_klass->allocate_instance_handle(CHECK_NULL);
-
-  int len;
-  typeArrayOop t;
-  len = OptionList::count(_packages);
-  objArrayOop pn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
-  objArrayHandle pkgNames (THREAD, pn);
-  t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
-  typeArrayHandle pkgEnabled(THREAD, t);
-  fillJavaArrays(_packages, len, pkgNames, pkgEnabled, CHECK_NULL);
-
-  len = OptionList::count(_classes);
-  objArrayOop cn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
-  objArrayHandle classNames (THREAD, cn);
-  t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
-  typeArrayHandle classEnabled(THREAD, t);
-  fillJavaArrays(_classes, len, classNames, classEnabled, CHECK_NULL);
-
-  java_lang_AssertionStatusDirectives::set_packages(h(), pkgNames());
-  java_lang_AssertionStatusDirectives::set_packageEnabled(h(), pkgEnabled());
-  java_lang_AssertionStatusDirectives::set_classes(h(), classNames());
-  java_lang_AssertionStatusDirectives::set_classEnabled(h(), classEnabled());
-  java_lang_AssertionStatusDirectives::set_deflt(h(), userClassDefault());
-  return h();
-}
-
-void JavaAssertions::fillJavaArrays(const OptionList* p, int len,
-objArrayHandle names, typeArrayHandle enabled, TRAPS) {
-  // Fill in the parallel names and enabled (boolean) arrays.  Start at the end
-  // of the array and work backwards, so the order of items in the arrays
-  // matches the order on the command line (the list is in reverse order, since
-  // it was created by prepending successive items from the command line).
-  int index;
-  for (index = len - 1; p != 0; p = p->next(), --index) {
-    assert(index >= 0, "length does not match list");
-    Handle s = java_lang_String::create_from_str(p->name(), CHECK);
-    s = java_lang_String::char_converter(s, '/', '.', CHECK);
-    names->obj_at_put(index, s());
-    enabled->bool_at_put(index, p->enabled());
-  }
-  assert(index == -1, "length does not match list");
-}
-
-inline JavaAssertions::OptionList*
-JavaAssertions::match_class(const char* classname) {
-  for (OptionList* p = _classes; p != 0; p = p->next()) {
-    if (strcmp(p->name(), classname) == 0) {
-      return p;
-    }
-  }
-  return 0;
-}
-
-JavaAssertions::OptionList*
-JavaAssertions::match_package(const char* classname) {
-  // Search the package list for any items that apply to classname.  Each
-  // sub-package in classname is checked, from most-specific to least, until one
-  // is found.
-  if (_packages == 0) return 0;
-
-  // Find the length of the "most-specific" package in classname.  If classname
-  // does not include a package, length will be 0 which will match items for the
-  // default package (from options "-ea:..."  or "-da:...").
-  size_t len = strlen(classname);
-  for (/* empty */; len > 0 && classname[len] != '/'; --len) /* empty */;
-
-  do {
-    assert(len == 0 || classname[len] == '/', "not a package name");
-    for (OptionList* p = _packages; p != 0; p = p->next()) {
-      if (strncmp(p->name(), classname, len) == 0 && p->name()[len] == '\0') {
-	return p;
-      }
-    }
-
-    // Find the length of the next package, taking care to avoid decrementing
-    // past 0 (len is unsigned).
-    while (len > 0 && classname[--len] != '/') /* empty */;
-  } while (len > 0);
-
-  return 0;
-}
-
-inline void JavaAssertions::trace(const char* name,
-const char* typefound, const char* namefound, bool enabled) {
-  if (TraceJavaAssertions) {
-    tty->print_cr("JavaAssertions:  search for %s found %s %s=%d",
-      name, typefound, namefound[0] != '\0' ? namefound : "'default'", enabled);
-  }
-}
-
-bool JavaAssertions::enabled(const char* classname, bool systemClass) {
-  assert(classname != 0, "must have a classname");
-
-  // This will be slow if the number of assertion options on the command line is
-  // large--it traverses two lists, one of them multiple times.  Could use a
-  // single n-ary tree instead of lists if someone ever notices.
-
-  // First check options that apply to classes.  If we find a match we're done.
-  OptionList* p;
-  if (p = match_class(classname)) {
-    trace(classname, "class", p->name(), p->enabled());
-    return p->enabled();
-  }
-
-  // Now check packages, from most specific to least.
-  if (p = match_package(classname)) {
-    trace(classname, "package", p->name(), p->enabled());
-    return p->enabled();
-  }
-
-  // No match.  Return the default status.
-  bool result = systemClass ? systemClassDefault() : userClassDefault();
-  trace(classname, systemClass ? "system" : "user", "default", result);
-  return result;
-}
--- a/hotspot/src/share/vm/runtime/javaAssertions.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)javaAssertions.hpp	1.11 07/05/05 17:06:50 JVM"
-#endif
-/*
- * Copyright 2000 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class JavaAssertions: AllStatic {
-public:
-  static inline bool userClassDefault();
-  static inline void setUserClassDefault(bool enabled);
-  static inline bool systemClassDefault();
-  static inline void setSystemClassDefault(bool enabled);
-
-  // Add a command-line option.  A name ending in "..." applies to a package and
-  // any subpackages; other names apply to a single class.
-  static void addOption(const char* name, bool enable);
-
-  // Return true if command-line options have enabled assertions for the named
-  // class.  Should be called only after all command-line options have been
-  // processed.  Note:  this only consults command-line options and does not
-  // account for any dynamic changes to assertion status.
-  static bool enabled(const char* classname, bool systemClass);
-
-  // Create an instance of java.lang.AssertionStatusDirectives and fill in the
-  // fields based on the command-line assertion options.
-  static oop createAssertionStatusDirectives(TRAPS);
-
-private:
-  class OptionList;
-  static void fillJavaArrays(const OptionList* p, int len, objArrayHandle names,
-    typeArrayHandle status, TRAPS);
-
-  static inline void trace(const char* name, const char* typefound,
-    const char* namefound, bool enabled);
-
-  static inline OptionList*	match_class(const char* classname);
-  static OptionList*		match_package(const char* classname);
-
-  static bool		_userDefault;	// User class default (-ea/-da).
-  static bool		_sysDefault;	// System class default (-esa/-dsa).
-  static OptionList*	_classes;	// Options for classes.
-  static OptionList*	_packages;	// Options for package trees.
-};
-
-class JavaAssertions::OptionList: public CHeapObj {
-public:
-  inline OptionList(const char* name, bool enable, OptionList* next);
-
-  inline const char*	name() const	{ return _name; }
-  inline bool		enabled() const	{ return _enabled; }
-  inline OptionList*	next() const	{ return _next; }
-
-  static int count(OptionList* p);
-
-private:
-  const char*	_name;
-  OptionList*	_next;
-  bool		_enabled;
-};
-
-inline bool JavaAssertions::userClassDefault() {
-  return _userDefault;
-}
-
-inline void JavaAssertions::setUserClassDefault(bool enabled) {
-  if (TraceJavaAssertions)
-    tty->print_cr("JavaAssertions::setUserClassDefault(%d)", enabled);
-  _userDefault = enabled;
-}
-
-inline bool JavaAssertions::systemClassDefault() {
-  return _sysDefault;
-}
-
-inline void JavaAssertions::setSystemClassDefault(bool enabled) {
-  if (TraceJavaAssertions)
-    tty->print_cr("JavaAssertions::setSystemClassDefault(%d)", enabled);
-  _sysDefault = enabled;
-}
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)jniHandles.cpp	1.63 07/05/05 17:06:51 JVM"
+#pragma ident "@(#)jniHandles.cpp	1.64 07/05/17 16:06:13 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -68,11 +68,7 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-jobject JNIHandles::make_global(Handle obj, bool post_jvmpi_event) {  
-#else // !JVMPI_SUPPORT
 jobject JNIHandles::make_global(Handle obj) {  
-#endif // JVMPI_SUPPORT
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
@@ -83,11 +79,6 @@
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
 
-#ifdef JVMPI_SUPPORT
-  if (Universe::jvmpi_jni_global_alloc_event_enabled()) {
-    jvmpi::post_new_globalref_event(res, obj(), post_jvmpi_event);
-  }
-#endif // JVMPI_SUPPORT
   return res;
 }
 
@@ -102,31 +93,11 @@
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
-#ifdef JVMPI_SUPPORT
-  if (Universe::jvmpi_jni_weak_global_alloc_event_enabled()) {
-    jvmpi::post_new_weakref_event(res, obj());
-  }
-#endif // JVMPI_SUPPORT
   return res;
 }
 
 jmethodID JNIHandles::make_jmethod_id(methodHandle mh) {
-#ifdef JVMPI_SUPPORT
-  // once JVMPI goes away, this can be just -- return (jmethodID) make_weak_global(mh);
-  // Below is make_weak_global without the JVMPI code
-  jobject res = NULL;
-  if (!mh.is_null()) {
-    // ignore null handles
-    MutexLocker ml(JNIGlobalHandle_lock);
-    assert(Universe::heap()->is_in_reserved(mh()), "sanity check");
-    res = _weak_global_handles->allocate_handle(mh());   
-  } else {
-    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
-  }
-  return (jmethodID)res;
-#else // !JVMPI_SUPPORT
   return (jmethodID) make_weak_global(mh);
-#endif // JVMPI_SUPPORT
 }
 
 
@@ -139,14 +110,7 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-void JNIHandles::destroy_global(jobject handle, bool post_jvmpi_event) {
-  if (Universe::jvmpi_jni_global_free_event_enabled()) {
-    jvmpi::post_delete_globalref_event(handle, post_jvmpi_event);
-  }
-#else // !JVMPI_SUPPORT
 void JNIHandles::destroy_global(jobject handle) {
-#endif // JVMPI_SUPPORT
   if (handle != NULL) {
     assert(is_global_handle(handle), "Invalid delete of global JNI handle");
     *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
@@ -155,11 +119,6 @@
 
 
 void JNIHandles::destroy_weak_global(jobject handle) {
-#ifdef JVMPI_SUPPORT
-  if (Universe::jvmpi_jni_weak_global_free_event_enabled()) {
-    jvmpi::post_delete_weakref_event(handle);
-  }
-#endif // JVMPI_SUPPORT
   if (handle != NULL) {
     assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle");
     *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
@@ -167,16 +126,7 @@
 }
 
 void JNIHandles::destroy_jmethod_id(jmethodID mid) {
-#ifdef JVMPI_SUPPORT
-  // once JVMPI goes away, this can be just -- destroy_weak_global((jobject)mid);
-  // Below is destroy_weak_global without the JVMPI code
-  if (mid != NULL) {
-    assert(!CheckJNICalls || is_weak_global_handle((jobject)mid), "Invalid delete of jmethodID");
-    *((oop*)mid) = deleted_handle(); // Mark the jmethodID as deleted, allocate will reuse it
-  }
-#else // !JVMPI_SUPPORT
   destroy_weak_global((jobject)mid);
-#endif // JVMPI_SUPPORT
 }
 
 
--- a/hotspot/src/share/vm/runtime/jniHandles.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/jniHandles.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)jniHandles.hpp	1.53 07/05/05 17:06:51 JVM"
+#pragma ident "@(#)jniHandles.hpp	1.54 07/05/17 16:06:14 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -52,13 +52,8 @@
   inline static void destroy_local(jobject handle);
 
   // Global handles
-#ifdef JVMPI_SUPPORT
-  static jobject make_global(Handle  obj,    bool post_jvmpi_event = true);
-  static void destroy_global(jobject handle, bool post_jvmpi_event = true);
-#else // !JVMPI_SUPPORT
   static jobject make_global(Handle  obj);
   static void destroy_global(jobject handle);
-#endif // JVMPI_SUPPORT
 
   // Weak global handles
   static jobject make_weak_global(Handle obj);
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)mutexLocker.hpp	1.148 07/05/05 17:06:51 JVM"
+#pragma ident "@(#)mutexLocker.hpp	1.149 07/05/17 16:06:16 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -81,9 +81,6 @@
 extern Monitor* ProfileVM_lock;                  // a lock used for profiling the VMThread
 extern Mutex*   ProfilePrint_lock;               // a lock used to serialize the printing of profiles
 extern Mutex*   ExceptionCache_lock;             // a lock used to synchronize exception cache updates
-#ifdef JVMPI_SUPPORT
-extern Monitor* ObjAllocPost_lock;               // a lock used to synchronize VMThread JVM/PI OBJ_ALLOC event posting
-#endif // JVMPI_SUPPORT
 extern Mutex*   OsrList_lock;                    // a lock used to serialize access to OSR queues
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)objectMonitor.hpp	1.40 07/05/05 17:06:52 JVM"
+#pragma ident "@(#)objectMonitor.hpp	1.41 07/05/17 16:06:18 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -38,11 +38,7 @@
 // transformed from the lightweight structure of the thread stack to a
 // heavy weight lock due to contention
 
-#ifdef JVMPI_SUPPORT
-// It is also used as RawMonitor by the JVMTI and JVMPI
-#else // !JVMPI_SUPPORT
 // It is also used as RawMonitor by the JVMTI
-#endif // JVMPI_SUPPORT
 
 
 class ObjectWaiter;
@@ -129,19 +125,14 @@
   intptr_t  complete_exit(TRAPS);
   void      reenter(intptr_t recursions, TRAPS);
 
-#ifdef JVMPI_SUPPORT
-  int       raw_enter(TRAPS, bool ReportEvents);
-  int       raw_exit(TRAPS, bool ReportEvents);
-#else // !JVMPI_SUPPORT
   int       raw_enter(TRAPS);
   int       raw_exit(TRAPS);
-#endif // JVMPI_SUPPORT
   int       raw_wait(jlong millis, bool interruptable, TRAPS);
   int       raw_notify(TRAPS);
   int       raw_notifyAll(TRAPS);
 
  private:
-  // JVMTI/DI/PI support -- remove ASAP
+  // JVMTI support -- remove ASAP
   int       SimpleEnter (Thread * Self) ; 
   int       SimpleExit  (Thread * Self) ; 
   int       SimpleWait  (Thread * Self, jlong millis) ; 
--- a/hotspot/src/share/vm/runtime/os.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/os.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)os.cpp	1.179 07/05/05 17:06:51 JVM"
+#pragma ident "@(#)os.cpp	1.180 07/05/17 16:06:20 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -197,9 +197,6 @@
         if (JvmtiExport::should_post_data_dump()) {
           JvmtiExport::post_data_dump();
         }
-#ifdef JVMPI_SUPPORT
-        jvmpi::post_dump_event();
-#endif // JVMPI_SUPPORT
         break;
       }
       default: {       
--- a/hotspot/src/share/vm/runtime/os.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)os.hpp	1.217 07/05/05 17:06:49 JVM"
+#pragma ident "@(#)os.hpp	1.218 07/05/17 16:06:23 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -465,11 +465,6 @@
   // Structured OS Exception support
   static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi
-  static bool thread_is_running(JavaThread* tp);
-#endif // JVMPI_SUPPORT
-
   // JVMTI & JVM monitoring and management support
   // The thread_cpu_time() and current_thread_cpu_time() are only
   // supported if is_thread_cpu_time_supported() returns true.
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)sharedRuntime.cpp	1.380 07/05/05 17:06:57 JVM"
+#pragma ident "@(#)sharedRuntime.cpp	1.381 07/05/17 16:06:26 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -77,6 +77,10 @@
 int SharedRuntime::_jint_array_copy_ctr=0;
 int SharedRuntime::_jlong_array_copy_ctr=0;
 int SharedRuntime::_oop_array_copy_ctr=0;
+int SharedRuntime::_checkcast_array_copy_ctr=0;
+int SharedRuntime::_unsafe_array_copy_ctr=0;
+int SharedRuntime::_generic_array_copy_ctr=0;
+int SharedRuntime::_slow_array_copy_ctr=0;
 int SharedRuntime::_find_handler_ctr=0;          
 int SharedRuntime::_rethrow_ctr=0;
 
@@ -619,12 +623,7 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-// SharedRuntime::trace_bytecode is only used by the TraceBytecodes and
-// EnableJVMPIInstructionStartEvent options. When JVM/PI is retired,
-// this entire routine can be made '#ifndef PRODUCT'.
-#endif // JVMPI_SUPPORT
-#if defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+#ifndef PRODUCT
 JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
   const frame f = thread->last_frame();
   assert(f.is_interpreted_frame(), "must be an interpreted frame");
@@ -632,14 +631,9 @@
   methodHandle mh(THREAD, f.interpreter_frame_method());
   BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
 #endif // !PRODUCT
-#ifdef JVMPI_SUPPORT
-  if (EnableJVMPIInstructionStartEvent && jvmpi::is_event_enabled(JVMPI_EVENT_INSTRUCTION_START)) {
-    jvmpi::post_instruction_start_event(f);
-  }
-#endif // JVMPI_SUPPORT
   return preserve_this_value;
 JRT_END
-#endif // defined(JVMPI_SUPPORT) || !defined(PRODUCT)
+#endif // !PRODUCT
 
 
 JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
@@ -688,36 +682,6 @@
 
 #endif  // PRODUCT
 
-#ifdef JVMPI_SUPPORT
-// This is factored because it is also called from Runtime1.
-void SharedRuntime::jvmpi_method_entry_work(
-    JavaThread* thread, methodOop method, oop receiver) {
-  methodHandle m(thread, method);
-  bool entry = jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY);
-  bool entry2 = jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_ENTRY2);
-  if (entry || entry2) {
-    GC_locker::lock();
-    if (entry2) {
-      jvmpi::post_method_entry2_event(m(), receiver);
-    } 
-    if (entry) {
-      jvmpi::post_method_entry_event(m());
-    } 
-    GC_locker::unlock();
-  }
-}
-
-void SharedRuntime::jvmpi_method_exit_work(
-    JavaThread* thread, methodOop method) {
-  methodHandle m(thread, method);
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_METHOD_EXIT)) {
-    GC_locker::lock();
-    jvmpi::post_method_exit_event(m());
-    GC_locker::unlock();
-  }
-}
-#endif // JVMPI_SUPPORT
-
 
 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
   assert(obj->is_oop(), "must be a valid oop");
@@ -726,20 +690,6 @@
 JRT_END
 
 
-#ifdef JVMPI_SUPPORT
-// Must be entry as it may lock when acquring the jmethodID of the method
-// but we don't need no stinkin' async exceptions
-JRT_ENTRY_NO_ASYNC (void, SharedRuntime::jvmpi_method_entry(
-    JavaThread* thread, methodOopDesc* method, oopDesc* receiver))
-  jvmpi_method_entry_work(thread, method, receiver);
-JRT_END
-
-JRT_ENTRY_NO_ASYNC (void, SharedRuntime::jvmpi_method_exit(
-    JavaThread* thread, methodOopDesc* method))
-  jvmpi_method_exit_work(thread, method);
-JRT_END
-#endif // JVMPI_SUPPORT
-
 jlong SharedRuntime::get_java_tid(Thread* thread) {
   if (thread != NULL) {
     if (thread->is_Java_thread()) {
@@ -1492,6 +1442,30 @@
 IRT_END
 
 
+// same as JVM_Arraycopy, but called directly from compiled code
+JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
+                                                oopDesc* dest, jint dest_pos,
+                                                jint length,
+                                                JavaThread* thread)) {
+#ifndef PRODUCT
+  _slow_array_copy_ctr++;
+#endif
+  // Check if we have null pointers
+  if (src == NULL || dest == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+  // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
+  // even though the copy_array API also performs dynamic checks to ensure
+  // that src and dest are truly arrays (and are conformable).
+  // The copy_array mechanism is awkward and could be removed, but
+  // the compilers don't call this function except as a last resort,
+  // so it probably doesn't matter.
+  Klass::cast(src->klass())->copy_array((arrayOopDesc*)src,  src_pos,
+                                        (arrayOopDesc*)dest, dest_pos,
+                                        length, thread);
+}
+JRT_END
+
 char* SharedRuntime::generate_class_cast_message(
     JavaThread* thread, const char* objName) {
 
@@ -1635,6 +1609,10 @@
   if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
   if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
   if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
+  if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
+  if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
+  if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
+  if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
   if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
   if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)sharedRuntime.hpp	1.155 07/05/05 17:06:56 JVM"
+#pragma ident "@(#)sharedRuntime.hpp	1.156 07/05/17 16:06:29 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -161,13 +161,7 @@
   static address native_method_throw_unsatisfied_link_error_entry();
 
   // bytecode tracing is only used by the TraceBytecodes
-#ifdef JVMPI_SUPPORT
-  // and EnableJVMPIInstructionStartEvent options. When JVM/PI is retired,
-  // this declaration can be made PRODUCT_RETURN0.
-  static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2);
-#else // !JVMPI_SUPPORT
   static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;
-#endif // JVMPI_SUPPORT
   
   // Used to back off a spin lock that is under heavy contention
   static void yield_all(JavaThread* thread, int attempts = 0);
@@ -179,16 +173,6 @@
 
   static void register_finalizer(JavaThread* thread, oopDesc* obj);
 
-#ifdef JVMPI_SUPPORT
-  // jvmpi notifications
-  static void jvmpi_method_entry(
-    JavaThread* thread, methodOopDesc* method, oopDesc* receiver);
-  static void jvmpi_method_exit(JavaThread* thread, methodOopDesc* method);
-  static void jvmpi_method_entry_work(
-    JavaThread* thread, methodOop method, oop receiver);
-  static void jvmpi_method_exit_work(JavaThread* thread, methodOop method);
-#endif // JVMPI_SUPPORT
-
   // dtrace notifications
   static int dtrace_object_alloc(oopDesc* o);
   static int dtrace_object_alloc_base(Thread* thread, oopDesc* o);
@@ -383,6 +367,11 @@
   static address resolve_static_call_C     (JavaThread *thread);    
   static address resolve_virtual_call_C    (JavaThread *thread);    
   static address resolve_opt_virtual_call_C(JavaThread *thread);  
+
+  // arraycopy, the non-leaf version.  (See StubRoutines for all the leaf calls.)
+  static void slow_arraycopy_C(oopDesc* src,  jint src_pos,
+                               oopDesc* dest, jint dest_pos,
+                               jint length, JavaThread* thread);
   
   // handle ic miss with caller being compiled code
   // wrong method handling (inline cache misses, zombie methods)
@@ -416,6 +405,10 @@
   static int _jint_array_copy_ctr;         // Slow-path int array copy
   static int _jlong_array_copy_ctr;        // Slow-path long array copy
   static int _oop_array_copy_ctr;          // Slow-path oop array copy
+  static int _checkcast_array_copy_ctr;    // Slow-path oop array copy, with cast
+  static int _unsafe_array_copy_ctr;       // Slow-path includes alignment checks
+  static int _generic_array_copy_ctr;      // Slow-path includes type decoding
+  static int _slow_array_copy_ctr;         // Slow-path failed out to a method call
 
   static int _new_instance_ctr;            // 'new' object requires GC
   static int _new_array_ctr;               // 'new' array requires GC
--- a/hotspot/src/share/vm/runtime/stackMapFrame.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,307 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stackMapFrame.cpp	1.24 07/05/05 17:06:57 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_stackMapFrame.cpp.incl"
-
-StackMapFrame::StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* v) : 
-                      _offset(0), _locals_size(0), _stack_size(0), _flags(0), 
-                      _max_locals(max_locals), _max_stack(max_stack),
-                      _verifier(v) {
-  Thread* thr = v->thread();
-  _locals = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_locals);
-  _stack = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_stack);
-  int32_t i;
-  for(i = 0; i < max_locals; i++) {
-    _locals[i] = VerificationType::bogus_type();
-  }
-  for(i = 0; i < max_stack; i++) {
-    _stack[i] = VerificationType::bogus_type();
-  }  
-}
-
-StackMapFrame* StackMapFrame::frame_in_exception_handler(u1 flags) {
-  Thread* thr = _verifier->thread();
-  VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, 1);
-  StackMapFrame* frame = new StackMapFrame(_offset, flags, _locals_size, 0, _max_locals, _max_stack, _locals, stack, _verifier);
-  return frame;
-}
-
-bool StackMapFrame::has_new_object() const {
-  int32_t i;
-  for (i = 0; i < _max_locals; i++) {
-    if (_locals[i].is_uninitialized()) {
-      return true;
-    }
-  }
-  for (i = 0; i < _stack_size; i++) {
-    if (_stack[i].is_uninitialized()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-void StackMapFrame::initialize_object(
-    VerificationType old_object, VerificationType new_object) {
-  int32_t i;
-  for (i = 0; i < _max_locals; i++) {
-    if (_locals[i].equals(old_object)) {
-      _locals[i] = new_object;
-    }
-  }
-  for (i = 0; i < _stack_size; i++) {
-    if (_stack[i].equals(old_object)) {
-      _stack[i] = new_object;
-    }
-  }
-  if (old_object == VerificationType::uninitialized_this_type()) {
-    // "this" has been initialized - reset flags
-    _flags = 0;
-  } 
-}
-
-VerificationType StackMapFrame::set_locals_from_arg(
-    const methodHandle m, VerificationType thisKlass, TRAPS) {
-  symbolHandle signature(THREAD, m->signature());
-  SignatureStream ss(signature);
-  int init_local_num = 0;
-  if (!m->is_static()) {
-    init_local_num++;
-    // add one extra argument for instance method
-    if (m->name() == vmSymbols::object_initializer_name() &&
-       thisKlass.name() != vmSymbols::java_lang_Object()) {
-      _locals[0] = VerificationType::uninitialized_this_type();
-      _flags |= FLAG_THIS_UNINIT;
-    } else {
-      _locals[0] = thisKlass;
-    }
-  } 
-  
-  // local num may be greater than size of parameters because long/double occupies two slots
-  while(!ss.at_return_type()) {
-    init_local_num += _verifier->change_sig_to_verificationType(
-      &ss, &_locals[init_local_num], 
-      CHECK_VERIFY_(verifier(), VerificationType::bogus_type()));
-    ss.next();
-  }
-  _locals_size = init_local_num;
-
-  switch (ss.type()) {
-    case T_OBJECT:
-    case T_ARRAY:
-    {
-      symbolOop sig = ss.as_symbol(CHECK_(VerificationType::bogus_type()));
-      return VerificationType::reference_type(symbolHandle(THREAD, sig));
-    }
-    case T_INT:     return VerificationType::integer_type();
-    case T_BYTE:    return VerificationType::byte_type();
-    case T_CHAR:    return VerificationType::char_type();
-    case T_SHORT:   return VerificationType::short_type();
-    case T_BOOLEAN: return VerificationType::boolean_type();
-    case T_FLOAT:   return VerificationType::float_type();
-    case T_DOUBLE:  return VerificationType::double_type();
-    case T_LONG:    return VerificationType::long_type();
-    case T_VOID:    return VerificationType::bogus_type();
-    default:
-      ShouldNotReachHere();
-  }
-  return VerificationType::bogus_type();
-}
-
-void StackMapFrame::copy_locals(const StackMapFrame* src) {
-  int32_t len = src->locals_size() < _locals_size ? 
-    src->locals_size() : _locals_size;
-  for (int32_t i = 0; i < len; i++) {
-    _locals[i] = src->locals()[i];
-  }
-}
-
-void StackMapFrame::copy_stack(const StackMapFrame* src) {
-  int32_t len = src->stack_size() < _stack_size ? 
-    src->stack_size() : _stack_size;
-  for (int32_t i = 0; i < len; i++) {
-    _stack[i] = src->stack()[i];
-  }
-}
-
-
-bool StackMapFrame::is_assignable_to(
-    VerificationType* from, VerificationType* to, int32_t len, TRAPS) const {
-  for (int32_t i = 0; i < len; i++) {
-    bool subtype = to[i].is_assignable_from(
-      from[i], verifier()->current_class(), THREAD);
-    if (!subtype) {
-      return false;
-    }
-  }
-  return true;
-}
-
-bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const {
-  if (_max_locals != target->max_locals() || _stack_size != target->stack_size()) {
-    return false;
-  }
-  // Only need to compare type elements up to target->locals() or target->stack().
-  // The remaining type elements in this state can be ignored because they are
-  // assignable to bogus type.
-  bool match_locals = is_assignable_to(
-    _locals, target->locals(), target->locals_size(), CHECK_false);
-  bool match_stack = is_assignable_to(
-    _stack, target->stack(), _stack_size, CHECK_false);
-  bool match_flags = (_flags | target->flags()) == target->flags();
-  return (match_locals && match_stack && match_flags);
-}
-
-VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
-  if (_stack_size <= 0) {
-    verifier()->verify_error(_offset, "Operand stack underflow");
-    return VerificationType::bogus_type();
-  }
-  VerificationType top = _stack[--_stack_size];
-  bool subtype = type.is_assignable_from(
-    top, verifier()->current_class(), CHECK_(VerificationType::bogus_type()));
-  if (!subtype) {
-    verifier()->verify_error(_offset, "Bad type on operand stack");
-    return VerificationType::bogus_type();
-  }
-  NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
-  return top;
-}
-
-VerificationType StackMapFrame::get_local(
-    int32_t index, VerificationType type, TRAPS) {
-  if (index >= _max_locals) {
-    verifier()->verify_error(_offset, "Local variable table overflow");
-    return VerificationType::bogus_type(); 
-  }
-  bool subtype = type.is_assignable_from(_locals[index], 
-    verifier()->current_class(), CHECK_(VerificationType::bogus_type()));
-  if (!subtype) {
-    verifier()->verify_error(_offset, "Bad local variable type");
-    return VerificationType::bogus_type();
-  }
-  if(index >= _locals_size) { _locals_size = index + 1; }
-  return _locals[index];
-}
-
-void StackMapFrame::get_local_2(
-    int32_t index, VerificationType type1, VerificationType type2, TRAPS) {
-  assert(type1.is_long() || type1.is_double(), "must be long/double");
-  assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
-  if (index >= _locals_size - 1) {
-    verifier()->verify_error(_offset, "get long/double overflows locals");
-    return;
-  }
-  bool subtype1 = type1.is_assignable_from(
-    _locals[index], verifier()->current_class(), CHECK);
-  bool subtype2 = type2.is_assignable_from(
-    _locals[index+1], verifier()->current_class(), CHECK);
-  if (!subtype1 || !subtype2) {
-    verifier()->verify_error(_offset, "Bad local variable type");
-    return;
-  }
-}
-
-void StackMapFrame::set_local(int32_t index, VerificationType type, TRAPS) {
-  assert(!type.is_check(), "Must be a real type");
-  if (index >= _max_locals) {
-    verifier()->verify_error("Local variable table overflow", _offset);
-    return;
-  }
-  // If type at index is double or long, set the next location to be unusable
-  if (_locals[index].is_double() || _locals[index].is_long()) {
-    assert((index + 1) < _locals_size, "Local variable table overflow");
-    _locals[index + 1] = VerificationType::bogus_type();
-  }
-  // If type at index is double_2 or long_2, set the previous location to be unusable
-  if (_locals[index].is_double2() || _locals[index].is_long2()) {
-    assert(index >= 1, "Local variable table underflow");
-    _locals[index - 1] = VerificationType::bogus_type();
-  }
-  _locals[index] = type;
-  if (index >= _locals_size) {
-#ifdef ASSERT
-    for (int i=_locals_size; i<index; i++) {
-      assert(_locals[i] == VerificationType::bogus_type(), 
-             "holes must be bogus type");
-    }
-#endif
-    _locals_size = index + 1;
-  }
-}
-
-void StackMapFrame::set_local_2(
-    int32_t index, VerificationType type1, VerificationType type2, TRAPS) {
-  assert(type1.is_long() || type1.is_double(), "must be long/double");
-  assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
-  if (index >= _max_locals - 1) {
-    verifier()->verify_error("Local variable table overflow", _offset);
-    return;
-  }
-  // If type at index+1 is double or long, set the next location to be unusable
-  if (_locals[index+1].is_double() || _locals[index+1].is_long()) {
-    assert((index + 2) < _locals_size, "Local variable table overflow");
-    _locals[index + 2] = VerificationType::bogus_type();
-  }
-  // If type at index is double_2 or long_2, set the previous location to be unusable
-  if (_locals[index].is_double2() || _locals[index].is_long2()) {
-    assert(index >= 1, "Local variable table underflow");
-    _locals[index - 1] = VerificationType::bogus_type();
-  }
-  _locals[index] = type1;
-  _locals[index+1] = type2;
-  if (index >= _locals_size - 1) {
-#ifdef ASSERT
-    for (int i=_locals_size; i<index; i++) {
-      assert(_locals[i] == VerificationType::bogus_type(), 
-             "holes must be bogus type");
-    }
-#endif
-    _locals_size = index + 2;
-  }
-}
-
-#ifndef PRODUCT
-
-void StackMapFrame::print() const {
-  tty->print_cr("stackmap_frame[%d]:", _offset);
-  tty->print_cr("flags = 0x%x", _flags);
-  tty->print("locals[%d] = { ", _locals_size);
-  for (int32_t i = 0; i < _locals_size; i++) {
-    _locals[i].print_on(tty);
-  }
-  tty->print_cr(" }");
-  tty->print("stack[%d] = { ", _stack_size);
-  for (int32_t j = 0; j < _stack_size; j++) {
-    _stack[j].print_on(tty);
-  }
-  tty->print_cr(" }");
-}
-
-#endif
-
--- a/hotspot/src/share/vm/runtime/stackMapFrame.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,230 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)stackMapFrame.hpp	1.20 07/05/05 17:06:57 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// A StackMapFrame represents one frame in the stack map attribute.
-
-enum {
-  FLAG_THIS_UNINIT = 0x01
-};
-
-class StackMapFrame : public ResourceObj {
- private:
-  int32_t _offset;
-
-  // See comment in StackMapTable about _frame_count about why these 
-  // fields are int32_t instead of u2.
-  int32_t _locals_size;  // number of valid type elements in _locals 
-  int32_t _stack_size;   // number of valid type elements in _stack
-
-  int32_t _max_locals;
-  int32_t _max_stack;
-
-  u1 _flags;
-  VerificationType* _locals; // local variable type array
-  VerificationType* _stack;  // operand stack type array
-
-  ClassVerifier* _verifier;  // the verifier verifying this method
-
- public:
-  // constructors
-
-  // This constructor is used by the type checker to allocate frames 
-  // in type state, which have _max_locals and _max_stack array elements
-  // in _locals and _stack.
-  StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* verifier);
-
-  // This constructor is used to initialize stackmap frames in stackmap table,
-  // which have _locals_size and _stack_size array elements in _locals and _stack.
-  StackMapFrame(int32_t offset,
-                u1 flags,
-                u2 locals_size,
-                u2 stack_size, 
-                u2 max_locals,
-                u2 max_stack,
-                VerificationType* locals,
-                VerificationType* stack,
-                ClassVerifier* v) : _offset(offset), _flags(flags),
-                                    _locals_size(locals_size),
-                                    _stack_size(stack_size),
-                                    _max_locals(max_locals),
-                                    _max_stack(max_stack),
-                                    _locals(locals), _stack(stack),
-                                    _verifier(v) { }
-
-  inline void set_offset(int32_t offset)      { _offset = offset; }
-  inline void set_verifier(ClassVerifier* v)  { _verifier = v; }
-  inline void set_flags(u1 flags)             { _flags = flags; }
-  inline void set_locals_size(u2 locals_size) { _locals_size = locals_size; }
-  inline void set_stack_size(u2 stack_size)   { _stack_size = stack_size; }
-  inline void clear_stack()                   { _stack_size = 0; }
-  inline int32_t offset()   const             { return _offset; }
-  inline ClassVerifier* verifier() const      { return _verifier; }
-  inline u1 flags() const                     { return _flags; }
-  inline int32_t locals_size() const          { return _locals_size; }
-  inline VerificationType* locals() const     { return _locals; }
-  inline int32_t stack_size() const           { return _stack_size; }
-  inline VerificationType* stack() const      { return _stack; }
-  inline int32_t max_locals() const           { return _max_locals; }
-  inline int32_t max_stack() const            { return _max_stack; }
-  inline bool flag_this_uninit() const        { return _flags & FLAG_THIS_UNINIT; }
-
-  // Set locals and stack types to bogus
-  inline void reset() {
-    int32_t i;
-    for (i = 0; i < _max_locals; i++) {
-      _locals[i] = VerificationType::bogus_type();
-    }
-    for (i = 0; i < _max_stack; i++) {
-      _stack[i] = VerificationType::bogus_type();
-    }
-  }
-
-  // Return a StackMapFrame with the same local variable array and empty stack.
-  // Stack array is allocate with unused one element.
-  StackMapFrame* frame_in_exception_handler(u1 flags);
-
-  // Set local variable type array based on m's signature.
-  VerificationType set_locals_from_arg(
-    const methodHandle m, VerificationType thisKlass, TRAPS);
-
-  // Search local variable type array and stack type array.
-  // Return true if an uninitialized object is found.
-  bool has_new_object() const;
-
-  // Search local variable type array and stack type array.
-  // Set every element with type of old_object to new_object.
-  void initialize_object(
-    VerificationType old_object, VerificationType new_object);
-
-  // Copy local variable type array in src into this local variable type array.
-  void copy_locals(const StackMapFrame* src);
-
-  // Copy stack type array in src into this stack type array.
-  void copy_stack(const StackMapFrame* src);
-
-  // Return true if this stack map frame is assignable to target.
-  bool is_assignable_to(const StackMapFrame* target, TRAPS) const;
-
-  // Push type into stack type array.
-  inline void push_stack(VerificationType type, TRAPS) {
-    assert(!type.is_check(), "Must be a real type");
-    if (_stack_size >= _max_stack) {
-      verifier()->verify_error(_offset, "Operand stack overflow");
-      return;
-    }
-    _stack[_stack_size++] = type;
-  }
-
-  inline void push_stack_2(
-      VerificationType type1, VerificationType type2, TRAPS) {
-    assert(type1.is_long() || type1.is_double(), "must be long/double");
-    assert(type2.is_long2() || type2.is_double2(), "must be long/double_2");
-    if (_stack_size >= _max_stack - 1) {
-      verifier()->verify_error(_offset, "Operand stack overflow");
-      return;
-    }
-    _stack[_stack_size++] = type1;
-    _stack[_stack_size++] = type2;
-  }
-
-  // Pop and return the top type on stack without verifying.
-  inline VerificationType pop_stack(TRAPS) {
-    if (_stack_size <= 0) {
-      verifier()->verify_error(_offset, "Operand stack underflow");
-      return VerificationType::bogus_type();
-    }
-    // Put bogus type to indicate it's no longer valid.
-    // Added to make it consistent with the other pop_stack method.
-    VerificationType top = _stack[--_stack_size];
-    NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
-    return top;
-  }
-
-  // Pop and return the top type on stack type array after verifying it
-  // is assignable to type.
-  inline VerificationType pop_stack(VerificationType type, TRAPS) {
-    if (_stack_size != 0) {
-      VerificationType top = _stack[_stack_size - 1];
-      bool subtype = type.is_assignable_from(
-        top, verifier()->current_class(), 
-        CHECK_(VerificationType::bogus_type()));
-      if (subtype) {
-        _stack_size --;
-        NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
-        return top;
-      }
-    }
-    return pop_stack_ex(type, THREAD);
-  }
-
-  inline void pop_stack_2(
-      VerificationType type1, VerificationType type2, TRAPS) {
-    assert(type1.is_long2() || type1.is_double2(), "must be long/double");
-    assert(type2.is_long() || type2.is_double(), "must be long/double_2");
-    if (_stack_size >= 2) {
-      VerificationType top1 = _stack[_stack_size - 1];
-      bool subtype1 = type1.is_assignable_from(
-        top1, verifier()->current_class(), CHECK);
-      VerificationType top2 = _stack[_stack_size - 2];
-      bool subtype2 = type2.is_assignable_from(
-        top2, verifier()->current_class(), CHECK);
-      if (subtype1 && subtype2) {
-        _stack_size -= 2;
-        NOT_PRODUCT( _stack[_stack_size] = VerificationType::bogus_type(); )
-        NOT_PRODUCT( _stack[_stack_size+1] = VerificationType::bogus_type(); )
-        return;
-      }
-    }
-    pop_stack_ex(type1, THREAD);
-    pop_stack_ex(type2, THREAD);
-  }
-
-  // Uncommon case that throws exceptions.
-  VerificationType pop_stack_ex(VerificationType type, TRAPS);
-
-  // Return the type at index in local variable array after verifying
-  // it is assignable to type.
-  VerificationType get_local(int32_t index, VerificationType type, TRAPS);
-  // For long/double.
-  void get_local_2(
-    int32_t index, VerificationType type1, VerificationType type2, TRAPS);
-
-  // Set element at index in local variable array to type.
-  void set_local(int32_t index, VerificationType type, TRAPS);
-  // For long/double.
-  void set_local_2(
-    int32_t index, VerificationType type1, VerificationType type2, TRAPS);
-
-  // Private auxiliary method used only in is_assignable_to(StackMapFrame).
-  // Returns true if src is assignable to target.
-  bool is_assignable_to(
-    VerificationType* src, VerificationType* target, int32_t len, TRAPS) const;
-
-  // Debugging
-  void print() const PRODUCT_RETURN;
-};
-
--- a/hotspot/src/share/vm/runtime/stackMapTable.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,431 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stackMapTable.cpp	1.28 07/05/05 17:06:53 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_stackMapTable.cpp.incl"
-
-StackMapTable::StackMapTable(StackMapReader* reader, StackMapFrame* init_frame,
-                             u2 max_locals, u2 max_stack,
-                             char* code_data, int code_len, TRAPS) {
-  _code_length = code_len;
-  _frame_count = reader->get_frame_count();
-  if (_frame_count > 0) {
-    _frame_array = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD,
-                                                StackMapFrame*, _frame_count);
-    StackMapFrame* pre_frame = init_frame;
-    for (int32_t i = 0; i < _frame_count; i++) {
-      StackMapFrame* frame = reader->next(
-        pre_frame, i == 0, max_locals, max_stack, 
-        CHECK_VERIFY(pre_frame->verifier()));
-      _frame_array[i] = frame;
-      int offset = frame->offset();
-      if (offset >= code_len || code_data[offset] == 0) {
-        frame->verifier()->verify_error("StackMapTable error: bad offset");
-        return;
-      }
-      pre_frame = frame;
-    }
-  }
-  reader->check_end(CHECK);
-}
-
-// This method is only called by method in StackMapTable.
-int StackMapTable::get_index_from_offset(int32_t offset) const {
-  int i = 0;
-  for (; i < _frame_count; i++) {
-    if (_frame_array[i]->offset() == offset) {
-      return i;
-    }
-  }
-  return i;  // frame with offset doesn't exist in the array
-}
-
-bool StackMapTable::match_stackmap(
-    StackMapFrame* frame, int32_t target, 
-    bool match, bool update, TRAPS) const {
-  int index = get_index_from_offset(target);
-
-  return match_stackmap(
-    frame, target, index, match, 
-    update, CHECK_VERIFY_(frame->verifier(), false));
-}
-
-// Match and/or update current_frame to the frame in stackmap table with
-// specified offset and frame index. Return true if the two frames match.
-//
-// The values of match and update are:                  _match__update_
-//
-// checking a branch target/exception handler:           true   false
-// linear bytecode verification following an 
-// unconditional branch:                                 false  true
-// linear bytecode verification not following an 
-// unconditional branch:                                 true   true
-bool StackMapTable::match_stackmap(
-    StackMapFrame* frame, int32_t target, int32_t frame_index,
-    bool match, bool update, TRAPS) const {
-  if (frame_index < 0 || frame_index >= _frame_count) {
-    frame->verifier()->verify_error(frame->offset(),
-      "Expecting a stackmap frame at branch target %d", target);
-    return false;
-  }
-
-  bool result = true;
-  StackMapFrame *stackmap_frame = _frame_array[frame_index];
-  if (match) {
-    // Has direct control flow from last instruction, need to match the two
-    // frames.
-    result = frame->is_assignable_to(
-      stackmap_frame, CHECK_VERIFY_(frame->verifier(), false));
-  }
-  if (update) { 
-    // Use the frame in stackmap table as current frame
-    int lsize = stackmap_frame->locals_size();
-    int ssize = stackmap_frame->stack_size();
-    if (frame->locals_size() > lsize || frame->stack_size() > ssize) {
-      // Make sure unused type array items are all _bogus_type.
-      frame->reset();
-    }
-    frame->set_locals_size(lsize);
-    frame->copy_locals(stackmap_frame);
-    frame->set_stack_size(ssize);
-    frame->copy_stack(stackmap_frame);
-    frame->set_flags(stackmap_frame->flags());
-  }
-  return result;
-}
-
-void StackMapTable::check_jump_target(
-    StackMapFrame* frame, int32_t target, TRAPS) const {
-  bool match = match_stackmap(
-    frame, target, true, false, CHECK_VERIFY(frame->verifier()));
-  if (!match || (target < 0 || target >= _code_length)) {
-    frame->verifier()->verify_error(frame->offset(),
-      "Inconsistent stackmap frames at branch target %d", target); 
-    return;
-  }
-  // check if uninitialized objects exist on backward branches
-  check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
-}
-
-void StackMapTable::check_new_object(
-    const StackMapFrame* frame, int32_t target, TRAPS) const {
-  if (frame->offset() > target && frame->has_new_object()) {
-    frame->verifier()->verify_error(frame->offset(),
-      "Uninitialized object exists on backward branch %d", target); 
-    return;
-  }
-}
-
-#ifndef PRODUCT
-
-void StackMapTable::print() const {
-  tty->print_cr("StackMapTable: frame_count = %d", _frame_count);
-  tty->print_cr("table = { ");
-  for (int32_t i = 0; i < _frame_count; i++) {
-    _frame_array[i]->print();
-  }
-  tty->print_cr(" }");
-}
-
-#endif
-
-int32_t StackMapReader::chop(
-    VerificationType* locals, int32_t length, int32_t chops) {
-  int32_t pos = length - 1;
-  for (int32_t i=0; i<chops; i++) {
-    if (locals[pos].is_category2_2nd()) {
-      pos -= 2;
-    } else {
-      pos --;
-    }
-    if (pos<0 && i<(chops-1)) return -1;
-  }
-  return pos+1;
-}
-
-VerificationType StackMapReader::parse_verification_type(u1* flags, TRAPS) {
-  u1 tag = _stream->get_u1(THREAD);
-  if (tag < (u1)ITEM_UninitializedThis) {
-    return VerificationType::from_tag(tag);
-  }
-  if (tag == ITEM_Object) {
-    u2 class_index = _stream->get_u2(THREAD);
-    int nconstants = _cp->length();
-    if ((class_index <= 0 || class_index >= nconstants) ||
-        (!_cp->tag_at(class_index).is_klass() &&
-         !_cp->tag_at(class_index).is_unresolved_klass())) {
-      _stream->stackmap_format_error("bad class index", THREAD);
-      return VerificationType::bogus_type();
-    }
-    return VerificationType::reference_type(
-      symbolHandle(THREAD, _cp->klass_name_at(class_index)));
-  }
-  if (tag == ITEM_UninitializedThis) {
-    if (flags != NULL) {
-      *flags |= FLAG_THIS_UNINIT;
-    }
-    return VerificationType::uninitialized_this_type();
-  }
-  if (tag == ITEM_Uninitialized) {
-    u2 offset = _stream->get_u2(THREAD);
-    if (offset >= _code_length ||
-        _code_data[offset] != ClassVerifier::NEW_OFFSET) {
-      ResourceMark rm(THREAD);
-      _verifier->class_format_error(
-        "StackMapTable format error: bad offset for Uninitialized");
-      return VerificationType::bogus_type();
-    }
-    return VerificationType::uninitialized_type(offset);
-  }
-  _stream->stackmap_format_error("bad verification type", THREAD);
-  return VerificationType::bogus_type();
-}
-
-StackMapFrame* StackMapReader::next(
-    StackMapFrame* pre_frame, bool first, u2 max_locals, u2 max_stack, TRAPS) {
-  StackMapFrame* frame;
-  int offset;
-  VerificationType* locals = NULL;
-  u1 frame_type = _stream->get_u1(THREAD);
-  if (frame_type < 64) {
-    // same_frame
-    if (first) {
-      offset = frame_type;
-      // Can't share the locals array since that is updated by the verifier.
-      if (pre_frame->locals_size() > 0) {
-        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
-          THREAD, VerificationType, pre_frame->locals_size());
-      }
-    } else {
-      offset = pre_frame->offset() + frame_type + 1;
-      locals = pre_frame->locals();
-    }
-    frame = new StackMapFrame(
-      offset, pre_frame->flags(), pre_frame->locals_size(), 0,
-      max_locals, max_stack, locals, NULL, _verifier);
-    if (first && locals != NULL) {
-      frame->copy_locals(pre_frame);
-    }
-    return frame;
-  } 
-  if (frame_type < 128) {
-    // same_locals_1_stack_item_frame
-    if (first) {
-      offset = frame_type - 64;
-      // Can't share the locals array since that is updated by the verifier.
-      if (pre_frame->locals_size() > 0) {
-        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
-          THREAD, VerificationType, pre_frame->locals_size());
-      }
-    } else {
-      offset = pre_frame->offset() + frame_type - 63;
-      locals = pre_frame->locals();
-    }
-    VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(
-      THREAD, VerificationType, 2);
-    u2 stack_size = 1; 
-    stack[0] = parse_verification_type(NULL, CHECK_VERIFY_(_verifier, NULL));
-    if (stack[0].is_category2()) {
-      stack[1] = stack[0].to_category2_2nd();
-      stack_size = 2;
-    }
-    check_verification_type_array_size(
-      stack_size, max_stack, CHECK_VERIFY_(_verifier, NULL));
-    frame = new StackMapFrame(
-      offset, pre_frame->flags(), pre_frame->locals_size(), stack_size,
-      max_locals, max_stack, locals, stack, _verifier);
-    if (first && locals != NULL) {
-      frame->copy_locals(pre_frame);
-    }
-    return frame;
-  }
-
-  u2 offset_delta = _stream->get_u2(THREAD);
-
-  if (frame_type < SAME_LOCALS_1_STACK_ITEM_EXTENDED) {
-    // reserved frame types
-    _stream->stackmap_format_error(
-      "reserved frame type", CHECK_VERIFY_(_verifier, NULL));
-  }
-
-  if (frame_type == SAME_LOCALS_1_STACK_ITEM_EXTENDED) {
-    // same_locals_1_stack_item_frame_extended
-    if (first) {
-      offset = offset_delta;
-      // Can't share the locals array since that is updated by the verifier.
-      if (pre_frame->locals_size() > 0) {
-        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
-          THREAD, VerificationType, pre_frame->locals_size());
-      }
-    } else {
-      offset = pre_frame->offset() + offset_delta + 1;
-      locals = pre_frame->locals();
-    }
-    VerificationType* stack = NEW_RESOURCE_ARRAY_IN_THREAD(
-      THREAD, VerificationType, 2);
-    u2 stack_size = 1; 
-    stack[0] = parse_verification_type(NULL, CHECK_VERIFY_(_verifier, NULL));
-    if (stack[0].is_category2()) {
-      stack[1] = stack[0].to_category2_2nd();
-      stack_size = 2;
-    }
-    check_verification_type_array_size(
-      stack_size, max_stack, CHECK_VERIFY_(_verifier, NULL));
-    frame = new StackMapFrame(
-      offset, pre_frame->flags(), pre_frame->locals_size(), stack_size,
-      max_locals, max_stack, locals, stack, _verifier);
-    if (first && locals != NULL) {
-      frame->copy_locals(pre_frame);
-    }
-    return frame;
-  }
-
-  if (frame_type <= SAME_EXTENDED) {
-    // chop_frame or same_frame_extended
-    locals = pre_frame->locals();
-    int length = pre_frame->locals_size();
-    int chops = SAME_EXTENDED - frame_type;
-    int new_length = length;
-    u1 flags = pre_frame->flags();
-    if (chops != 0) {
-      new_length = chop(locals, length, chops);
-      check_verification_type_array_size(
-        new_length, max_locals, CHECK_VERIFY_(_verifier, NULL));
-      // Recompute flags since uninitializedThis could have been chopped.
-      flags = 0;
-      for (int i=0; i<new_length; i++) {
-        if (locals[i].is_uninitialized_this()) {
-          flags |= FLAG_THIS_UNINIT;
-          break;
-        }
-      }
-    }
-    if (first) {
-      offset = offset_delta;
-      // Can't share the locals array since that is updated by the verifier.
-      if (new_length > 0) {
-        locals = NEW_RESOURCE_ARRAY_IN_THREAD(
-          THREAD, VerificationType, new_length);
-      } else {
-        locals = NULL;
-      }
-    } else {
-      offset = pre_frame->offset() + offset_delta + 1;
-    }
-    frame = new StackMapFrame(
-      offset, flags, new_length, 0, max_locals, max_stack, 
-      locals, NULL, _verifier);
-    if (first && locals != NULL) {
-      frame->copy_locals(pre_frame);
-    }
-    return frame;
-  } else if (frame_type < SAME_EXTENDED + 4) {
-    // append_frame
-    int appends = frame_type - SAME_EXTENDED;
-    int real_length = pre_frame->locals_size();
-    int new_length = real_length + appends*2;
-    locals = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, VerificationType, new_length);
-    VerificationType* pre_locals = pre_frame->locals();
-    int i;
-    for (i=0; i<pre_frame->locals_size(); i++) {
-      locals[i] = pre_locals[i];
-    }
-    u1 flags = pre_frame->flags();
-    for (i=0; i<appends; i++) {
-      locals[real_length] = parse_verification_type(&flags, THREAD);
-      if (locals[real_length].is_category2()) {
-        locals[real_length + 1] = locals[real_length].to_category2_2nd();
-        ++real_length;
-      }
-      ++real_length;
-    }
-    check_verification_type_array_size(
-      real_length, max_locals, CHECK_VERIFY_(_verifier, NULL));
-    if (first) {
-      offset = offset_delta;
-    } else {
-      offset = pre_frame->offset() + offset_delta + 1;
-    }
-    frame = new StackMapFrame(
-      offset, flags, real_length, 0, max_locals, 
-      max_stack, locals, NULL, _verifier);
-    return frame;
-  }
-  if (frame_type == FULL) {
-    // full_frame
-    u1 flags = 0;
-    u2 locals_size = _stream->get_u2(THREAD);
-    int real_locals_size = 0;
-    if (locals_size > 0) {
-      locals = NEW_RESOURCE_ARRAY_IN_THREAD(
-        THREAD, VerificationType, locals_size*2);
-    }
-    int i;
-    for (i=0; i<locals_size; i++) {
-      locals[real_locals_size] = parse_verification_type(&flags, THREAD);
-      if (locals[real_locals_size].is_category2()) {
-        locals[real_locals_size + 1] = 
-          locals[real_locals_size].to_category2_2nd();
-        ++real_locals_size;
-      }
-      ++real_locals_size;
-    }
-    check_verification_type_array_size(
-      real_locals_size, max_locals, CHECK_VERIFY_(_verifier, NULL));
-    u2 stack_size = _stream->get_u2(THREAD);
-    int real_stack_size = 0;
-    VerificationType* stack = NULL;
-    if (stack_size > 0) {
-      stack = NEW_RESOURCE_ARRAY_IN_THREAD(
-        THREAD, VerificationType, stack_size*2);
-    }
-    for (i=0; i<stack_size; i++) {
-      stack[real_stack_size] = parse_verification_type(NULL, THREAD);
-      if (stack[real_stack_size].is_category2()) {
-        stack[real_stack_size + 1] = stack[real_stack_size].to_category2_2nd();
-        ++real_stack_size;
-      }
-      ++real_stack_size;
-    }
-    check_verification_type_array_size(
-      real_stack_size, max_stack, CHECK_VERIFY_(_verifier, NULL));
-    if (first) {
-      offset = offset_delta;
-    } else {
-      offset = pre_frame->offset() + offset_delta + 1;
-    }
-    frame = new StackMapFrame(
-      offset, flags, real_locals_size, real_stack_size,
-      max_locals, max_stack, locals, stack, _verifier);
-    return frame;
-  }
-
-  _stream->stackmap_format_error(
-    "reserved frame type", CHECK_VERIFY_(pre_frame->verifier(), NULL));
-  return NULL;
-}
-
--- a/hotspot/src/share/vm/runtime/stackMapTable.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,164 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)stackMapTable.hpp	1.21 07/05/05 17:06:57 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-class StackMapReader;
-
-// StackMapTable class is the StackMap table used by type checker
-class StackMapTable : public StackObj {
- private:
-  // Logically, the _frame_count (as well as many fields in the StackFrame)
-  // should be a u2, but if we defined the variable as that type it will
-  // be difficult to detect/recover from overflow or underflow conditions.
-  // Widening the type and making it signed will help detect these.
-  int32_t              _code_length;
-  int32_t              _frame_count;     // Stackmap frame count 
-  StackMapFrame**       _frame_array;
-
- public:
-  StackMapTable(StackMapReader* reader, StackMapFrame* init_frame,
-                u2 max_locals, u2 max_stack,
-                char* code_data, int code_len, TRAPS);
-
-  inline int32_t get_frame_count() const { return _frame_count; }
-  inline int get_offset(int index) const { 
-    return _frame_array[index]->offset(); 
-  }
-
-  // Match and/or update current_frame to the frame in stackmap table with
-  // specified offset. Return true if the two frames match. 
-  bool match_stackmap(
-    StackMapFrame* current_frame, int32_t offset, 
-    bool match, bool update, TRAPS) const;
-  // Match and/or update current_frame to the frame in stackmap table with
-  // specified offset and frame index. Return true if the two frames match. 
-  bool match_stackmap(
-    StackMapFrame* current_frame, int32_t offset, int32_t frame_index,
-    bool match, bool update, TRAPS) const;
-
-  // Check jump instructions. Make sure there are no uninitialized 
-  // instances on backward branch.
-  void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
-
-  // The following methods are only used inside this class.
-
-  // Returns the frame array index where the frame with offset is stored. 
-  int get_index_from_offset(int32_t offset) const;
-
-  // Make sure that there's no uninitialized object exist on backward branch.
-  void check_new_object(
-    const StackMapFrame* frame, int32_t target, TRAPS) const;
-
-  // Debugging
-  void print() const PRODUCT_RETURN;
-};
-
-class StackMapStream : StackObj {
- private:
-  typeArrayHandle _data;
-  int _index;
- public:
-  StackMapStream(typeArrayHandle ah) 
-    : _data(ah), _index(0) {
-  }
-  u1 get_u1(TRAPS) {
-    if (_data == NULL || _index >= _data->length()) {
-      stackmap_format_error("access beyond the end of attribute", CHECK_0);
-    }
-    return _data->byte_at(_index++);
-  }
-  u2 get_u2(TRAPS) {
-    if (_data == NULL || _index >= _data->length() - 1) {
-      stackmap_format_error("access beyond the end of attribute", CHECK_0);
-    }
-    u2 res = Bytes::get_Java_u2((u1*)_data->byte_at_addr(_index));
-    _index += 2;
-    return res;
-  }
-  bool at_end() {
-    return (_data == NULL) || (_index == _data->length());
-  }
-  static void stackmap_format_error(const char* msg, TRAPS);
-};
-
-class StackMapReader : StackObj {
- private:
-  // information about the class and method 
-  constantPoolHandle  _cp;
-  ClassVerifier* _verifier;
-  StackMapStream* _stream;
-  char* _code_data;
-  int32_t _code_length;
-
-  // information get from the attribute
-  int32_t  _frame_count;       // frame count 
-
-  int32_t chop(VerificationType* locals, int32_t length, int32_t chops);
-  VerificationType parse_verification_type(u1* flags, TRAPS);
-  void check_verification_type_array_size(
-      int32_t size, int32_t max_size, TRAPS) {
-    if (size < 0 || size > max_size) {
-      // Since this error could be caused someone rewriting the method
-      // but not knowing to update the stackmap data, we call the the
-      // verifier's error method, which may not throw an exception and 
-      // failover to the old verifier instead.
-      _verifier->class_format_error(
-        "StackMapTable format error: bad type array size");
-    }
-  }
-
-  enum {
-    SAME_LOCALS_1_STACK_ITEM_EXTENDED = 247,
-    SAME_EXTENDED = 251,
-    FULL = 255
-  };
-
- public:
-  // Constructor
-  StackMapReader(ClassVerifier* v, StackMapStream* stream, char* code_data,
-                 int32_t code_len, TRAPS) :
-                 _verifier(v), _stream(stream),
-                 _code_data(code_data), _code_length(code_len) {
-    methodHandle m = v->method();
-    if (m->has_stackmap_table()) {
-      _cp = constantPoolHandle(THREAD, m->constants());
-      _frame_count = _stream->get_u2(CHECK);
-    } else {
-      // There's no stackmap table present. Frame count and size are 0.
-      _frame_count = 0;
-    }
-  }
-
-  inline int32_t get_frame_count() const		{ return _frame_count; }
-  StackMapFrame* next(StackMapFrame* pre_frame, bool first,
-                      u2 max_locals, u2 max_stack, TRAPS);
-
-  void check_end(TRAPS) {
-    if (!_stream->at_end()) {
-      StackMapStream::stackmap_format_error("wrong attribute size", CHECK);
-    }
-  }
-};
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stubCodeGenerator.cpp	1.29 07/05/05 17:06:58 JVM"
+#pragma ident "@(#)stubCodeGenerator.cpp	1.30 07/05/17 16:06:31 JVM"
 #endif
 /*
  * Copyright 1997-2004 Sun Microsystems, Inc.  All Rights Reserved.
@@ -67,19 +67,60 @@
 
 // Implementation of StubCodeGenerator
 
+StubCodeGenerator::StubCodeGenerator(CodeBuffer* code) {
+  _masm = new MacroAssembler(code);
+  _first_stub = _last_stub = NULL;
+}
+
+#ifndef PRODUCT
+extern "C" {
+  static int compare_cdesc(const void* void_a, const void* void_b) {
+    int ai = (*((StubCodeDesc**) void_a))->index();
+    int bi = (*((StubCodeDesc**) void_b))->index();
+    return ai - bi;
+  }
+}
+#endif
+
+StubCodeGenerator::~StubCodeGenerator() {
+#ifndef PRODUCT
+  if (PrintStubCode) {
+    CodeBuffer* cbuf = _masm->code();
+    CodeBlob*   blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
+    if (blob != NULL) {
+      blob->set_comments(cbuf->comments());
+    }
+    bool saw_first = false;
+    StubCodeDesc* toprint[1000];
+    int toprint_len = 0;
+    for (StubCodeDesc* cdesc = _last_stub; cdesc != NULL; cdesc = cdesc->_next) {
+      toprint[toprint_len++] = cdesc;
+      if (cdesc == _first_stub) { saw_first = true; break; }
+    }
+    assert(saw_first, "must get both first & last");
+    // Print in reverse order:
+    qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
+    for (int i = 0; i < toprint_len; i++) {
+      StubCodeDesc* cdesc = toprint[i];
+      cdesc->print();
+      tty->cr();
+      Disassembler::decode(cdesc->begin(), cdesc->end());
+      tty->cr();
+    }
+  }
+#endif //PRODUCT
+}
+
+
 void StubCodeGenerator::stub_prolog(StubCodeDesc* cdesc) {
   // default implementation - do nothing
 }
 
 
 void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) {
-  // default implementation - support printing
-  if (PrintStubCode) {
-    cdesc->print();
-    tty->cr();
-    Disassembler::decode(cdesc->begin(), cdesc->end());
-    tty->cr();
-  }
+  // default implementation - record the cdesc
+  if (_first_stub == NULL)  _first_stub = cdesc;
+  _last_stub = cdesc;
 }
 
 
@@ -96,6 +137,7 @@
 StubCodeMark::~StubCodeMark() {
   _cgen->assembler()->flush();
   _cdesc->set_end(_cgen->assembler()->pc());
+  assert(StubCodeDesc::_list == _cdesc, "expected order on list");
   _cgen->stub_epilog(_cdesc);
   VTune::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
   Forte::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)stubCodeGenerator.hpp	1.24 07/05/05 17:06:58 JVM"
+#pragma ident "@(#)stubCodeGenerator.hpp	1.25 07/05/17 16:06:33 JVM"
 #endif
 /*
  * Copyright 1997-2000 Sun Microsystems, Inc.  All Rights Reserved.
@@ -57,6 +57,7 @@
   }
 
   friend class StubCodeMark;
+  friend class StubCodeGenerator;
 
  public:
   static StubCodeDesc* desc_for(address pc);     // returns the code descriptor for the code containing pc or NULL
@@ -91,8 +92,13 @@
  protected:
   MacroAssembler*  _masm;
 
+  StubCodeDesc* _first_stub;
+  StubCodeDesc* _last_stub;
+
  public:
-  StubCodeGenerator(CodeBuffer* code)            { _masm = new MacroAssembler(code); }
+  StubCodeGenerator(CodeBuffer* code);
+  ~StubCodeGenerator();
+
   MacroAssembler* assembler() const              { return _masm; }
 
   virtual void stub_prolog(StubCodeDesc* cdesc); // called by StubCodeMark constructor
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)stubRoutines.cpp	1.113 07/05/05 17:06:58 JVM"
+#pragma ident "@(#)stubRoutines.cpp	1.114 07/05/17 16:06:35 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -95,6 +95,8 @@
 address StubRoutines::_arrayof_jlong_disjoint_arraycopy  = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy);
 address StubRoutines::_arrayof_oop_disjoint_arraycopy  = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
 
+address StubRoutines::_checkcast_arraycopy               = NULL;
+address StubRoutines::_unsafe_arraycopy                  = NULL;
 address StubRoutines::_generic_arraycopy                 = NULL;
 
 // Initialization
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)stubRoutines.hpp	1.115 07/05/05 17:06:59 JVM"
+#pragma ident "@(#)stubRoutines.hpp	1.116 07/05/17 16:06:37 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -145,6 +145,9 @@
   static address _arrayof_jlong_disjoint_arraycopy;
   static address _arrayof_oop_disjoint_arraycopy;
 
+  // these are recommended but optional:
+  static address _checkcast_arraycopy;
+  static address _unsafe_arraycopy;
   static address _generic_arraycopy;
 
  public:
@@ -238,6 +241,8 @@
   static address arrayof_jlong_disjoint_arraycopy()  { return _arrayof_jlong_disjoint_arraycopy; }
   static address arrayof_oop_disjoint_arraycopy()    { return _arrayof_oop_disjoint_arraycopy; }
 
+  static address checkcast_arraycopy()     { return _checkcast_arraycopy; }
+  static address unsafe_arraycopy()        { return _unsafe_arraycopy; }
   static address generic_arraycopy()       { return _generic_arraycopy; }
 
   //
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)synchronizer.cpp	1.107 07/05/05 17:07:00 JVM"
+#pragma ident "@(#)synchronizer.cpp	1.108 07/05/17 16:06:41 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -104,7 +104,7 @@
 
 // ObjectWaiter serves as a "proxy" or surrogate thread.
 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
-// ParkEvent instead.  Beware, however, that the JVMTI/PI code
+// ParkEvent instead.  Beware, however, that the JVMTI code
 // knows about ObjectWaiters, so we'll have to reconcile that code.
 // See next_waiter(), first_waiter(), etc. 
 
@@ -3075,7 +3075,7 @@
   // and before going through the awkward and expensive state
   // transitions.  The following spin is strictly optional ...
   // Note that if we acquire the monitor from an initial spin
-  // we forgo posting JVMTI/PI/DI events and firing DTRACE probes.
+  // we forgo posting JVMTI events and firing DTRACE probes.
   if (Knob_SpinEarly && TrySpin (Self) > 0) { 
      assert (_owner == Self      , "invariant") ; 
      assert (_recursions == 0    , "invariant") ; 
@@ -3104,11 +3104,6 @@
     if (JvmtiExport::should_post_monitor_contended_enter()) {
       JvmtiExport::post_monitor_contended_enter(jt, this);
     }
-#ifdef JVMPI_SUPPORT
-    if (jvmpi::is_event_enabled(JVMPI_EVENT_MONITOR_CONTENDED_ENTER)) {
-      jvmpi::post_monitor_contended_enter_event(object());
-    }
-#endif // JVMPI_SUPPORT
 
     OSThreadContendState osts(Self->osthread());
     ThreadBlockInVM tbivm(jt);
@@ -3151,7 +3146,7 @@
   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; 
 
   // The thread -- now the owner -- is back in vm mode. 
-  // Report the glorious news via TI,PI,DTrace and jvmstat.  
+  // Report the glorious news via TI,DTrace and jvmstat.  
   // The probe effect is non-trivial.  All the reportage occurs 
   // while we hold the monitor, increasing the length of the critical 
   // section.  Amdahl's parallel speedup law comes vividly into play.  
@@ -3166,11 +3161,6 @@
   if (JvmtiExport::should_post_monitor_contended_entered()) {
     JvmtiExport::post_monitor_contended_entered(jt, this);
   }
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_MONITOR_CONTENDED_ENTERED)) {
-    jvmpi::post_monitor_contended_entered_event(object());
-  }
-#endif // JVMPI_SUPPORT
   if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) { 
      ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ; 
   }
@@ -3225,16 +3215,11 @@
    //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
    Trigger->unpark() ;     
 
-   // Maintain stats and report events to JVMTI/PI/DI
+   // Maintain stats and report events to JVMTI
    if (ObjectSynchronizer::_sync_Parks != NULL) { 
       ObjectSynchronizer::_sync_Parks->inc() ; 
    }
    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
-#ifdef JVMPI_SUPPORT
-   if (jvmpi::is_event_enabled(JVMPI_EVENT_MONITOR_CONTENDED_EXIT)) {
-      jvmpi::post_monitor_contended_exit_event(object());
-   }
-#endif // JVMPI_SUPPORT
 }
 
 
@@ -3773,16 +3758,6 @@
    SpinAcquire (&_WaitSetLock, "WaitSet - add") ; 
    AddWaiter (&node) ; 
    SpinRelease (&_WaitSetLock) ; 
-   
-#ifdef JVMPI_SUPPORT
-   if (jvmpi::is_event_enabled(JVMPI_EVENT_MONITOR_WAIT)) {
-     jvmpi::post_monitor_wait_event((oop)object(), millis);
-   }
-   elapsedTimer waitTimer;
-   if (jvmpi::is_event_enabled(JVMPI_EVENT_MONITOR_WAITED)) {
-     waitTimer.start();
-   }
-#endif // JVMPI_SUPPORT
 
    if ((SyncFlags & 4) == 0) { 
       _Responsible = NULL ; 
@@ -3918,12 +3893,6 @@
 
    jt->set_current_waiting_monitor(NULL);
  
-#ifdef JVMPI_SUPPORT
-   if (jvmpi::is_event_enabled(JVMPI_EVENT_MONITOR_WAITED)) {
-     waitTimer.stop();
-     jvmpi::post_monitor_waited_event((oop)object(), waitTimer.milliseconds());
-   }
-#endif // JVMPI_SUPPORT
    guarantee (_recursions == 0, "invariant") ; 
    _recursions = save;     // restore the old recursion count
    _waiters--;             // decrement the number of waiters
@@ -4221,11 +4190,7 @@
 // Ideally, the raw monitor implementation would be built on top of 
 // park-unpark and nothing else.
 //
-#ifdef JVMPI_SUPPORT
-// raw monitors are used mainly by JVMTI and JVMPI.  
-#else // !JVMPI_SUPPORT
 // raw monitors are used mainly by JVMTI
-#endif // JVMPI_SUPPORT
 // The raw monitor implementation borrows the ObjectMonitor structure,
 // but the operators are degenerate and extremely simple.  
 //
@@ -4376,11 +4341,7 @@
 }
 
 // Any JavaThread will enter here with state _thread_blocked
-#ifdef JVMPI_SUPPORT
-int ObjectMonitor::raw_enter(TRAPS, bool ReportEvents) {
-#else // !JVMPI_SUPPORT
 int ObjectMonitor::raw_enter(TRAPS) {
-#endif // JVMPI_SUPPORT
   TEVENT (raw_enter) ; 
   void * Contended ; 
 
@@ -4409,23 +4370,9 @@
   if (Contended == NULL) { 
      guarantee (_owner == THREAD, "invariant") ; 
      guarantee (_recursions == 0, "invariant") ;
-#ifdef JVMPI_SUPPORT
-     if (ReportEvents) { 
-        ((RawMonitor *)this)->add_to_locked_list();
-     }
-#endif // JVMPI_SUPPORT
      return OM_OK ; 
   }
 
-#ifdef JVMPI_SUPPORT
-  if (ReportEvents) {
-      Atomic::inc_ptr(&_count);
-      if (jvmpi::is_event_enabled(JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTER)) {
-        jvmpi::post_raw_monitor_contended_enter_event((RawMonitor *)this);
-      }
-  }
-#endif // JVMPI_SUPPORT
-
   THREAD->set_current_pending_monitor(this);
 
   if (!THREAD->is_Java_thread()) {
@@ -4446,11 +4393,7 @@
 
        // This thread was externally suspended
        //
-#ifdef JVMPI_SUPPORT
-       // This logic isn't needed for JVMTI or JVM/PI raw monitors,
-#else // !JVMPI_SUPPORT
        // This logic isn't needed for JVMTI raw monitors,
-#endif // JVMPI_SUPPORT
        // but doesn't hurt just in case the suspend rules change. This
 	   // logic is needed for the ObjectMonitor.wait() reentry phase.
 	   // We have reentered the contended monitor, but while we were
@@ -4470,29 +4413,12 @@
 
   THREAD->set_current_pending_monitor(NULL);
   guarantee (_recursions == 0, "invariant") ; 
-#ifdef JVMPI_SUPPORT
-  if (ReportEvents) {
-    ((RawMonitor *)this)->add_to_locked_list();
-    if (jvmpi::is_event_enabled(JVMPI_EVENT_RAW_MONITOR_CONTENDED_ENTERED)) {
-      jvmpi::post_raw_monitor_contended_entered_event((RawMonitor *)this);
-    }
-    Atomic::dec_ptr(&_count);
-  }
-#endif // JVMPI_SUPPORT
   return OM_OK;
 }
 
-#ifdef JVMPI_SUPPORT
-// Used mainly for JVMTI, JVMPI raw monitor implementation
-#else // !JVMPI_SUPPORT
 // Used mainly for JVMTI raw monitor implementation
-#endif // JVMPI_SUPPORT
 // Also used for ObjectMonitor::wait().
-#ifdef JVMPI_SUPPORT
-int ObjectMonitor::raw_exit(TRAPS, bool ReportEvents) {
-#else // !JVMPI_SUPPORT
 int ObjectMonitor::raw_exit(TRAPS) {
-#endif // JVMPI_SUPPORT
   TEVENT (raw_exit) ; 
   if (THREAD != _owner) {
     return OM_ILLEGAL_MONITOR_STATE;
@@ -4502,30 +4428,13 @@
     return OM_OK ; 
   }
 
-#ifdef JVMPI_SUPPORT
-  if (ReportEvents) {
-    ((RawMonitor *)this)->remove_from_locked_list();
-  }
-#endif // JVMPI_SUPPORT
-
   void * List = _EntryList ; 
   SimpleExit (THREAD) ; 
 
-#ifdef JVMPI_SUPPORT
-  if (ReportEvents && List != NULL) { 
-    if (jvmpi::is_event_enabled(JVMPI_EVENT_RAW_MONITOR_CONTENDED_EXIT)) {
-      jvmpi::post_raw_monitor_contended_exit_event((RawMonitor*)this);
-    }
-  }
-#endif // JVMPI_SUPPORT
   return OM_OK;
 }
 
-#ifdef JVMPI_SUPPORT
-// Used for JVMTI, JVMPI raw monitor implementation.
-#else // !JVMPI_SUPPORT
 // Used for JVMTI raw monitor implementation.
-#endif // JVMPI_SUPPORT
 // All JavaThreads will enter here with state _thread_blocked
 int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
   TEVENT (raw_wait) ; 
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)synchronizer.hpp	1.69 07/05/05 17:07:00 JVM"
+#pragma ident "@(#)synchronizer.hpp	1.70 07/05/17 16:06:44 JVM"
 #endif
 /*
  * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -147,9 +147,6 @@
 
   // JNI detach support
   static void release_monitors_owned_by_thread(TRAPS);
-#ifdef JVMPI_SUPPORT
-  // JVMPI also uses monitors_iterate
-#endif // JVMPI_SUPPORT
   static void monitors_iterate(MonitorClosure* m);
   
   // GC: we current use aggressive monitor deflation policy
--- a/hotspot/src/share/vm/runtime/thread.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)thread.cpp	1.806 07/05/05 17:06:55 JVM"
+#pragma ident "@(#)thread.cpp	1.807 07/05/17 16:06:51 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -135,9 +135,6 @@
   _jvmti_env_iteration_count = 0;
   _vm_operation_started_count = 0;
   _vm_operation_completed_count = 0;
-#ifdef JVMPI_SUPPORT
-  _rawmonitor_list = NULL;
-#endif // JVMPI_SUPPORT
   _current_pending_monitor = NULL;
   _current_pending_monitor_is_from_java = true;
   _current_waiting_monitor = NULL;
@@ -631,9 +628,6 @@
 }
 #endif /* PRODUCT */
 
-#ifdef JVMPI_SUPPORT
-// Called by the jvmpi profiler to get the sp and pc for a thread_in_Java
-#endif // JVMPI_SUPPORT
 // Called by flat profiler 
 // Callers have already called wait_for_ext_suspend_completion
 // The assertion for that is currently too complex to put here:
@@ -704,9 +698,6 @@
 void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
   if      (is_VM_thread())                  st->print("VMThread");
   else if (is_Compiler_thread())            st->print("CompilerThread");
-#ifdef JVMPI_SUPPORT
-  else if (is_jvmpi_daemon_thread())        st->print("JVMPIDaemonThread");
-#endif // JVMPI_SUPPORT
   else if (is_Java_thread())                st->print("JavaThread");
   else if (is_GC_task_thread())             st->print("GCTaskThread");
   else if (is_Watcher_thread())             st->print("WatcherThread");
@@ -1174,11 +1165,6 @@
   _thread_stat = NULL;
   _thread_stat = new ThreadStatistics();
   _blocked_on_compilation = false;
-#ifdef JVMPI_SUPPORT
-  _jvmpi_data = NULL;
-  _last_sum = 0;
-  _deferred_obj_alloc_events = NULL;
-#endif // JVMPI_SUPPORT
   _jni_active_critical = 0;
   _do_not_unlock_if_synchronized = false;
   _cached_monitor_info = NULL;
@@ -1361,11 +1347,6 @@
   if (JvmtiExport::should_post_thread_life()) {
     JvmtiExport::post_thread_start(this);
   }
-#ifdef JVMPI_SUPPORT
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_THREAD_START)) {
-    jvmpi::post_thread_start_event(this);
-  }
-#endif // JVMPI_SUPPORT
 
   // We call another function to do the rest so we are sure that the stack addresses used
   // from there will be lower than the stack base just computed
@@ -1508,13 +1489,6 @@
       JvmtiExport::post_thread_end(this);
     }
 
-#ifdef JVMPI_SUPPORT
-    // notify jvmpi
-    if (jvmpi::is_event_enabled(JVMPI_EVENT_THREAD_END)) {
-      jvmpi::post_thread_end_event(this);
-    }
-#endif // JVMPI_SUPPORT
-
     // We have notified the agents that we are exiting, before we go on,
     // we must check for a pending external suspend request and honor it
     // in order to not surprise the thread that made the suspend request.
@@ -1543,7 +1517,7 @@
     }
     // no more external suspends are allowed at this point
   } else {
-    // before_exit() has already posted JVMTI/PI THREAD_END events
+    // before_exit() has already posted JVMTI THREAD_END events
   }
   
   // Notify waiters on thread object. This has to be done after exit() is called
@@ -2602,31 +2576,6 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-//
-// Fabricate heavyweight monitor information for each lightweight monitor
-// found in this Java thread.
-//
-void JavaThread::jvmpi_fab_heavy_monitors(bool query, int* index, GrowableArray<ObjectMonitor*>* fab_list) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-
-  if (!has_last_Java_frame()) return;
-  ResourceMark rm;
-  HandleMark   hm;
-  RegisterMap reg_map(this);
-  vframe* start_vf = last_java_vframe(&reg_map);
-  int count = 0;
-  for (vframe* f = start_vf; f != NULL; f = f->sender()) {
-    if (count++ >= MaxJavaStackTraceDepth) return;  // stack too deep
-
-    if (!f->is_java_frame()) continue;
-    javaVFrame* jvf = javaVFrame::cast(f);
-    jvf->jvmpi_fab_heavy_monitors(query, index, count, fab_list);
-  }
-}
-#endif // JVMPI_SUPPORT
-
-
 void JavaThread::print_stack_on(outputStream* st) {  
   if (!has_last_Java_frame()) return;
   ResourceMark rm;
@@ -3079,26 +3028,6 @@
     create_vm_init_libraries();
   }
 
-#ifdef JVMPI_SUPPORT
-  // Issue class load, thread start and object allocation events for all
-  // preloaded classes if there is an interested agent. Also, lift the
-  // event posting restriction whether there is an agent attached now or
-  // not; an agent may attach later when no restrictions apply.
-  //
-  // The JVM/PI event posting restriction can cause deadlock if there
-  // is also a JVM/DI or JVM/TI agent attached and the agent's VM_INIT
-  // event handler causes a JVM/PI event to be posted. To prevent the
-  // deadlock risk, JvmtiExport::post_vm_initialized() must be called
-  // after the JVM/PI event posting restriction is lifted.
-  jvmpi::post_vm_initialization_events();
-
-  if (jvmpi::is_event_enabled(JVMPI_EVENT_JVM_INIT_DONE)) {
-    // Tell the agent that the VM is ready and events requested (e.g. heap dump)
-    // from this point onwards will contains IDs that are all known to the agent
-    jvmpi::post_vm_initialized_event();
-  }
-#endif // JVMPI_SUPPORT
-
   // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   JvmtiExport::post_vm_initialized();
 
@@ -3319,11 +3248,7 @@
 //      > run VM level shutdown hooks (they are registered through JVM_OnExit(),
 //        currently the only user of this mechanism is File.deleteOnExit())
 //      > stop flat profiler, StatSampler, watcher thread, CMS threads,
-#ifdef JVMPI_SUPPORT
-//        post thread end and vm death events to JVMTI/PI, disable JVMPI,
-#else // !JVMPI_SUPPORT
 //        post thread end and vm death events to JVMTI,
-#endif // JVMPI_SUPPORT
 //        stop signal thread
 //   + Call JavaThread::exit(), it will:
 //      > release JNI handle blocks, remove stack guard pages
@@ -3332,11 +3257,7 @@
 //   + Stop VM thread, it will bring the remaining VM to a safepoint and stop
 //     the compiler threads at safepoint
 //     <-- do not use anything that could get blocked by Safepoint -->
-#ifdef JVMPI_SUPPORT
-//   + Disable tracing at JNI/JVM/JVMPI barriers
-#else // !JVMPI_SUPPORT
 //   + Disable tracing at JNI/JVM barriers
-#endif // JVMPI_SUPPORT
 //   + Set _vm_exited flag for threads that are still running native code
 //   + Delete this thread
 //   + Call exit_globals()
@@ -3404,18 +3325,11 @@
   // simply kill or suspend them, as it is inherently deadlock-prone.
 
 #ifndef PRODUCT
-#ifdef JVMPI_SUPPORT
-  // disable function tracing at JNI/JVM/JVMPI barriers
-#else // !JVMPI_SUPPORT
   // disable function tracing at JNI/JVM barriers
-#endif // JVMPI_SUPPORT
   TraceHPI = false;
   TraceJNICalls = false;
   TraceJVMCalls = false;
   TraceRuntimeCalls = false;
-#ifdef JVMPI_SUPPORT
-  TraceJVMPI = false;
-#endif // JVMPI_SUPPORT
 #endif
 
   VM_Exit::set_vm_exited();
@@ -3595,39 +3509,6 @@
 }
 
 
-#ifdef JVMPI_SUPPORT
-//
-// Fabricate heavyweight monitor information for each lightweight monitor
-// found in every Java thread.
-//
-GrowableArray<ObjectMonitor*>* Threads::jvmpi_fab_heavy_monitors() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-
-  // In the first pass we have to count the number of monitors needed
-  int index = 0;
-  ALL_JAVA_THREADS(p) {
-    if (p->is_Compiler_thread()) continue;
-
-    p->jvmpi_fab_heavy_monitors(true, &index, NULL);
-  }
-
-  GrowableArray<ObjectMonitor*>* result = new GrowableArray<ObjectMonitor*>(index, index, NULL);
-  //
-  // In the second pass we actually get the data. We can't call append()
-  // down in javaVFrame::jvmpi_fab_heavy_monitors() because there are other
-  // uses of ResourceMark between here and there.
-  //
-  index = 0;
-  ALL_JAVA_THREADS(q) {
-    if (q->is_Compiler_thread()) continue;
-
-    q->jvmpi_fab_heavy_monitors(false, &index, result);
-  }
-  return result;
-}
-#endif // JVMPI_SUPPORT
-
-
 // Get count Java threads that are waiting to enter the specified monitor.
 GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
   address monitor, bool doLock) {
--- a/hotspot/src/share/vm/runtime/thread.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)thread.hpp	1.451 07/05/05 17:06:58 JVM"
+#pragma ident "@(#)thread.hpp	1.452 07/05/17 16:06:58 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -70,10 +70,6 @@
   void  operator delete(void* p);
  private:
 
-#ifdef JVMPI_SUPPORT
-  RawMonitor* _rawmonitor_list;                  // list of locked raw monitors (JVMPI support)
-#endif // JVMPI_SUPPORT
-
   // ***************************************************************
   // Suspend and resume support
   // ***************************************************************
@@ -85,13 +81,8 @@
   // remains.
   //
   // External suspend/resume requests come from JVM_SuspendThread,
-#ifdef JVMPI_SUPPORT
-  // JVM_ResumeThread, JVMTI SuspendThread, JVMTI ResumeThread,
-  // JVM/PI SuspendThread, and finally JVM/PI ResumeThread. External
-#else // !JVMPI_SUPPORT
   // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
   // ResumeThread. External
-#endif // JVMPI_SUPPORT
   // suspend requests cause _external_suspend to be set and external
   // resume requests cause _external_suspend to be cleared.
   // External suspend requests do not nest on top of other external
@@ -133,11 +124,7 @@
   // Notes: 
   // 1. The suspend/resume logic no longer uses ThreadState in OSThread
   // but we still update its value to keep other part of the system (mainly 
-#ifdef JVMPI_SUPPORT
-  // JVMPI and JVMTI) happy. ThreadState is legacy code (see notes in 
-#else // !JVMPI_SUPPORT
   // JVMTI) happy. ThreadState is legacy code (see notes in 
-#endif // JVMPI_SUPPORT
   // osThread.hpp).
   // 
   // 2. It would be more natural if set_external_suspend() is private and
@@ -267,9 +254,6 @@
   // Generally will be true only of VM thread and parallel GC WorkGang
   // threads.
   virtual bool is_GC_task_thread() const             { return false; }
-#ifdef JVMPI_SUPPORT
-  virtual bool is_jvmpi_daemon_thread() const        { return false; }
-#endif // JVMPI_SUPPORT
   virtual bool is_Watcher_thread() const             { return false; }
   virtual bool is_ConcurrentGC_thread() const        { return false; }
 
@@ -368,12 +352,6 @@
   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 
-#ifdef JVMPI_SUPPORT
-  // JVMPI support
-  RawMonitor* rawmonitor_list() const            { return _rawmonitor_list; }
-  void        set_rawmonitor_list(RawMonitor* r) { _rawmonitor_list = r;    }
-#endif // JVMPI_SUPPORT
-
   // For tracking the heavyweight monitor the thread is pending on.
   ObjectMonitor* current_pending_monitor() {
     return _current_pending_monitor;
@@ -770,13 +748,6 @@
   // support for JNI critical regions
   jint    _jni_active_critical;                  // count of entries into JNI critical region
 
-#ifdef JVMPI_SUPPORT
-  void* _jvmpi_data;     // support for JVM/PI {Get,Set}ThreadLocalStorage()
-  uintptr_t  _last_sum;  // support for JVM/PI ThreadHasRun()
-                         // support for deferred OBJECT_ALLOC events
-  GrowableArray<DeferredObjAllocEvent*>* _deferred_obj_alloc_events;
-#endif // JVMPI_SUPPORT
-
   // For deadlock detection.
   int _depth_first_number;
 
@@ -1272,11 +1243,6 @@
   // Used for security checks
   klassOop security_get_caller_class(int depth);  
 
-#ifdef JVMPI_SUPPORT
-  // Fabricate heavyweight monitors for any lightweight monitors that this thread owns
-  void jvmpi_fab_heavy_monitors(bool query, int* index, GrowableArray<ObjectMonitor*>* fab_list);
-#endif // JVMPI_SUPPORT
-
   // Print stack trace in external format
   void print_stack_on(outputStream* st);
   void print_stack() { print_stack_on(tty); }
@@ -1299,21 +1265,6 @@
  public:
    bool profile_last_Java_frame(frame* fr);
 
-#ifdef JVMPI_SUPPORT
- public:
-  // support for JVM/PI {Get,Set}ThreadLocalStorage()
-  void* jvmpi_data() const                       { return _jvmpi_data; }
-  void set_jvmpi_data(void* data)                { _jvmpi_data = data; }
-
-  // support for JVM/PI ThreadHasRun()
-  uintptr_t last_sum() const                     { return _last_sum; }
-  void set_last_sum(uintptr_t s)                 { _last_sum = s; }
-
-  // support for deferred OBJECT_ALLOC events
-  GrowableArray<DeferredObjAllocEvent *>* deferred_obj_alloc_events() { return _deferred_obj_alloc_events; }
-  void set_deferred_obj_alloc_events(GrowableArray<DeferredObjAllocEvent *>* a) { _deferred_obj_alloc_events = a; }
-#endif // JVMPI_SUPPORT
-
  private:
    ThreadProfiler* _thread_profiler;
  private:
@@ -1515,23 +1466,6 @@
   return cur_sp > low_addr ? cur_sp - low_addr : 0;
 }
 
-#ifdef JVMPI_SUPPORT
-// JVMPIDaemonThread are created when a profiling agent calls jvmpi::create_system_thread
-extern "C" {
-typedef void (*JVMPIDaemonFunction)(void*);
-}
-
-class JVMPIDaemonThread: public JavaThread {  
-  friend class VMStructs;
- private:
-  JVMPIDaemonFunction _func;
- public:
-  JVMPIDaemonThread(ThreadFunction entry_point, JVMPIDaemonFunction f) : JavaThread(entry_point), _func(f) {}
-  JVMPIDaemonFunction function() const           { return _func; }
-  virtual bool is_jvmpi_daemon_thread() const    { return true;  }
-};
-#endif // JVMPI_SUPPORT
-
 // A JavaThread for low memory detection support
 class LowMemoryDetectorThread : public JavaThread {
   friend class VMStructs;
@@ -1655,11 +1589,6 @@
   }
   static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);
 
-#ifdef JVMPI_SUPPORT
-  // Fabricate heavyweight monitor info for each lightweight monitor.
-  static GrowableArray<ObjectMonitor*>* jvmpi_fab_heavy_monitors();
-#endif // JVMPI_SUPPORT
-
   // Get Java threads that are waiting to enter a monitor. If doLock
   // is true, then Threads_lock is grabbed as needed. Otherwise, the
   // VM needs to be at a safepoint.
--- a/hotspot/src/share/vm/runtime/verificationType.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,139 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)verificationType.cpp	1.16 07/05/05 17:07:01 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_verificationType.cpp.incl"
-
-VerificationType VerificationType::from_tag(u1 tag) {
-  switch (tag) {
-    case ITEM_Top:     return bogus_type();
-    case ITEM_Integer: return integer_type();
-    case ITEM_Float:   return float_type();
-    case ITEM_Double:  return double_type();
-    case ITEM_Long:    return long_type();
-    case ITEM_Null:    return null_type();
-    default:
-      ShouldNotReachHere();
-      return bogus_type();
-  }
-}
-
-bool VerificationType::is_reference_assignable_from(
-    const VerificationType& from, instanceKlassHandle context, TRAPS) const {
-  if (from.is_null()) {
-    // null is assignable to any reference
-    return true;
-  } else if (is_null()) {
-    return false;
-  } else if (name() == from.name()) {
-    return true;
-  } else if (is_object()) {
-    // We need check the class hierarchy to check assignability
-    if (name() == vmSymbols::java_lang_Object()) {
-      // any object or array is assignable to java.lang.Object 
-      return true;
-    }
-    klassOop this_class = SystemDictionary::resolve_or_fail(
-        name_handle(), Handle(THREAD, context->class_loader()), 
-        Handle(THREAD, context->protection_domain()), true, CHECK_false);
-    if (this_class->klass_part()->is_interface()) {
-      // We treat interfaces as java.lang.Object, including 
-      // java.lang.Cloneable and java.io.Serializable
-      return true;
-    } else if (from.is_object()) {
-      klassOop from_class = SystemDictionary::resolve_or_fail(
-          from.name_handle(), Handle(THREAD, context->class_loader()), 
-          Handle(THREAD, context->protection_domain()), true, CHECK_false);
-      return instanceKlass::cast(from_class)->is_subclass_of(this_class);
-    }
-  } else if (is_array() && from.is_array()) {
-    VerificationType comp_this = get_component(CHECK_false);
-    VerificationType comp_from = from.get_component(CHECK_false);
-    return comp_this.is_assignable_from(comp_from, context, CHECK_false);
-  }
-  return false;
-}
-
-VerificationType VerificationType::get_component(TRAPS) const {
-  assert(is_array() && name()->utf8_length() >= 2, "Must be a valid array");
-  symbolOop component;
-  switch (name()->byte_at(1)) {
-    case 'Z': return VerificationType(Boolean);
-    case 'B': return VerificationType(Byte);
-    case 'C': return VerificationType(Char);
-    case 'S': return VerificationType(Short);
-    case 'I': return VerificationType(Integer);
-    case 'J': return VerificationType(Long);
-    case 'F': return VerificationType(Float);
-    case 'D': return VerificationType(Double);
-    case '[': 
-      component = SymbolTable::lookup(
-        name(), 1, name()->utf8_length(), 
-        CHECK_(VerificationType::bogus_type()));
-      return VerificationType::reference_type(component);
-    case 'L': 
-      component = SymbolTable::lookup(
-        name(), 2, name()->utf8_length() - 1, 
-        CHECK_(VerificationType::bogus_type()));
-      return VerificationType::reference_type(component);
-    default:
-      ShouldNotReachHere();
-      return VerificationType::bogus_type();
-  }
-}
-
-#ifndef PRODUCT
-
-void VerificationType::print_on(outputStream* st) const {
-  switch (_u._data) {
-    case Bogus:            st->print(" bogus "); break;
-    case Category1:        st->print(" category1 "); break;
-    case Category2:        st->print(" category2 "); break;
-    case Category2_2nd:    st->print(" category2_2nd "); break;
-    case Boolean:          st->print(" boolean "); break;
-    case Byte:             st->print(" byte "); break;
-    case Short:            st->print(" short "); break;
-    case Char:             st->print(" char "); break;
-    case Integer:          st->print(" integer "); break;
-    case Float:            st->print(" float "); break;
-    case Long:             st->print(" long "); break;
-    case Double:           st->print(" double "); break;
-    case Long_2nd:         st->print(" long_2nd "); break;
-    case Double_2nd:       st->print(" double_2nd "); break;
-    case Null:             st->print(" null "); break;
-    default:
-      if (is_uninitialized_this()) {
-        st->print(" uninitializedThis "); 
-      } else if (is_uninitialized()) {
-        st->print(" uninitialized %d ", bci()); 
-      } else {
-        st->print(" class %s ", name()->as_klass_external_name());
-      }
-  }
-}
-
-#endif
--- a/hotspot/src/share/vm/runtime/verificationType.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,308 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)verificationType.hpp	1.17 07/05/05 17:07:01 JVM"
-#endif
-/*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-enum {
-  // As specifed in the JVM spec
-  ITEM_Top = 0, 
-  ITEM_Integer = 1, 
-  ITEM_Float = 2, 
-  ITEM_Double = 3, 
-  ITEM_Long = 4, 
-  ITEM_Null = 5, 
-  ITEM_UninitializedThis = 6,
-  ITEM_Object = 7,
-  ITEM_Uninitialized = 8,
-  ITEM_Bogus = (uint)-1  
-};
-
-class VerificationType VALUE_OBJ_CLASS_SPEC {
-  private:
-    // Least significant bits of _handle are always 0, so we use these as 
-    // the indicator that the _handle is valid.  Otherwise, the _data field
-    // contains encoded data (as specified below).  Should the VM change 
-    // and the lower bits on oops aren't 0, the assert in the constructor
-    // will catch this and we'll have to add a descriminator tag to this 
-    // structure.
-    union {
-      symbolOop* _handle;
-      uintptr_t _data; 
-    } _u;
-
-    enum {
-      // These rest are not found in classfiles, but used by the verifier
-      ITEM_Boolean = 9, ITEM_Byte, ITEM_Short, ITEM_Char,
-      ITEM_Long_2nd, ITEM_Double_2nd
-    };
-
-    // Enum for the _data field
-    enum { 
-      // Bottom two bits determine if the type is a reference, primitive, 
-      // uninitialized or a query-type.
-      TypeMask           = 0x00000003, 
-
-      // Topmost types encoding
-      Reference          = 0x0,        // _handle contains the name
-      Primitive          = 0x1,        // see below for primitive list
-      Uninitialized      = 0x2,        // 0x00ffff00 contains bci
-      TypeQuery          = 0x3,        // Meta-types used for category testing
-
-      // Utility flags 
-      ReferenceFlag      = 0x00,       // For reference query types
-      Category1Flag      = 0x01,       // One-word values
-      Category2Flag      = 0x02,       // First word of a two-word value
-      Category2_2ndFlag  = 0x04,       // Second word of a two-word value
-
-      // special reference values
-      Null               = 0x00000000, // A reference with a 0 handle is null
-
-      // Primitives categories (the second byte determines the category)
-      Category1          = (Category1Flag     << 1 * BitsPerByte) | Primitive,
-      Category2          = (Category2Flag     << 1 * BitsPerByte) | Primitive,
-      Category2_2nd      = (Category2_2ndFlag << 1 * BitsPerByte) | Primitive,
-
-      // Primitive values (type descriminator stored in most-signifcant bytes)
-      Bogus              = (ITEM_Bogus      << 2 * BitsPerByte) | Category1,
-      Boolean            = (ITEM_Boolean    << 2 * BitsPerByte) | Category1,
-      Byte               = (ITEM_Byte       << 2 * BitsPerByte) | Category1,
-      Short              = (ITEM_Short      << 2 * BitsPerByte) | Category1,
-      Char               = (ITEM_Char       << 2 * BitsPerByte) | Category1,
-      Integer            = (ITEM_Integer    << 2 * BitsPerByte) | Category1,
-      Float              = (ITEM_Float      << 2 * BitsPerByte) | Category1,
-      Long               = (ITEM_Long       << 2 * BitsPerByte) | Category2,
-      Double             = (ITEM_Double     << 2 * BitsPerByte) | Category2,
-      Long_2nd           = (ITEM_Long_2nd   << 2 * BitsPerByte) | Category2_2nd,
-      Double_2nd         = (ITEM_Double_2nd << 2 * BitsPerByte) | Category2_2nd,
-
-      // Used by Uninitialized (second and third bytes hold the bci)
-      BciMask            = 0xffff << 1 * BitsPerByte,
-      BciForThis         = ((u2)-1),   // A bci of -1 is an Unintialized-This
-
-      // Query values
-      ReferenceQuery     = (ReferenceFlag     << 1 * BitsPerByte) | TypeQuery,
-      Category1Query     = (Category1Flag     << 1 * BitsPerByte) | TypeQuery,
-      Category2Query     = (Category2Flag     << 1 * BitsPerByte) | TypeQuery,
-      Category2_2ndQuery = (Category2_2ndFlag << 1 * BitsPerByte) | TypeQuery
-    };
-
-  VerificationType(uintptr_t raw_data) {
-    _u._data = raw_data;
-  }
-
- public:
-
-  VerificationType() { *this = bogus_type(); }
-
-  // Create verification types
-  static VerificationType bogus_type() { return VerificationType(Bogus); }
-  static VerificationType null_type() { return VerificationType(Null); }
-  static VerificationType integer_type() { return VerificationType(Integer); }
-  static VerificationType float_type() { return VerificationType(Float); }
-  static VerificationType long_type() { return VerificationType(Long); }
-  static VerificationType long2_type() { return VerificationType(Long_2nd); }
-  static VerificationType double_type() { return VerificationType(Double); }
-  static VerificationType boolean_type() { return VerificationType(Boolean); }
-  static VerificationType byte_type() { return VerificationType(Byte); }
-  static VerificationType char_type() { return VerificationType(Char); }
-  static VerificationType short_type() { return VerificationType(Short); }
-  static VerificationType double2_type() 
-    { return VerificationType(Double_2nd); }
-
-  // "check" types are used for queries.  A "check" type is not assignable
-  // to anything, but the specified types are assignable to a "check".  For 
-  // example, any category1 primitive is assignable to category1_check and 
-  // any reference is assignable to reference_check.
-  static VerificationType reference_check() 
-    { return VerificationType(ReferenceQuery); }
-  static VerificationType category1_check() 
-    { return VerificationType(Category1Query); }
-  static VerificationType category2_check() 
-    { return VerificationType(Category2Query); }
-  static VerificationType category2_2nd_check() 
-    { return VerificationType(Category2_2ndQuery); }
-
-  // For reference types, store the actual oop* handle 
-  static VerificationType reference_type(symbolHandle sh) { 
-      assert(((uintptr_t)sh.raw_value() & 0x3) == 0, "Oops must be aligned");
-      // If the above assert fails in the future because oop* isn't aligned, 
-      // then this type encoding system will have to change to have a tag value
-      // to descriminate between oops and primitives.
-      return VerificationType((uintptr_t)((symbolOop*)sh.raw_value())); 
-  }
-  static VerificationType reference_type(symbolOop s, TRAPS) 
-    { return reference_type(symbolHandle(THREAD, s)); }
-
-  static VerificationType uninitialized_type(u2 bci) 
-    { return VerificationType(bci << 1 * BitsPerByte | Uninitialized); }
-  static VerificationType uninitialized_this_type() 
-    { return uninitialized_type(BciForThis); }
-
-  // Create based on u1 read from classfile
-  static VerificationType from_tag(u1 tag);
-
-  bool is_bogus() const     { return (_u._data == Bogus); }
-  bool is_null() const      { return (_u._data == Null); }
-  bool is_boolean() const   { return (_u._data == Boolean); }
-  bool is_byte() const      { return (_u._data == Byte); }
-  bool is_char() const      { return (_u._data == Char); }
-  bool is_short() const     { return (_u._data == Short); }
-  bool is_integer() const   { return (_u._data == Integer); }
-  bool is_long() const      { return (_u._data == Long); }
-  bool is_float() const     { return (_u._data == Float); }
-  bool is_double() const    { return (_u._data == Double); }
-  bool is_long2() const     { return (_u._data == Long_2nd); }
-  bool is_double2() const   { return (_u._data == Double_2nd); }
-  bool is_reference() const { return ((_u._data & TypeMask) == Reference); }
-  bool is_category1() const { 
-    // This should return true for all one-word types, which are category1 
-    // primitives, and references (including uninitialized refs).  Though 
-    // the 'query' types should technically return 'false' here, if we 
-    // allow this to return true, we can perform the test using only
-    // 2 operations rather than 8 (3 masks, 3 compares and 2 logical 'ands').
-    // Since noone should call this on a query type anyway, this is ok.
-    assert(!is_check(), "Must not be a check type (wrong value returned)");
-    return ((_u._data & Category1) != Primitive);
-    // should only return false if it's a primitive, and the category1 flag
-    // is not set.
-  }
-  bool is_category2() const { return ((_u._data & Category2) == Category2); }
-  bool is_category2_2nd() const { 
-    return ((_u._data & Category2_2nd) == Category2_2nd); 
-  }
-  bool is_reference_check() const { return _u._data == ReferenceQuery; }
-  bool is_category1_check() const { return _u._data == Category1Query; }
-  bool is_category2_check() const { return _u._data == Category2Query; }
-  bool is_category2_2nd_check() const { return _u._data == Category2_2ndQuery; }
-  bool is_check() const { return (_u._data & TypeQuery) == TypeQuery; }
-
-  bool is_x_array(char sig) const { 
-    return is_null() || (is_array() && (name()->byte_at(1) == sig));
-  }
-  bool is_int_array() const { return is_x_array('I'); }
-  bool is_byte_array() const { return is_x_array('B'); }
-  bool is_bool_array() const { return is_x_array('Z'); }
-  bool is_char_array() const { return is_x_array('C'); }
-  bool is_short_array() const { return is_x_array('S'); }
-  bool is_long_array() const { return is_x_array('J'); }
-  bool is_float_array() const { return is_x_array('F'); }
-  bool is_double_array() const { return is_x_array('D'); }
-  bool is_object_array() const { return is_x_array('L'); }
-  bool is_array_array() const { return is_x_array('['); }
-  bool is_reference_array() const 
-    { return is_object_array() || is_array_array(); }
-  bool is_object() const 
-    { return (is_reference() && !is_null() && name()->utf8_length() >= 1 && 
-              name()->byte_at(0) != '['); }
-  bool is_array() const 
-    { return (is_reference() && !is_null() && name()->utf8_length() >= 2 && 
-              name()->byte_at(0) == '['); }
-  bool is_uninitialized() const 
-    { return ((_u._data & Uninitialized) == Uninitialized); }
-  bool is_uninitialized_this() const 
-    { return is_uninitialized() && bci() == BciForThis; }
-
-  VerificationType to_category2_2nd() const {
-    assert(is_category2(), "Must be a double word");
-    return VerificationType(is_long() ? Long_2nd : Double_2nd);
-  }
-
-  u2 bci() const {
-    assert(is_uninitialized(), "Must be uninitialized type");
-    return ((_u._data & BciMask) >> 1 * BitsPerByte);
-  }
-
-  symbolHandle name_handle() const { 
-    assert(is_reference() && !is_null(), "Must be a non-null reference");
-    return symbolHandle(_u._handle, true); 
-  }
-  symbolOop name() const { 
-    assert(is_reference() && !is_null(), "Must be a non-null reference");
-    return *(_u._handle); 
-  }
-
-  bool equals(const VerificationType& t) const {
-    return (_u._data == t._u._data ||
-      (is_reference() && t.is_reference() && !is_null() && !t.is_null() && 
-       name() == t.name()));
-  }
-
-  bool operator ==(const VerificationType& t) const {
-    return equals(t);
-  }
-
-  bool operator !=(const VerificationType& t) const {
-    return !equals(t);
-  }
-
-  // The whole point of this type system - check to see if one type
-  // is assignable to another.  Returns true if one can assign 'from' to 
-  // this.
-  bool is_assignable_from(
-      const VerificationType& from, instanceKlassHandle context, TRAPS) const {
-    if (equals(from) || is_bogus()) {
-      return true;
-    } else {
-      switch(_u._data) {
-        case Category1Query:
-          return from.is_category1();
-        case Category2Query:
-          return from.is_category2();
-        case Category2_2ndQuery:
-          return from.is_category2_2nd();
-        case ReferenceQuery:
-          return from.is_reference() || from.is_uninitialized();
-        case Boolean:
-        case Byte:
-        case Char:
-        case Short:
-          // An int can be assigned to boolean, byte, char or short values.
-          return from.is_integer(); 
-        default: 
-          if (is_reference() && from.is_reference()) {
-            return is_reference_assignable_from(from, context, CHECK_false);
-          } else {
-            return false;
-          }
-      }
-    }
-  }
-
-  VerificationType get_component(TRAPS) const;
-
-  int dimensions() const {
-    assert(is_array(), "Must be an array");
-    int index = 0;
-    while (name()->byte_at(index++) == '[');
-    return index;
-  }
-
-  void print_on(outputStream* st) const PRODUCT_RETURN;
-
- private:
-
-  bool is_reference_assignable_from(
-    const VerificationType&, instanceKlassHandle, TRAPS) const;
-};
--- a/hotspot/src/share/vm/runtime/verifier.cpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2196 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)verifier.cpp	1.112 07/05/05 17:07:02 JVM"
-#endif
-/*
- * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_verifier.cpp.incl"
-
-// Access to external entry for VerifyClassCodes - old byte code verifier
-
-extern "C" {
-  typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint);
-  typedef jboolean (*verify_byte_codes_fn_new_t)(JNIEnv *, jclass, char *, jint, jint);
-}
-
-static void* volatile _verify_byte_codes_fn = NULL;
-
-static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
-
-static void* verify_byte_codes_fn() {
-  if (_verify_byte_codes_fn == NULL) {
-    void *lib_handle = os::native_java_library();
-    void *func = hpi::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
-    OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
-    if (func == NULL) {
-      OrderAccess::release_store(&_is_new_verify_byte_codes_fn, false);
-      func = hpi::dll_lookup(lib_handle, "VerifyClassCodes");
-      OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
-    }
-  }
-  return (void*)_verify_byte_codes_fn;
-}
-
-
-// Methods in Verifier
-
-bool Verifier::should_verify_for(oop class_loader) {
-  return class_loader == NULL ? 
-    BytecodeVerificationLocal : BytecodeVerificationRemote;
-}
-
-bool Verifier::relax_verify_for(oop loader) {
-  bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
-  bool need_verify = 
-    // verifyAll
-    (BytecodeVerificationLocal && BytecodeVerificationRemote) || 
-    // verifyRemote
-    (!BytecodeVerificationLocal && BytecodeVerificationRemote && !trusted); 
-  return !need_verify;
-}
-
-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, TRAPS) {
-  ResourceMark rm(THREAD);
-  HandleMark hm;
-
-  symbolHandle exception_name;
-  const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
-  char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
-
-  const char* klassName = klass->external_name();
-
-  // If the class should be verified, first see if we can use the split
-  // verifier.  If not, or if verification fails and FailOverToOldVerifier
-  // is set, then call the inference verifier.
-  if (is_eligible_for_verification(klass)) {
-    if (TraceClassInitialization) {
-      tty->print_cr("Start class verification for: %s", klassName);
-    }
-    if (UseSplitVerifier && 
-        klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
-        ClassVerifier split_verifier(
-          klass, message_buffer, message_buffer_len, THREAD);
-        split_verifier.verify_class(THREAD);
-        exception_name = split_verifier.result();
-      if (FailOverToOldVerifier && !HAS_PENDING_EXCEPTION && 
-          (exception_name == vmSymbols::java_lang_VerifyError() ||
-           exception_name == vmSymbols::java_lang_ClassFormatError())) {
-        if (TraceClassInitialization) {
-          tty->print_cr(
-            "Fail over class verification to old verifier for: %s", klassName);
-        }
-        exception_name = inference_verify(
-          klass, message_buffer, message_buffer_len, THREAD);
-      }
-    } else {
-      exception_name = inference_verify(
-          klass, message_buffer, message_buffer_len, THREAD);
-    }
-
-    if (TraceClassInitialization) {
-      if (HAS_PENDING_EXCEPTION) {
-        tty->print("Verification for %s has", klassName);
-        tty->print_cr(" exception pending %s ",
-          instanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
-      } else if (!exception_name.is_null()) {
-        tty->print_cr("Verification for %s failed", klassName);
-      }
-      tty->print_cr("End class verification for: %s", klassName);
-    }
-  }
-
-  if (HAS_PENDING_EXCEPTION) {
-    return false; // use the existing exception
-  } else if (exception_name.is_null()) {
-    return true; // verifcation succeeded
-  } else { // VerifyError or ClassFormatError to be created and thrown
-    ResourceMark rm(THREAD);
-    instanceKlassHandle kls = 
-      SystemDictionary::resolve_or_fail(exception_name, true, CHECK_false);
-    while (!kls.is_null()) {
-      if (kls == klass) {
-        // If the class being verified is the exception we're creating 
-        // or one of it's superclasses, we're in trouble and are going 
-        // to infinitely recurse when we try to initialize the exception.
-        // So bail out here by throwing the preallocated VM error.
-        THROW_OOP_(Universe::virtual_machine_error_instance(), false);
-      }
-      kls = kls->super();
-    }
-    message_buffer[message_buffer_len - 1] = '\0'; // just to be sure
-    THROW_MSG_(exception_name, message_buffer, false);
-  }
-}
-
-bool Verifier::is_eligible_for_verification(instanceKlassHandle klass) {
-  symbolOop name = klass->name();
-  klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
-
-  return (should_verify_for(klass->class_loader()) && 
-    // return if the class is a bootstrapping class
-    // We need to skip the following four for bootstraping
-    name != vmSymbols::java_lang_Object() &&
-    name != vmSymbols::java_lang_Class() &&
-    name != vmSymbols::java_lang_String() &&
-    name != vmSymbols::java_lang_Throwable() &&
-
-    // Can not verify the bytecodes for shared classes because they have
-    // already been rewritten to contain constant pool cache indices,
-    // which the verifier can't understand.
-    // Shared classes shouldn't have stackmaps either.
-    !klass()->is_shared() &&
-
-    // As of the fix for 4486457 we disable verification for all of the
-    // dynamically-generated bytecodes associated with the 1.4
-    // reflection implementation, not just those associated with
-    // sun/reflect/SerializationConstructorAccessor.
-    // NOTE: this is called too early in the bootstrapping process to be
-    // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
-    (refl_magic_klass == NULL || 
-     !klass->is_subtype_of(refl_magic_klass) ||
-     VerifyReflectionBytecodes)
-  );
-}
-
-symbolHandle Verifier::inference_verify(
-    instanceKlassHandle klass, char* message, size_t message_len, TRAPS) {
-  JavaThread* thread = (JavaThread*)THREAD;
-  JNIEnv *env = thread->jni_environment();
-
-  void* verify_func = verify_byte_codes_fn();
-
-  if (verify_func == NULL) {
-    jio_snprintf(message, message_len, "Could not link verifier");
-    return vmSymbols::java_lang_VerifyError();
-  }
-
-  ResourceMark rm(THREAD);
-  if (ClassVerifier::_verify_verbose) {
-    tty->print_cr("Verifying class %s with old format", klass->external_name());
-  }
-
-  jclass cls = (jclass) JNIHandles::make_local(env, klass->java_mirror());
-  jint result;
-
-  {
-    HandleMark hm(thread);
-    ThreadToNativeFromVM ttn(thread);
-    // ThreadToNativeFromVM takes care of changing thread_state, so safepoint
-    // code knows that we have left the VM
-
-    if (_is_new_verify_byte_codes_fn) {
-      verify_byte_codes_fn_new_t func =
-        CAST_TO_FN_PTR(verify_byte_codes_fn_new_t, verify_func);
-      result = (*func)(env, cls, message, (int)message_len,
-          klass->major_version());
-    } else {
-      verify_byte_codes_fn_t func =
-        CAST_TO_FN_PTR(verify_byte_codes_fn_t, verify_func);
-      result = (*func)(env, cls, message, (int)message_len);
-    }
-  }
-
-  JNIHandles::destroy_local(cls);
-
-  // These numbers are chosen so that VerifyClassCodes interface doesn't need
-  // to be changed (still return jboolean (unsigned char)), and result is
-  // 1 when verification is passed. 
-  symbolHandle nh(NULL);
-  if (result == 0) {
-    return vmSymbols::java_lang_VerifyError();
-  } else if (result == 1) {
-    return nh; // verified.
-  } else if (result == 2) {
-    THROW_MSG_(vmSymbols::java_lang_OutOfMemoryError(), message, nh);
-  } else if (result == 3) {
-    return vmSymbols::java_lang_ClassFormatError();
-  } else {
-    ShouldNotReachHere();
-    return nh;
-  }
-}
-
-// Methods in ClassVerifier
-
-bool ClassVerifier::_verify_verbose = false;
-
-ClassVerifier::ClassVerifier(
-    instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS)
-    : _thread(THREAD), _exception_type(symbolHandle()), _message(msg), 
-      _message_buffer_len(msg_len), _klass(klass) {
-  _this_type = VerificationType::reference_type(klass->name());
-}
-
-ClassVerifier::~ClassVerifier() {
-}
-
-void ClassVerifier::verify_class(TRAPS) {
-  if (_verify_verbose) {
-    tty->print_cr("Verifying class %s with new format", 
-      _klass->external_name());
-  }
-
-  objArrayHandle methods(THREAD, _klass->methods());
-  int num_methods = methods->length();
-
-  for (int index = 0; index < num_methods; index++) {
-    methodOop m = (methodOop)methods->obj_at(index);
-    if (m->is_native() || m->is_abstract()) {
-      // If m is native or abstract, skip it.  It is checked in class file
-      // parser that methods do not override a final method.
-      continue;
-    }
-    verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
-  }
-}
-
-void ClassVerifier::verify_method(methodHandle m, TRAPS) {
-  ResourceMark rm(THREAD);
-  _method = m;   // initialize _method
-  if (_verify_verbose) {
-    tty->print_cr("Verifying method %s", m->name_and_sig_as_C_string());
-  }
-
-  const char* bad_type_msg = "Bad type on operand stack in %s";
-
-  int32_t max_stack = m->max_stack();
-  int32_t max_locals = m->max_locals();
-  constantPoolHandle cp(THREAD, m->constants());
-
-  if (!SignatureVerifier::is_valid_method_signature(m->signature())) {
-    class_format_error("Invalid method signature");
-    return;
-  }
-
-  // Initial stack map frame: offset is 0, stack is initially empty.
-  StackMapFrame current_frame(max_locals, max_stack, this);
-  // Set initial locals
-  VerificationType return_type = current_frame.set_locals_from_arg(
-    m, current_type(), CHECK_VERIFY(this));
-
-  int32_t stackmap_index = 0; // index to the stackmap array
-
-  u4 code_length = m->code_size();
-
-  // Scan the bytecode and map each instruction's start offset to a number.
-  char* code_data = generate_code_data(m, code_length, CHECK_VERIFY(this));
-
-  int ex_min = code_length;
-  int ex_max = -1;
-  // Look through each item on the exception table. Each of the fields must refer
-  // to a legal instruction.
-  verify_exception_handler_table(
-    code_length, code_data, ex_min, ex_max, CHECK_VERIFY(this));
-
-  // Look through each entry on the local variable table and make sure
-  // its range of code array offsets is valid. (4169817)
-  if (m->has_localvariable_table()) {
-    verify_local_variable_table(code_length, code_data, CHECK_VERIFY(this));
-  }
-
-  typeArrayHandle stackmap_data(THREAD, m->stackmap_data());
-  StackMapStream stream(stackmap_data);
-  StackMapReader reader(this, &stream, code_data, code_length, THREAD);
-  StackMapTable stackmap_table(&reader, &current_frame, max_locals, max_stack,
-                               code_data, code_length, CHECK_VERIFY(this));
-
-  if (_verify_verbose) {
-    stackmap_table.print();
-  }
-
-  RawBytecodeStream bcs(m);
-
-  // Scan the byte code linearly from the start to the end
-  bool no_control_flow = false; // Set to true when there is no direct control
-                                // flow from current instruction to the next
-                                // instruction in sequence
-  Bytecodes::Code opcode;
-  while (!bcs.is_last_bytecode()) {
-    opcode = bcs.raw_next();
-    u2 bci = bcs.bci();
-
-    // Set current frame's offset to bci
-    current_frame.set_offset(bci);
-
-    // Make sure every offset in stackmap table point to the beginning to
-    // an instruction. Match current_frame to stackmap_table entry with
-    // the same offset if exists.
-    stackmap_index = verify_stackmap_table(
-      stackmap_index, bci, &current_frame, &stackmap_table,
-      no_control_flow, CHECK_VERIFY(this));
-
-    bool this_uninit = false;  // Set to true when invokespecial <init> initialized 'this'
-
-    // Merge with the next instruction
-    {
-      u2 index;
-      int target;
-      VerificationType type, type2;
-      VerificationType atype;
-
-#ifndef PRODUCT
-      if (_verify_verbose) {
-        current_frame.print();
-        tty->print_cr("offset = %d,  opcode = %s", bci, Bytecodes::name(opcode));
-      }
-#endif
-
-      // Make sure wide instruction is in correct format
-      if (bcs.is_wide()) {
-        if (opcode != Bytecodes::_iinc   && opcode != Bytecodes::_iload  &&
-            opcode != Bytecodes::_aload  && opcode != Bytecodes::_lload  &&
-            opcode != Bytecodes::_istore && opcode != Bytecodes::_astore &&
-            opcode != Bytecodes::_lstore && opcode != Bytecodes::_fload  &&
-            opcode != Bytecodes::_dload  && opcode != Bytecodes::_fstore &&
-            opcode != Bytecodes::_dstore) {
-          verify_error(bci, "Bad wide instruction"); 
-          return;
-        }
-      }
-
-      switch (opcode) {
-        case Bytecodes::_nop :
-          no_control_flow = false; break;
-        case Bytecodes::_aconst_null :
-          current_frame.push_stack(
-            VerificationType::null_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iconst_m1 :
-        case Bytecodes::_iconst_0 :
-        case Bytecodes::_iconst_1 :
-        case Bytecodes::_iconst_2 :
-        case Bytecodes::_iconst_3 :
-        case Bytecodes::_iconst_4 :
-        case Bytecodes::_iconst_5 :
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lconst_0 :
-        case Bytecodes::_lconst_1 :
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fconst_0 :
-        case Bytecodes::_fconst_1 :
-        case Bytecodes::_fconst_2 :
-          current_frame.push_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dconst_0 :
-        case Bytecodes::_dconst_1 :
-          current_frame.push_stack_2(
-            VerificationType::double_type(), 
-            VerificationType::double2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_sipush :
-        case Bytecodes::_bipush :
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_ldc :
-          verify_ldc(
-            opcode, bcs.get_index(), &current_frame, 
-            cp, bci, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_ldc_w :
-        case Bytecodes::_ldc2_w :
-          verify_ldc(
-            opcode, bcs.get_index_big(), &current_frame, 
-            cp, bci, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iload :
-          verify_iload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iload_0 :
-        case Bytecodes::_iload_1 :
-        case Bytecodes::_iload_2 :
-        case Bytecodes::_iload_3 :
-          index = opcode - Bytecodes::_iload_0;
-          verify_iload(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lload :
-          verify_lload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lload_0 :
-        case Bytecodes::_lload_1 :
-        case Bytecodes::_lload_2 :
-        case Bytecodes::_lload_3 :
-          index = opcode - Bytecodes::_lload_0;
-          verify_lload(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fload :
-          verify_fload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fload_0 :
-        case Bytecodes::_fload_1 :
-        case Bytecodes::_fload_2 :
-        case Bytecodes::_fload_3 :
-          index = opcode - Bytecodes::_fload_0;
-          verify_fload(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dload :
-          verify_dload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dload_0 :
-        case Bytecodes::_dload_1 :
-        case Bytecodes::_dload_2 :
-        case Bytecodes::_dload_3 :
-          index = opcode - Bytecodes::_dload_0;
-          verify_dload(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_aload :
-          verify_aload(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_aload_0 :
-        case Bytecodes::_aload_1 :
-        case Bytecodes::_aload_2 :
-        case Bytecodes::_aload_3 :
-          index = opcode - Bytecodes::_aload_0;
-          verify_aload(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iaload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_int_array()) {
-            verify_error(bci, bad_type_msg, "iaload");
-            return;               
-          }
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_baload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_bool_array() && !atype.is_byte_array()) {
-            verify_error(bci, bad_type_msg, "baload");
-            return;
-          }
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_caload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_char_array()) {
-            verify_error(bci, bad_type_msg, "caload");
-            return;
-          }
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_saload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_short_array()) {
-            verify_error(bci, bad_type_msg, "saload");
-            return;
-          }
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_laload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_long_array()) {
-            verify_error(bci, bad_type_msg, "laload");
-            return;
-          }
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_faload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_float_array()) {
-            verify_error(bci, bad_type_msg, "faload");
-            return;
-          }
-          current_frame.push_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_daload :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_double_array()) {
-            verify_error(bci, bad_type_msg, "daload");
-            return;
-          }
-          current_frame.push_stack_2(
-            VerificationType::double_type(), 
-            VerificationType::double2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_aaload : {
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_reference_array()) {
-            verify_error(bci, bad_type_msg, "aaload");
-            return;
-          }
-          if (atype.is_null()) {
-            current_frame.push_stack(
-              VerificationType::null_type(), CHECK_VERIFY(this));
-          } else {
-            VerificationType component = 
-              atype.get_component(CHECK_VERIFY(this));
-            current_frame.push_stack(component, CHECK_VERIFY(this));
-          }
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_istore :
-          verify_istore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_istore_0 :
-        case Bytecodes::_istore_1 :
-        case Bytecodes::_istore_2 :
-        case Bytecodes::_istore_3 :
-          index = opcode - Bytecodes::_istore_0;
-          verify_istore(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lstore :
-          verify_lstore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lstore_0 :
-        case Bytecodes::_lstore_1 :
-        case Bytecodes::_lstore_2 :
-        case Bytecodes::_lstore_3 :
-          index = opcode - Bytecodes::_lstore_0;
-          verify_lstore(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fstore :
-          verify_fstore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fstore_0 :
-        case Bytecodes::_fstore_1 :
-        case Bytecodes::_fstore_2 :
-        case Bytecodes::_fstore_3 :
-          index = opcode - Bytecodes::_fstore_0;
-          verify_fstore(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dstore :
-          verify_dstore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dstore_0 :
-        case Bytecodes::_dstore_1 :
-        case Bytecodes::_dstore_2 :
-        case Bytecodes::_dstore_3 :
-          index = opcode - Bytecodes::_dstore_0;
-          verify_dstore(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_astore :
-          verify_astore(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_astore_0 :
-        case Bytecodes::_astore_1 :
-        case Bytecodes::_astore_2 :
-        case Bytecodes::_astore_3 :
-          index = opcode - Bytecodes::_astore_0;
-          verify_astore(index, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iastore :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          type2 = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_int_array()) {
-            verify_error(bci, bad_type_msg, "iastore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_bastore :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          type2 = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_bool_array() && !atype.is_byte_array()) {
-            verify_error(bci, bad_type_msg, "bastore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_castore :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_char_array()) {
-            verify_error(bci, bad_type_msg, "castore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_sastore :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_short_array()) {
-            verify_error(bci, bad_type_msg, "sastore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_lastore :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_long_array()) {
-            verify_error(bci, bad_type_msg, "lastore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_fastore :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack
-            (VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_float_array()) {
-            verify_error(bci, bad_type_msg, "fastore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_dastore :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!atype.is_double_array()) {
-            verify_error(bci, bad_type_msg, "dastore");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_aastore :
-          type = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          type2 = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          atype = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          // more type-checking is done at runtime
-          if (!atype.is_reference_array()) {
-            verify_error(bci, bad_type_msg, "aastore");
-            return;
-          }
-          // 4938384: relaxed constraint in JVMS 3nd edition.
-          no_control_flow = false; break;
-        case Bytecodes::_pop :
-          current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_pop2 :
-          type = current_frame.pop_stack(CHECK_VERIFY(this));
-          if (type.is_category1()) {
-            current_frame.pop_stack(
-              VerificationType::category1_check(), CHECK_VERIFY(this));
-          } else if (type.is_category2_2nd()) {
-            current_frame.pop_stack(
-              VerificationType::category2_check(), CHECK_VERIFY(this));
-          } else {
-            verify_error(bci, bad_type_msg, "pop2");
-            return;
-          }
-          no_control_flow = false; break;
-        case Bytecodes::_dup :
-          type = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dup_x1 :
-          type = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          type2 = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dup_x2 :
-        {
-          VerificationType type3;
-          type = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          type2 = current_frame.pop_stack(CHECK_VERIFY(this));
-          if (type2.is_category1()) {
-            type3 = current_frame.pop_stack(
-              VerificationType::category1_check(), CHECK_VERIFY(this));
-          } else if (type2.is_category2_2nd()) {
-            type3 = current_frame.pop_stack(
-              VerificationType::category2_check(), CHECK_VERIFY(this));
-          } else {
-            verify_error(bci, bad_type_msg, "dup_x2");
-            return;
-          }
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type3, CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_dup2 :
-          type = current_frame.pop_stack(CHECK_VERIFY(this));
-          if (type.is_category1()) {
-            type2 = current_frame.pop_stack(
-              VerificationType::category1_check(), CHECK_VERIFY(this));
-          } else if (type.is_category2_2nd()) {
-            type2 = current_frame.pop_stack(
-              VerificationType::category2_check(), CHECK_VERIFY(this));
-          } else {
-            verify_error(bci, bad_type_msg, "dup2");
-            return;
-          }
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dup2_x1 :
-        {
-          VerificationType type3;
-          type = current_frame.pop_stack(CHECK_VERIFY(this));
-          if (type.is_category1()) {
-            type2 = current_frame.pop_stack(
-              VerificationType::category1_check(), CHECK_VERIFY(this));
-          } else if(type.is_category2_2nd()) {
-            type2 = current_frame.pop_stack
-              (VerificationType::category2_check(), CHECK_VERIFY(this));
-          } else {
-            verify_error(bci, bad_type_msg, "dup2_x1");
-            return;
-          }
-          type3 = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type3, CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_dup2_x2 :
-        {
-          VerificationType type3, type4;
-          type = current_frame.pop_stack(CHECK_VERIFY(this));
-          if (type.is_category1()) {
-            type2 = current_frame.pop_stack(
-              VerificationType::category1_check(), CHECK_VERIFY(this));
-          } else if (type.is_category2_2nd()) {
-            type2 = current_frame.pop_stack(
-              VerificationType::category2_check(), CHECK_VERIFY(this));
-          } else {
-            verify_error(bci, bad_type_msg, "dup2_x2");
-            return;
-          }
-          type3 = current_frame.pop_stack(CHECK_VERIFY(this));
-          if (type3.is_category1()) {
-            type4 = current_frame.pop_stack(
-              VerificationType::category1_check(), CHECK_VERIFY(this));
-          } else if (type3.is_category2_2nd()) {
-            type4 = current_frame.pop_stack(
-              VerificationType::category2_check(), CHECK_VERIFY(this));
-          } else {
-            verify_error(bci, bad_type_msg, "dup2_x2");
-            return;
-          }
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type4, CHECK_VERIFY(this));
-          current_frame.push_stack(type3, CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_swap :
-          type = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          type2 = current_frame.pop_stack(
-            VerificationType::category1_check(), CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          current_frame.push_stack(type2, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iadd :
-        case Bytecodes::_isub :
-        case Bytecodes::_imul :
-        case Bytecodes::_idiv :
-        case Bytecodes::_irem :
-        case Bytecodes::_ishl :
-        case Bytecodes::_ishr :
-        case Bytecodes::_iushr :
-        case Bytecodes::_ior :
-        case Bytecodes::_ixor :
-        case Bytecodes::_iand :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          // fall through
-        case Bytecodes::_ineg :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_ladd :
-        case Bytecodes::_lsub :
-        case Bytecodes::_lmul :
-        case Bytecodes::_ldiv :
-        case Bytecodes::_lrem :
-        case Bytecodes::_land :
-        case Bytecodes::_lor :
-        case Bytecodes::_lxor :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          // fall through
-        case Bytecodes::_lneg :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lshl :
-        case Bytecodes::_lshr :
-        case Bytecodes::_lushr :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fadd :
-        case Bytecodes::_fsub :
-        case Bytecodes::_fmul :
-        case Bytecodes::_fdiv :
-        case Bytecodes::_frem :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          // fall through
-        case Bytecodes::_fneg :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dadd :
-        case Bytecodes::_dsub :
-        case Bytecodes::_dmul :
-        case Bytecodes::_ddiv :
-        case Bytecodes::_drem :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          // fall through
-        case Bytecodes::_dneg :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::double_type(), 
-            VerificationType::double2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_iinc :
-          verify_iinc(bcs.get_index(), &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_i2l :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-       case Bytecodes::_l2i :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_i2f :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_i2d :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::double_type(), 
-            VerificationType::double2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_l2f :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_l2d :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::double_type(), 
-            VerificationType::double2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_f2i :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_f2l :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_f2d :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::double_type(), 
-            VerificationType::double2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_d2i :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_d2l :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.push_stack_2(
-            VerificationType::long_type(), 
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_d2f :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_i2b :
-        case Bytecodes::_i2c :
-        case Bytecodes::_i2s :
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_lcmp :
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack_2(
-            VerificationType::long2_type(), 
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_fcmpl :
-        case Bytecodes::_fcmpg :
-          current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack( 
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_dcmpl :
-        case Bytecodes::_dcmpg :
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.pop_stack_2(
-            VerificationType::double2_type(), 
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_if_icmpeq:
-        case Bytecodes::_if_icmpne:
-        case Bytecodes::_if_icmplt:
-        case Bytecodes::_if_icmpge:
-        case Bytecodes::_if_icmpgt:
-        case Bytecodes::_if_icmple:
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          // fall through
-        case Bytecodes::_ifeq:
-        case Bytecodes::_ifne:
-        case Bytecodes::_iflt:
-        case Bytecodes::_ifge:
-        case Bytecodes::_ifgt:
-        case Bytecodes::_ifle:
-          current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          target = bcs.dest();
-          stackmap_table.check_jump_target(
-            &current_frame, target, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_if_acmpeq :
-        case Bytecodes::_if_acmpne :
-          current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          // fall through
-        case Bytecodes::_ifnull :
-        case Bytecodes::_ifnonnull :
-          current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          target = bcs.dest();
-          stackmap_table.check_jump_target
-            (&current_frame, target, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_goto :
-          target = bcs.dest();
-          stackmap_table.check_jump_target(
-            &current_frame, target, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_goto_w :
-          target = bcs.dest_w();
-          stackmap_table.check_jump_target(
-            &current_frame, target, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_tableswitch :
-        case Bytecodes::_lookupswitch :
-          verify_switch(
-            &bcs, code_length, code_data, &current_frame, 
-            &stackmap_table, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_ireturn :
-          type = current_frame.pop_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_lreturn :
-          type2 = current_frame.pop_stack(
-            VerificationType::long2_type(), CHECK_VERIFY(this));
-          type = current_frame.pop_stack(
-            VerificationType::long_type(), CHECK_VERIFY(this));
-          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_freturn :
-          type = current_frame.pop_stack(
-            VerificationType::float_type(), CHECK_VERIFY(this));
-          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_dreturn :
-          type2 = current_frame.pop_stack(
-            VerificationType::double2_type(),  CHECK_VERIFY(this));
-          type = current_frame.pop_stack(
-            VerificationType::double_type(), CHECK_VERIFY(this));
-          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_areturn :
-          type = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          verify_return_value(return_type, type, bci, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        case Bytecodes::_return :
-          if (return_type != VerificationType::bogus_type()) {
-            verify_error(bci, "Method expects no return value");
-            return;
-          }
-          // Make sure "this" has been initialized if current method is an
-          // <init>
-          if (_method->name() == vmSymbols::object_initializer_name() && 
-              current_frame.flag_this_uninit()) {
-            verify_error(bci,
-              "Constructor must call super() or this() before return");
-            return;
-          }
-          no_control_flow = true; break;
-        case Bytecodes::_getstatic :
-        case Bytecodes::_putstatic :
-        case Bytecodes::_getfield :
-        case Bytecodes::_putfield :
-          verify_field_instructions(
-            &bcs, &current_frame, cp, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_invokevirtual :
-        case Bytecodes::_invokespecial :
-        case Bytecodes::_invokestatic :
-          verify_invoke_instructions(
-            &bcs, code_length, &current_frame,
-            &this_uninit, return_type, cp, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_invokeinterface :
-          verify_invoke_instructions(
-            &bcs, code_length, &current_frame,
-            &this_uninit, return_type, cp, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_new :
-        {
-          index = bcs.get_index_big();
-          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
-          VerificationType new_class_type = 
-            cp_index_to_type(index, cp, CHECK_VERIFY(this));
-          if (!new_class_type.is_object()) {
-            verify_error(bci, "Illegal new instruction");
-            return;
-          }
-          type = VerificationType::uninitialized_type(bci);
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_newarray :
-          type = get_newarray_type(bcs.get_index(), bci, CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::integer_type(),  CHECK_VERIFY(this));
-          current_frame.push_stack(type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_anewarray :
-          verify_anewarray(
-            bcs.get_index_big(), cp, &current_frame, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_arraylength :
-          type = current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          if (!type.is_array()) {
-            verify_error(bci, bad_type_msg, "arraylength");
-          }
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_checkcast :
-        {
-          index = bcs.get_index_big();
-          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          VerificationType klass_type = cp_index_to_type(
-            index, cp, CHECK_VERIFY(this));
-          current_frame.push_stack(klass_type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_instanceof : {
-          index = bcs.get_index_big();
-          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
-          current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          current_frame.push_stack(
-            VerificationType::integer_type(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_monitorenter :
-        case Bytecodes::_monitorexit :
-          current_frame.pop_stack(
-            VerificationType::reference_check(), CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        case Bytecodes::_multianewarray :
-        {
-          index = bcs.get_index_big();
-          u2 dim = *(bcs.bcp()+3);
-          verify_cp_class_type(index, cp, CHECK_VERIFY(this));
-          VerificationType new_array_type = 
-            cp_index_to_type(index, cp, CHECK_VERIFY(this));
-          if (!new_array_type.is_array()) {
-            verify_error(bci,
-              "Illegal constant pool index in multianewarray instruction");
-            return;  
-          }
-          if (dim < 1 || new_array_type.dimensions() < dim) {
-            verify_error(bci,
-              "Illegal dimension in multianewarray instruction");
-            return;
-          }
-          for (int i = 0; i < dim; i++) {
-            current_frame.pop_stack(
-              VerificationType::integer_type(), CHECK_VERIFY(this));
-          }
-          current_frame.push_stack(new_array_type, CHECK_VERIFY(this));
-          no_control_flow = false; break;
-        }
-        case Bytecodes::_athrow :
-          type = VerificationType::reference_type(
-            vmSymbols::java_lang_Throwable());
-          current_frame.pop_stack(type, CHECK_VERIFY(this));
-          no_control_flow = true; break;
-        default:
-          // We only need to check the valid bytecodes in class file.
-          // And jsr and ret are not in the new class file format in JDK1.5.
-          verify_error(bci, "Bad instruction");
-          no_control_flow = false;
-          return;
-      }  // end switch
-    }  // end Merge with the next instruction
-
-    // Look for possible jump target in exception handlers and see if it
-    // matches current_frame
-    if (bci >= ex_min && bci < ex_max) {
-      verify_exception_handler_targets(
-        bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
-    }
-  } // end while
-
-  // Make sure that control flow does not fall through end of the method
-  if (!no_control_flow) {
-    verify_error(code_length, "Control flow falls through code end");
-    return;
-  }
-}
-
-char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
-  char* code_data = NEW_RESOURCE_ARRAY(char, code_length);
-  memset(code_data, 0, sizeof(char) * code_length);
-  RawBytecodeStream bcs(m);
-
-  while (!bcs.is_last_bytecode()) {
-    if (bcs.raw_next() != Bytecodes::_illegal) {
-      int bci = bcs.bci();
-      if (bcs.code() == Bytecodes::_new) {
-        code_data[bci] = NEW_OFFSET;
-      } else {
-        code_data[bci] = BYTECODE_OFFSET;
-      }
-    } else {
-      verify_error(bcs.bci(), "Bad instruction");
-      return NULL;
-    }
-  }
-
-  return code_data;
-}
-
-void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS) {
-  typeArrayHandle exhandlers (THREAD, _method->exception_table());
-  constantPoolHandle cp (THREAD, _method->constants());
-
-  if (exhandlers() != NULL) {
-    for(int i = 0; i < exhandlers->length();) {
-      u2 start_pc = exhandlers->int_at(i++);
-      u2 end_pc = exhandlers->int_at(i++);
-      u2 handler_pc = exhandlers->int_at(i++);
-      if (start_pc >= code_length || code_data[start_pc] == 0) {
-        class_format_error("Illegal exception table start_pc %d", start_pc);
-        return;
-      }
-      if (end_pc != code_length) {   // special case: end_pc == code_length
-        if (end_pc > code_length || code_data[end_pc] == 0) {
-          class_format_error("Illegal exception table end_pc %d", end_pc); 
-          return;
-        }
-      }
-      if (handler_pc >= code_length || code_data[handler_pc] == 0) {
-        class_format_error("Illegal exception table handler_pc %d", handler_pc);
-        return;
-      }
-      int catch_type_index = exhandlers->int_at(i++);
-      if (catch_type_index != 0) {
-        VerificationType catch_type = cp_index_to_type(
-          catch_type_index, cp, CHECK_VERIFY(this));
-        VerificationType throwable = 
-          VerificationType::reference_type(vmSymbols::java_lang_Throwable());
-        bool is_subclass = throwable.is_assignable_from(
-          catch_type, current_class(), CHECK_VERIFY(this));
-        if (!is_subclass) {
-          // 4286534: should throw VerifyError according to recent spec change
-          verify_error(
-            "Catch type is not a subclass of Throwable in handler %d",
-            handler_pc);
-          return;
-        }
-      }
-      if (start_pc < min) min = start_pc;
-      if (end_pc > max) max = end_pc;
-    }
-  }
-}
-
-void ClassVerifier::verify_local_variable_table(u4 code_length, char* code_data, TRAPS) {
-  int localvariable_table_length = _method()->localvariable_table_length();
-  if (localvariable_table_length > 0) {
-    LocalVariableTableElement* table = _method()->localvariable_table_start();
-    for (int i = 0; i < localvariable_table_length; i++) {
-      u2 start_bci = table[i].start_bci;
-      u2 length = table[i].length;
-
-      if (start_bci >= code_length || code_data[start_bci] == 0) {
-        class_format_error(
-          "Illegal local variable table start_pc %d", start_bci);
-        return;
-      }
-      u4 end_bci = (u4)(start_bci + length);
-      if (end_bci != code_length) {
-        if (end_bci >= code_length || code_data[end_bci] == 0) {
-          class_format_error( "Illegal local variable table length %d", length);
-          return;
-        }
-      }
-    }
-  }
-}
-
-u2 ClassVerifier::verify_stackmap_table(u2 stackmap_index, u2 bci,
-                                        StackMapFrame* current_frame,
-                                        StackMapTable* stackmap_table,
-                                        bool no_control_flow, TRAPS) {
-  if (stackmap_index < stackmap_table->get_frame_count()) {
-    u2 this_offset = stackmap_table->get_offset(stackmap_index);
-    if (no_control_flow && this_offset > bci) {
-      verify_error(bci, "Expecting a stack map frame");
-      return 0;
-    }
-    if (this_offset == bci) {
-      // See if current stack map can be assigned to the frame in table.
-      // current_frame is the stackmap frame got from the last instruction.
-      // If matched, current_frame will be updated by this method.
-      bool match = stackmap_table->match_stackmap(
-        current_frame, this_offset, stackmap_index, 
-        !no_control_flow, true, CHECK_VERIFY_(this, 0));
-      if (!match) {
-        // report type error
-        verify_error(bci, "Instruction type does not match stack map");
-        return 0;
-      }
-      stackmap_index++;
-    } else if (this_offset < bci) {
-      // current_offset should have met this_offset.
-      class_format_error("Bad stack map offset %d", this_offset);
-      return 0;
-    }
-  } else if (no_control_flow) {
-    verify_error(bci, "Expecting a stack map frame");
-    return 0;
-  }
-  return stackmap_index;
-}
-
-void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, StackMapFrame* current_frame,
-                                                     StackMapTable* stackmap_table, TRAPS) {
-  constantPoolHandle cp (THREAD, _method->constants());
-  typeArrayHandle exhandlers (THREAD, _method->exception_table());
-  if (exhandlers() != NULL) {
-    for(int i = 0; i < exhandlers->length();) {
-      u2 start_pc = exhandlers->int_at(i++);
-      u2 end_pc = exhandlers->int_at(i++);
-      u2 handler_pc = exhandlers->int_at(i++);
-      int catch_type_index = exhandlers->int_at(i++);
-      if(bci >= start_pc && bci < end_pc) {
-        u1 flags = current_frame->flags();
-        if (this_uninit) {  flags |= FLAG_THIS_UNINIT; }
-
-        ResourceMark rm(THREAD);
-        StackMapFrame* new_frame = current_frame->frame_in_exception_handler(flags);
-        if (catch_type_index != 0) {
-          // We know that this index refers to a subclass of Throwable
-          VerificationType catch_type = cp_index_to_type(
-            catch_type_index, cp, CHECK_VERIFY(this));
-          new_frame->push_stack(catch_type, CHECK_VERIFY(this));
-        } else {
-          VerificationType throwable = 
-            VerificationType::reference_type(vmSymbols::java_lang_Throwable());
-          new_frame->push_stack(throwable, CHECK_VERIFY(this));
-        }
-        bool match = stackmap_table->match_stackmap(
-          new_frame, handler_pc, true, false, CHECK_VERIFY(this));
-        if (!match) {
-          verify_error(bci,
-            "Stack map does not match the one at exception handler %d", 
-            handler_pc);
-          return;
-        }
-      }
-    }
-  }
-}
-
-void ClassVerifier::verify_cp_index(constantPoolHandle cp, int index, TRAPS) {
-  int nconstants = cp->length();
-  if ((index <= 0) || (index >= nconstants)) {
-    verify_error("Illegal constant pool index %d in class %s", 
-      index, instanceKlass::cast(cp->pool_holder())->external_name());
-    return;
-  }
-}
-
-void ClassVerifier::verify_cp_type(
-    int index, constantPoolHandle cp, unsigned int types, TRAPS) {
-
-  // In some situations, bytecode rewriting may occur while we're verifying.
-  // In this case, a constant pool cache exists and some indices refer to that
-  // instead.  Get the original index for the tag check
-  constantPoolCacheOop cache = cp->cache();
-  if (cache != NULL &&
-       ((types == (1 <<  JVM_CONSTANT_InterfaceMethodref)) || 
-        (types == (1 <<  JVM_CONSTANT_Methodref)) || 
-        (types == (1 <<  JVM_CONSTANT_Fieldref)))) {
-    assert((index >= 0) && (index < cache->length()), 
-      "Must be a legal index into the cp cache");
-    index = cache->entry_at(index)->constant_pool_index();
-  }
-
-  verify_cp_index(cp, index, CHECK_VERIFY(this));
-  unsigned int tag = cp->tag_at(index).value();
-  if ((types & (1 << tag)) == 0) {
-    verify_error(
-      "Illegal type at constant pool entry %d in class %s", 
-      index, instanceKlass::cast(cp->pool_holder())->external_name());
-    return;
-  }
-}
-
-void ClassVerifier::verify_cp_class_type(
-    int index, constantPoolHandle cp, TRAPS) {
-  verify_cp_index(cp, index, CHECK_VERIFY(this));
-  constantTag tag = cp->tag_at(index);
-  if (!tag.is_klass() && !tag.is_unresolved_klass()) {
-    verify_error("Illegal type at constant pool entry %d in class %s", 
-      index, instanceKlass::cast(cp->pool_holder())->external_name());
-    return;
-  }
-}
-
-void ClassVerifier::format_error_message(
-    const char* fmt, int offset, va_list va) {
-  ResourceMark rm(_thread);
-  stringStream message(_message, _message_buffer_len);
-  message.vprint(fmt, va);
-  if (!_method.is_null()) {
-    message.print(" in method %s", _method->name_and_sig_as_C_string());
-  }
-  if (offset != -1) {
-    message.print(" at offset %d", offset);
-  }
-}
-
-void ClassVerifier::verify_error(u2 offset, const char* fmt, ...) {
-  _exception_type = vmSymbols::java_lang_VerifyError();
-  va_list va;
-  va_start(va, fmt);
-  format_error_message(fmt, offset, va);
-  va_end(va);
-}
-
-void ClassVerifier::verify_error(const char* fmt, ...) {
-  _exception_type = vmSymbols::java_lang_VerifyError();
-  va_list va;
-  va_start(va, fmt);
-  format_error_message(fmt, -1, va);
-  va_end(va);
-}
-
-void ClassVerifier::class_format_error(const char* msg, ...) {
-  _exception_type = vmSymbols::java_lang_ClassFormatError();
-  va_list va;
-  va_start(va, msg);
-  format_error_message(msg, -1, va);
-  va_end(va);
-}
-
-klassOop ClassVerifier::load_class(symbolHandle name, TRAPS) {
-  // Get current loader and protection domain first.
-  oop loader = current_class()->class_loader();
-  oop protection_domain = current_class()->protection_domain();
-
-  return SystemDictionary::resolve_or_fail(
-    name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
-    true, CHECK_NULL);
-}
-
-bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
-                                        klassOop target_class,
-                                        symbolOop field_name,
-                                        symbolOop field_sig,
-                                        bool is_method) {
-  No_Safepoint_Verifier nosafepoint;
-
-  // If target class isn't a super class of this class, we don't worry about this case
-  if (!this_class->is_subclass_of(target_class)) {
-    return false;
-  }
-  // Check if the specified method or field is protected
-  instanceKlass* target_instance = instanceKlass::cast(target_class);
-  fieldDescriptor fd;
-  if (is_method) {
-    methodOop m = target_instance->uncached_lookup_method(field_name, field_sig);
-    if (m != NULL && m->is_protected()) {
-      if (!this_class->is_same_class_package(m->method_holder())) {
-        return true;
-      }
-    }
-  } else {
-    klassOop member_klass = target_instance->find_field(field_name, field_sig, &fd);
-    if(member_klass != NULL && fd.is_protected()) {
-      if (!this_class->is_same_class_package(member_klass)) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-void ClassVerifier::verify_ldc(
-    int opcode, u2 index, StackMapFrame *current_frame,
-     constantPoolHandle cp, u2 bci, TRAPS) {
-  verify_cp_index(cp, index, CHECK_VERIFY(this));
-  constantTag tag = cp->tag_at(index);
-  unsigned int types;
-  if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
-    if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) {
-      types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
-            | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class);
-      verify_cp_type(index, cp, types, CHECK_VERIFY(this));
-    }
-  } else {
-    assert(opcode == Bytecodes::_ldc2_w, "must be ldc2_w");
-    types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long);
-    verify_cp_type(index, cp, types, CHECK_VERIFY(this));
-  }
-  if (tag.is_string() || tag.is_unresolved_string()) {
-    current_frame->push_stack(
-      VerificationType::reference_type(
-        vmSymbols::java_lang_String()), CHECK_VERIFY(this));
-  } else if (tag.is_klass() || tag.is_unresolved_klass()) {
-    current_frame->push_stack(
-      VerificationType::reference_type(
-        vmSymbols::java_lang_Class()), CHECK_VERIFY(this));
-  } else if (tag.is_int()) {
-    current_frame->push_stack(
-      VerificationType::integer_type(), CHECK_VERIFY(this));
-  } else if (tag.is_float()) {
-    current_frame->push_stack(
-      VerificationType::float_type(), CHECK_VERIFY(this));
-  } else if (tag.is_double()) {
-    current_frame->push_stack_2(
-      VerificationType::double_type(), 
-      VerificationType::double2_type(), CHECK_VERIFY(this));
-  } else if (tag.is_long()) {
-    current_frame->push_stack_2(
-      VerificationType::long_type(), 
-      VerificationType::long2_type(), CHECK_VERIFY(this));
-  } else {
-    verify_error(bci, "Invalid index in ldc");
-    return;
-  }
-}
-
-void ClassVerifier::verify_switch(
-    RawBytecodeStream* bcs, u4 code_length, char* code_data,
-    StackMapFrame* current_frame, StackMapTable* stackmap_table, TRAPS) {
-  int bci = bcs->bci();
-  address bcp = bcs->bcp();
-  address aligned_bcp = (address) round_to((intptr_t)(bcp + 1), jintSize);
-
-  // 4639449 & 4647081: padding bytes must be 0
-  u2 padding_offset = 1;
-  while ((bcp + padding_offset) < aligned_bcp) {
-    if(*(bcp + padding_offset) != 0) {
-      verify_error(bci, "Nonzero padding byte in lookswitch or tableswitch");
-      return;
-    }
-    padding_offset++;
-  }
-  int default_offset = (int) Bytes::get_Java_u4(aligned_bcp);
-  int keys, delta;
-  current_frame->pop_stack(
-    VerificationType::integer_type(), CHECK_VERIFY(this));
-  if (bcs->code() == Bytecodes::_tableswitch) {
-    jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
-    jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
-    if (low > high) {
-      verify_error(bci,
-        "low must be less than or equal to high in tableswitch");
-      return;
-    }
-    keys = high - low + 1;
-    if (keys < 0) {
-      verify_error(bci, "too many keys in tableswitch");
-      return;
-    }
-    delta = 1;
-  } else {
-    keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
-    if (keys < 0) {
-      verify_error(bci, "number of keys in lookupswitch less than 0");
-      return;
-    }
-    delta = 2;
-    // Make sure that the lookupswitch items are sorted
-    for (int i = 0; i < (keys - 1); i++) {
-      jint this_key = Bytes::get_Java_u4(aligned_bcp + (2+2*i)*jintSize);
-      jint next_key = Bytes::get_Java_u4(aligned_bcp + (2+2*i+2)*jintSize);
-      if (this_key >= next_key) {
-        verify_error(bci, "Bad lookupswitch instruction");
-        return;
-      }
-    }
-  }
-  int target = bci + default_offset;
-  stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
-  for (int i = 0; i < keys; i++) {
-    target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
-    stackmap_table->check_jump_target(
-      current_frame, target, CHECK_VERIFY(this));
-  }
-}
-
-bool ClassVerifier::name_in_supers(
-    symbolOop ref_name, instanceKlassHandle current) {
-  klassOop super = current->super();
-  while (super != NULL) {
-    if (super->klass_part()->name() == ref_name) {
-      return true;
-    }
-    super = super->klass_part()->super();
-  }
-  return false;
-}
-
-void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
-                                              StackMapFrame* current_frame,
-                                              constantPoolHandle cp,
-                                              TRAPS) {
-  u2 index = bcs->get_index_big();
-  verify_cp_type(index, cp, 1 << JVM_CONSTANT_Fieldref, CHECK_VERIFY(this));
-
-  // Get field name and signature
-  symbolHandle field_name = symbolHandle(THREAD, cp->name_ref_at(index));
-  symbolHandle field_sig = symbolHandle(THREAD, cp->signature_ref_at(index));
-
-  if (!SignatureVerifier::is_valid_type_signature(field_sig)) {
-    class_format_error(
-      "Invalid signature for field in class %s referenced "
-      "from constant pool index %d", _klass->external_name(), index);
-    return;
-  }
-
-  // Get referenced class type
-  VerificationType ref_class_type = cp_ref_index_to_type(
-    index, cp, CHECK_VERIFY(this));
-  if (!ref_class_type.is_object()) {
-    verify_error(
-      "Expecting reference to class in class %s at constant pool index %d",
-      _klass->external_name(), index);
-    return;
-  }
-  VerificationType target_class_type = ref_class_type;
-
-  assert(sizeof(VerificationType) == sizeof(uintptr_t), 
-        "buffer type must match VerificationType size");
-  uintptr_t field_type_buffer[2];
-  VerificationType* field_type = (VerificationType*)field_type_buffer;
-  // If we make a VerificationType[2] array directly, the compiler calls
-  // to the c-runtime library to do the allocation instead of just 
-  // stack allocating it.  Plus it would run constructors.  This shows up
-  // in performance profiles.
-
-  SignatureStream sig_stream(field_sig, false);
-  VerificationType stack_object_type;
-  int n = change_sig_to_verificationType(
-    &sig_stream, field_type, CHECK_VERIFY(this));
-  u2 bci = bcs->bci();
-  bool is_assignable;
-  switch (bcs->code()) {
-    case Bytecodes::_getstatic: {
-      for (int i = 0; i < n; i++) {
-        current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
-      }
-      break;
-    }
-    case Bytecodes::_putstatic: {
-      for (int i = n - 1; i >= 0; i--) {
-        current_frame->pop_stack(field_type[i], CHECK_VERIFY(this));
-      }
-      break;
-    }
-    case Bytecodes::_getfield: {
-      stack_object_type = current_frame->pop_stack(
-        target_class_type, CHECK_VERIFY(this));
-      for (int i = 0; i < n; i++) {
-        current_frame->push_stack(field_type[i], CHECK_VERIFY(this));
-      }
-      goto check_protected;
-    }
-    case Bytecodes::_putfield: {
-      for (int i = n - 1; i >= 0; i--) {
-        current_frame->pop_stack(field_type[i], CHECK_VERIFY(this));
-      }
-      stack_object_type = current_frame->pop_stack(CHECK_VERIFY(this));
-
-      // The JVMS 2nd edition allows field initialization before the superclass
-      // initializer, if the field is defined within the current class.
-      fieldDescriptor fd;
-      if (stack_object_type == VerificationType::uninitialized_this_type() &&
-          target_class_type.equals(current_type()) &&
-          _klass->find_local_field(field_name(), field_sig(), &fd)) {
-        stack_object_type = current_type();
-      }
-      is_assignable = target_class_type.is_assignable_from(
-        stack_object_type, current_class(), CHECK_VERIFY(this));
-      if (!is_assignable) {
-        verify_error(bci, "Bad type on operand stack in putfield");
-        return;
-      }
-    }
-    check_protected: {
-      if (_this_type == stack_object_type)
-        break; // stack_object_type must be assignable to _current_class_type
-      symbolHandle ref_class_name = symbolHandle(THREAD,
-        cp->klass_name_at(cp->klass_ref_index_at(index)));
-      if (!name_in_supers(ref_class_name(), current_class()))
-        // stack_object_type must be assignable to _current_class_type since:
-        // 1. stack_object_type must be assignable to ref_class.
-        // 2. ref_class must be _current_class or a subclass of it. It can't
-        //    be a superclass of it. See revised JVMS 5.4.4.
-        break;
-
-      klassOop ref_class_oop = load_class(ref_class_name, CHECK);
-      if (is_protected_access(current_class(), ref_class_oop, field_name(), 
-                              field_sig(), false)) {
-        // It's protected access, check if stack object is assignable to
-        // current class.
-        is_assignable = current_type().is_assignable_from(
-          stack_object_type, current_class(), CHECK_VERIFY(this));
-        if (!is_assignable) {
-          verify_error(bci, "Bad access to protected data in getfield");
-          return;
-        }
-      }
-      break;
-    }
-    default: ShouldNotReachHere();
-  }
-}
-
-void ClassVerifier::verify_invoke_init(
-    RawBytecodeStream* bcs, VerificationType ref_class_type, 
-    StackMapFrame* current_frame, u4 code_length, bool *this_uninit, 
-    constantPoolHandle cp, TRAPS) {
-  u2 bci = bcs->bci();
-  VerificationType type = current_frame->pop_stack(
-    VerificationType::reference_check(), CHECK_VERIFY(this));
-  if (type == VerificationType::uninitialized_this_type()) {
-    // The method must be an <init> method of either this class, or one of its
-    // superclasses
-    klassOop oop = current_class()();
-    Klass* klass = oop->klass_part();
-    while (klass != NULL && ref_class_type.name() != klass->name()) {
-      klass = klass->super()->klass_part();
-    }
-    if (klass == NULL) {
-      verify_error(bci, "Bad <init> method call");
-      return;
-    }
-    current_frame->initialize_object(type, current_type());
-    *this_uninit = true;
-  } else if (type.is_uninitialized()) {
-    u2 new_offset = type.bci();
-    address new_bcp = bcs->bcp() - bci + new_offset;
-    if (new_offset > (code_length - 3) || (*new_bcp) != Bytecodes::_new) {
-      verify_error(new_offset, "Expecting new instruction");
-      return;
-    }
-    u2 new_class_index = Bytes::get_Java_u2(new_bcp + 1);
-    verify_cp_class_type(new_class_index, cp, CHECK_VERIFY(this));
-
-    // The method must be an <init> method of the indicated class
-    VerificationType new_class_type = cp_index_to_type(
-      new_class_index, cp, CHECK_VERIFY(this));
-    if (!new_class_type.equals(ref_class_type)) {
-      verify_error(bci, "Call to wrong <init> method");
-      return;
-    }
-    // According to the VM spec, if the referent class is a superclass of the
-    // current class, and is in a different runtime package, and the method is
-    // protected, then the objectref must be the current class or a subclass
-    // of the current class.
-    VerificationType objectref_type = new_class_type;
-    if (name_in_supers(ref_class_type.name(), current_class())) { 
-      klassOop ref_klass = load_class(
-        ref_class_type.name(), CHECK_VERIFY(this));
-      methodOop m = instanceKlass::cast(ref_klass)->uncached_lookup_method(
-        vmSymbols::object_initializer_name(), 
-        cp->signature_ref_at(bcs->get_index_big()));
-      instanceKlassHandle mh(THREAD, m->method_holder());
-      if (m->is_protected() && !mh->is_same_class_package(_klass())) {
-        bool assignable = current_type().is_assignable_from(
-          objectref_type, current_class(), CHECK_VERIFY(this));
-        if (!assignable) {
-          verify_error(bci, "Bad access to protected <init> method");
-          return;
-        }
-      }
-    }
-    current_frame->initialize_object(type, new_class_type);
-  } else {
-    verify_error(bci, "Bad operand type when invoking <init>");
-    return;
-  }
-}
-
-void ClassVerifier::verify_invoke_instructions(
-    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, 
-    bool *this_uninit, VerificationType return_type, 
-    constantPoolHandle cp, TRAPS) {
-  // Make sure the constant pool item is the right type
-  u2 index = bcs->get_index_big();
-  Bytecodes::Code opcode = bcs->code();
-  unsigned int types = (opcode == Bytecodes::_invokeinterface
-                                ? 1 << JVM_CONSTANT_InterfaceMethodref
-                                : 1 << JVM_CONSTANT_Methodref);
-  verify_cp_type(index, cp, types, CHECK_VERIFY(this));
-
-  // Get method name and signature
-  symbolHandle method_name(THREAD, cp->name_ref_at(index));
-  symbolHandle method_sig(THREAD, cp->signature_ref_at(index));
-
-  if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
-    class_format_error(
-      "Invalid method signature in class %s referenced "
-      "from constant pool index %d", _klass->external_name(), index);
-    return;
-  }
-
-  // Get referenced class type
-  VerificationType ref_class_type = cp_ref_index_to_type(
-    index, cp, CHECK_VERIFY(this));
-
-  // For a small signature length, we just allocate 128 bytes instead
-  // of parsing the signature once to find its size.
-  // -3 is for '(', ')' and return descriptor; multiply by 2 is for
-  // longs/doubles to be consertive.
-  assert(sizeof(VerificationType) == sizeof(uintptr_t), 
-        "buffer type must match VerificationType size");
-  uintptr_t on_stack_sig_types_buffer[128];
-  // If we make a VerificationType[128] array directly, the compiler calls
-  // to the c-runtime library to do the allocation instead of just 
-  // stack allocating it.  Plus it would run constructors.  This shows up
-  // in performance profiles.
-
-  VerificationType* sig_types;
-  int size = (method_sig->utf8_length() - 3) * 2;
-  if (size > 128) {
-    // Long and double occupies two slots here.
-    ArgumentSizeComputer size_it(method_sig);
-    size = size_it.size();
-    sig_types = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, VerificationType, size);
-  } else{
-    sig_types = (VerificationType*)on_stack_sig_types_buffer;
-  }
-  SignatureStream sig_stream(method_sig);
-  int sig_i = 0;
-  while (!sig_stream.at_return_type()) {
-    sig_i += change_sig_to_verificationType(
-      &sig_stream, &sig_types[sig_i], CHECK_VERIFY(this));
-    sig_stream.next();
-  }
-  int nargs = sig_i;
-
-#ifdef ASSERT
-  {
-    ArgumentSizeComputer size_it(method_sig);
-    assert(nargs == size_it.size(), "Argument sizes do not match");
-    assert(nargs <= (method_sig->utf8_length() - 3) * 2, "estimate of max size isn't conservative enough");
-  }
-#endif
-
-  // Check instruction operands
-  u2 bci = bcs->bci();
-  if (opcode == Bytecodes::_invokeinterface) {
-    address bcp = bcs->bcp();
-    // 4905268: count operand in invokeinterface should be nargs+1, not nargs.
-    // JSR202 spec: The count operand of an invokeinterface instruction is valid if it is
-    // the difference between the size of the operand stack before and after the instruction
-    // executes.
-    if (*(bcp+3) != (nargs+1)) {
-      verify_error(bci, "Inconsistent args count operand in invokeinterface");
-      return;
-    }
-    if (*(bcp+4) != 0) {
-      verify_error(bci, "Fourth operand byte of invokeinterface must be zero");
-      return;
-    }
-  }
-
-  if (method_name->byte_at(0) == '<') {
-    // Make sure <init> can only be invoked by invokespecial
-    if (opcode != Bytecodes::_invokespecial || 
-        method_name() != vmSymbols::object_initializer_name()) {
-      verify_error(bci, "Illegal call to internal method");
-      return;
-    }
-  } else if (opcode == Bytecodes::_invokespecial
-             && !ref_class_type.equals(current_type())
-             && !ref_class_type.equals(VerificationType::reference_type(
-                  current_class()->super()->klass_part()->name()))) {
-    bool subtype = ref_class_type.is_assignable_from(
-      current_type(), current_class(), CHECK_VERIFY(this));
-    if (!subtype) {
-      verify_error(bci, "Bad invokespecial instruction: "
-          "current class isn't assignable to reference class.");
-       return;
-    }
-  }
-  // Match method descriptor with operand stack
-  for (int i = nargs - 1; i >= 0; i--) {  // Run backwards
-    current_frame->pop_stack(sig_types[i], CHECK_VERIFY(this));
-  }
-  // Check objectref on operand stack
-  if (opcode != Bytecodes::_invokestatic) {
-    if (method_name() == vmSymbols::object_initializer_name()) {  // <init> method
-      verify_invoke_init(bcs, ref_class_type, current_frame, 
-        code_length, this_uninit, cp, CHECK_VERIFY(this));
-    } else {   // other methods
-      // Ensures that target class is assignable to method class.
-      if (opcode == Bytecodes::_invokespecial) {
-        current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
-      } else if (opcode == Bytecodes::_invokevirtual) {
-        VerificationType stack_object_type =
-          current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
-        if (current_type() != stack_object_type) {
-          assert(cp->cache() == NULL, "not rewritten yet");
-          symbolHandle ref_class_name = symbolHandle(THREAD,
-            cp->klass_name_at(cp->klass_ref_index_at(index)));
-          // See the comments in verify_field_instructions() for
-          // the rationale behind this.
-          if (name_in_supers(ref_class_name(), current_class())) {
-            klassOop ref_class = load_class(ref_class_name, CHECK);
-            if (is_protected_access(
-                  _klass, ref_class, method_name(), method_sig(), true)) {
-              // It's protected access, check if stack object is
-              // assignable to current class.
-              bool is_assignable = current_type().is_assignable_from(
-                stack_object_type, current_class(), CHECK_VERIFY(this));
-              if (!is_assignable) {
-                if (ref_class_type.name() == vmSymbols::java_lang_Object()
-                    && stack_object_type.is_array()
-                    && method_name() == vmSymbols::clone_name()) {
-                  // Special case: arrays pretend to implement public Object
-                  // clone().
-                } else {
-                  verify_error(bci,
-                    "Bad access to protected data in invokevirtual");
-                  return;
-                }
-              }
-            }
-          }
-        }
-      } else {
-        assert(opcode == Bytecodes::_invokeinterface, "Unexpected opcode encountered");
-        current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
-      }
-    }
-  }
-  // Push the result type.
-  if (sig_stream.type() != T_VOID) {
-    if (method_name() == vmSymbols::object_initializer_name()) {
-      // <init> method must have a void return type
-      verify_error(bci, "Return type must be void in <init> method");
-      return;
-    }
-    VerificationType return_type[2];
-    int n = change_sig_to_verificationType(
-      &sig_stream, return_type, CHECK_VERIFY(this));
-    for (int i = 0; i < n; i++) {
-      current_frame->push_stack(return_type[i], CHECK_VERIFY(this)); // push types backwards
-    }
-  }
-}
-
-VerificationType ClassVerifier::get_newarray_type(
-    u2 index, u2 bci, TRAPS) {
-  const char* from_bt[] = {
-    NULL, NULL, NULL, NULL, "[Z", "[C", "[F", "[D", "[B", "[S", "[I", "[J", 
-  };
-  if (index < T_BOOLEAN || index > T_LONG) {
-    verify_error(bci, "Illegal newarray instruction");
-    return VerificationType::bogus_type();
-  }
-
-  // from_bt[index] contains the array signature which has a length of 2
-  symbolHandle sig = oopFactory::new_symbol_handle(
-    from_bt[index], 2, CHECK_(VerificationType::bogus_type()));
-  return VerificationType::reference_type(sig);
-}
-
-void ClassVerifier::verify_anewarray(
-    u2 index, constantPoolHandle cp, StackMapFrame* current_frame, TRAPS) {
-  verify_cp_class_type(index, cp, CHECK_VERIFY(this));
-  current_frame->pop_stack(
-    VerificationType::integer_type(), CHECK_VERIFY(this));
-
-  VerificationType component_type = 
-    cp_index_to_type(index, cp, CHECK_VERIFY(this));
-  ResourceMark rm(THREAD);
-  int length;
-  char* arr_sig_str;
-  if (component_type.is_array()) {     // it's an array
-    const char* component_name = component_type.name()->as_utf8();
-    // add one dimension to component
-    length = (int)strlen(component_name) + 1;
-    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length);
-    arr_sig_str[0] = '[';
-    strncpy(&arr_sig_str[1], component_name, length - 1);
-  } else {         // it's an object or interface
-    const char* component_name = component_type.name()->as_utf8();
-    // add one dimension to component with 'L' prepended and ';' postpended.
-    length = (int)strlen(component_name) + 3;
-    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length);
-    arr_sig_str[0] = '[';
-    arr_sig_str[1] = 'L';
-    strncpy(&arr_sig_str[2], component_name, length - 2);
-    arr_sig_str[length - 1] = ';';
-  }
-  symbolHandle arr_sig = oopFactory::new_symbol_handle(
-    arr_sig_str, length, CHECK_VERIFY(this));
-  VerificationType new_array_type = VerificationType::reference_type(arr_sig);
-  current_frame->push_stack(new_array_type, CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_iload(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->get_local(
-    index, VerificationType::integer_type(), CHECK_VERIFY(this));
-  current_frame->push_stack(
-    VerificationType::integer_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_lload(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->get_local_2(
-    index, VerificationType::long_type(), 
-    VerificationType::long2_type(), CHECK_VERIFY(this));
-  current_frame->push_stack_2(
-    VerificationType::long_type(), 
-    VerificationType::long2_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_fload(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->get_local(
-    index, VerificationType::float_type(), CHECK_VERIFY(this));
-  current_frame->push_stack(
-    VerificationType::float_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_dload(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->get_local_2(
-    index, VerificationType::double_type(), 
-    VerificationType::double2_type(), CHECK_VERIFY(this));
-  current_frame->push_stack_2(
-    VerificationType::double_type(), 
-    VerificationType::double2_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_aload(u2 index, StackMapFrame* current_frame, TRAPS) {
-  VerificationType type = current_frame->get_local(
-    index, VerificationType::reference_check(), CHECK_VERIFY(this));
-  current_frame->push_stack(type, CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_istore(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->pop_stack(
-    VerificationType::integer_type(), CHECK_VERIFY(this));
-  current_frame->set_local(
-    index, VerificationType::integer_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_lstore(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->pop_stack_2(
-    VerificationType::long2_type(), 
-    VerificationType::long_type(), CHECK_VERIFY(this));
-  current_frame->set_local_2(
-    index, VerificationType::long_type(), 
-    VerificationType::long2_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_fstore(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->pop_stack(VerificationType::float_type(), CHECK_VERIFY(this));
-  current_frame->set_local(
-    index, VerificationType::float_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_dstore(u2 index, StackMapFrame* current_frame, TRAPS) {
-  current_frame->pop_stack_2(
-    VerificationType::double2_type(), 
-    VerificationType::double_type(), CHECK_VERIFY(this));
-  current_frame->set_local_2(
-    index, VerificationType::double_type(), 
-    VerificationType::double2_type(), CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_astore(u2 index, StackMapFrame* current_frame, TRAPS) {
-  VerificationType type = current_frame->pop_stack(
-    VerificationType::reference_check(), CHECK_VERIFY(this));
-  current_frame->set_local(index, type, CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_iinc(u2 index, StackMapFrame* current_frame, TRAPS) {
-  VerificationType type = current_frame->get_local(
-    index, VerificationType::integer_type(), CHECK_VERIFY(this));
-  current_frame->set_local(index, type, CHECK_VERIFY(this));
-}
-
-void ClassVerifier::verify_return_value(
-    VerificationType return_type, VerificationType type, u2 bci, TRAPS) {
-  if (return_type == VerificationType::bogus_type()) {
-    verify_error(bci, "Method expects a return value");
-    return;
-  }
-  bool match = return_type.is_assignable_from(type, _klass, CHECK_VERIFY(this));
-  if (!match) {
-    verify_error(bci, "Bad return type");
-    return;
-  }
-}
--- a/hotspot/src/share/vm/runtime/verifier.hpp	Tue May 08 19:38:19 2007 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,242 +0,0 @@
-#ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)verifier.hpp	1.41 07/05/05 17:07:02 JVM"
-#endif
-/*
- * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *  
- */
-
-// The verifier class
-class Verifier : AllStatic {
- public:
-  enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 };
-  typedef enum { ThrowException, NoException } Mode;
-
-  /**
-   * Verify the bytecodes for a class.  If 'throw_exception' is true
-   * then the appropriate VerifyError or ClassFormatError will be thrown.
-   * Otherwise, no exception is thrown and the return indicates the 
-   * error.
-   */
-  static bool verify(instanceKlassHandle klass, Mode mode, TRAPS);
-
-  // Return false if the class is loaded by the bootstrap loader.
-  static bool should_verify_for(oop class_loader);
-
-  // Relax certain verifier checks to enable some broken 1.1 apps to run on 1.2.
-  static bool relax_verify_for(oop class_loader);
-
- private:
-  static bool is_eligible_for_verification(instanceKlassHandle klass);
-  static symbolHandle inference_verify(
-    instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS);
-};
-
-class RawBytecodeStream;
-class StackMapFrame;
-class StackMapTable;
-
-// Summary of verifier's memory usage:
-// StackMapTable is stack allocated.
-// StackMapFrame are resource allocated. There is one ResourceMark
-// for each method.
-// There is one mutable StackMapFrame (current_frame) which is updated
-// by abstract bytecode interpretation. frame_in_exception_handler() returns
-// a frame that has a mutable one-item stack (ready for pushing the
-// catch type exception object). All the other StackMapFrame's
-// are immutable (including their locals and stack arrays) after
-// their constructions.
-// locals/stack arrays in StackMapFrame are resource allocated.
-// locals/stack arrays can be shared between StackMapFrame's, except
-// the mutable StackMapFrame (current_frame).
-// Care needs to be taken to make sure resource objects don't outlive
-// the lifetime of their ResourceMark.
-
-// These macros are used similarly to CHECK macros but also check 
-// the status of the verifier and return if that has an error.
-#define CHECK_VERIFY(verifier) \
-  CHECK); if ((verifier)->has_error()) return; (0
-#define CHECK_VERIFY_(verifier, result) \
-  CHECK_(result)); if ((verifier)->has_error()) return (result); (0
-
-// A new instance of this class is created for each class being verified
-class ClassVerifier : public StackObj {
- private:
-  Thread* _thread;
-  symbolHandle _exception_type;
-  char* _message;
-  size_t _message_buffer_len;
-
-  void verify_method(methodHandle method, TRAPS);
-  char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
-  void verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS);
-  void verify_local_variable_table(u4 code_length, char* code_data, TRAPS);
-
-  VerificationType cp_ref_index_to_type(
-      int index, constantPoolHandle cp, TRAPS) {
-    return cp_index_to_type(cp->klass_ref_index_at(index), cp, THREAD);
-  }
-
-  bool is_protected_access(
-    instanceKlassHandle this_class, klassOop target_class,
-    symbolOop field_name, symbolOop field_sig, bool is_method);
-
-  void verify_cp_index(constantPoolHandle cp, int index, TRAPS);
-  void verify_cp_type(
-    int index, constantPoolHandle cp, unsigned int types, TRAPS);
-  void verify_cp_class_type(int index, constantPoolHandle cp, TRAPS);
-
-  u2 verify_stackmap_table(
-    u2 stackmap_index, u2 bci, StackMapFrame* current_frame, 
-    StackMapTable* stackmap_table, bool no_control_flow, TRAPS);
-
-  void verify_exception_handler_targets(
-    u2 bci, bool this_uninit, StackMapFrame* current_frame, 
-    StackMapTable* stackmap_table, TRAPS);
-
-  void verify_ldc(
-    int opcode, u2 index, StackMapFrame *current_frame, 
-    constantPoolHandle cp, u2 bci, TRAPS);
-
-  void verify_switch(
-    RawBytecodeStream* bcs, u4 code_length, char* code_data, 
-    StackMapFrame* current_frame, StackMapTable* stackmap_table, TRAPS);
-
-  void verify_field_instructions(
-    RawBytecodeStream* bcs, StackMapFrame* current_frame, 
-    constantPoolHandle cp, TRAPS);
-
-  void verify_invoke_init(
-    RawBytecodeStream* bcs, VerificationType ref_class_type, 
-    StackMapFrame* current_frame, u4 code_length, bool* this_uninit, 
-    constantPoolHandle cp, TRAPS);
-
-  void verify_invoke_instructions(
-    RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame, 
-    bool* this_uninit, VerificationType return_type, 
-    constantPoolHandle cp, TRAPS);
-
-  VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
-  void verify_anewarray(
-    u2 index, constantPoolHandle cp, StackMapFrame* current_frame, TRAPS);
-  void verify_return_value(
-    VerificationType return_type, VerificationType type, u2 offset, TRAPS);
-
-  void verify_iload (u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_lload (u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_fload (u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_dload (u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_aload (u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_istore(u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_lstore(u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_fstore(u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_dstore(u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_astore(u2 index, StackMapFrame* current_frame, TRAPS);
-  void verify_iinc  (u2 index, StackMapFrame* current_frame, TRAPS);
-
-  bool name_in_supers(symbolOop ref_name, instanceKlassHandle current);
-
-  instanceKlassHandle _klass;  // the class being verified
-  methodHandle        _method; // current method being verified
-  VerificationType    _this_type; // the verification type of the current class
-
- public:
-  enum {
-    BYTECODE_OFFSET = 1,
-    NEW_OFFSET = 2
-  };
-
-  // constructor
-  ClassVerifier(instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS);
-
-  // destructor
-  ~ClassVerifier();
-
-  Thread* thread()             { return _thread; }
-  methodHandle method()        { return _method; }
-  instanceKlassHandle current_class() const { return _klass; }
-  VerificationType current_type() const { return _this_type; }
-
-  // Verifies the class.  If a verify or class file format error occurs, 
-  // the '_exception_name' symbols will set to the exception name and 
-  // the message_buffer will be filled in with the exception message.
-  void verify_class(TRAPS);
-
-  // Return status modes
-  symbolHandle result() const { return _exception_type; }
-  bool has_error() const { return !(result().is_null()); }
-
-  // Called when verify or class format errors are encountered.  
-  // May throw an exception based upon the mode.
-  void verify_error(u2 offset, const char* fmt, ...);
-  void verify_error(const char* fmt, ...);
-  void class_format_error(const char* fmt, ...);
-  void format_error_message(const char* fmt, int offset, va_list args);
-
-  klassOop load_class(symbolHandle name, TRAPS);
-
-  int change_sig_to_verificationType(
-    SignatureStream* sig_type, VerificationType* inference_type, TRAPS);
-
-  VerificationType cp_index_to_type(int index, constantPoolHandle cp, TRAPS) {
-    return VerificationType::reference_type(
-      symbolHandle(THREAD, cp->klass_name_at(index)));
-  }
-
-  static bool _verify_verbose;  // for debugging
-};
-
-inline int ClassVerifier::change_sig_to_verificationType(
-    SignatureStream* sig_type, VerificationType* inference_type, TRAPS) {
-  BasicType bt = sig_type->type();
-  switch (bt) {
-    case T_OBJECT:
-    case T_ARRAY:
-      { 
-        symbolOop name = sig_type->as_symbol(CHECK_0); 
-        *inference_type = 
-          VerificationType::reference_type(symbolHandle(THREAD, name));
-        return 1;
-      }
-    case T_LONG: 
-      *inference_type = VerificationType::long_type();
-      *++inference_type = VerificationType::long2_type();
-      return 2;
-    case T_DOUBLE:
-      *inference_type = VerificationType::double_type();
-      *++inference_type = VerificationType::double2_type();
-      return 2;
-    case T_INT:
-    case T_BOOLEAN:
-    case T_BYTE:
-    case T_CHAR:
-    case T_SHORT:
-      *inference_type = VerificationType::integer_type();
-      return 1;
-    case T_FLOAT:
-      *inference_type = VerificationType::float_type();
-      return 1;
-    default:
-      ShouldNotReachHere();
-      return 1;
-  }
-}
--- a/hotspot/src/share/vm/runtime/vframe.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/vframe.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vframe.cpp	1.161 07/05/05 17:07:00 JVM"
+#pragma ident "@(#)vframe.cpp	1.162 07/05/17 16:07:02 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -93,52 +93,6 @@
 
 // ------------- javaVFrame --------------
 
-#ifdef JVMPI_SUPPORT
-//
-// Fabricate heavyweight monitor information for each lightweight monitor
-// found in the Java VFrame.
-//
-void javaVFrame::jvmpi_fab_heavy_monitors(bool query, int* fab_index, int frame_count, GrowableArray<ObjectMonitor*>* fab_list) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  ResourceMark rm;
-
-  GrowableArray<MonitorInfo*>* mons = monitors();
-  if (mons->is_empty()) return;
-
-  bool found_first_monitor = false;
-  for (int index = (mons->length()-1); index >= 0; index--) {
-    MonitorInfo* monitor = mons->at(index);
-    if (monitor->owner() == NULL) continue; // skip unowned monitor
-    //
-    // If we haven't found a monitor before, this is the first frame, and
-    // the thread is blocked, then we are trying to enter this monitor.
-    // We skip it because we have already seen it before from the monitor 
-    // cache walk.
-    //
-    if (!found_first_monitor && frame_count == 0) {
-      switch (thread()->thread_state()) {
-      case _thread_blocked:
-      case _thread_blocked_trans:
-        continue;
-      }
-    }
-    found_first_monitor = true;
-
-    markOop mark = monitor->owner()->mark();
-    if (mark->has_locker()) {
-      if (!query) {   // not just counting so create and store at the current element
-        // fabricate the heavyweight monitor from lightweight info
-        ObjectMonitor *heavy = new ObjectMonitor();
-        heavy->set_object(monitor->owner());  // use the owning object
-        heavy->set_owner(thread());           // use thread instead of stack address for speed
-        fab_list->at_put(*fab_index, heavy);
-      }
-      (*fab_index)++;
-    }
-  }
-}
-#endif // JVMPI_SUPPORT
-
 GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
   assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current() == thread(),
          "must be at safepoint or it's a java frame of the current thread");
--- a/hotspot/src/share/vm/runtime/vframe.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/vframe.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)vframe.hpp	1.88 07/05/05 17:06:52 JVM"
+#pragma ident "@(#)vframe.hpp	1.89 07/05/17 16:07:04 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -122,11 +122,6 @@
     return (javaVFrame*) vf;
   }
 
-#ifdef JVMPI_SUPPORT
-  // fabricate heavyweight monitors for lightweight monitors
-  void jvmpi_fab_heavy_monitors(bool query, int* index, int frame_count, GrowableArray<ObjectMonitor*>* fab_list);
-#endif // JVMPI_SUPPORT
-
   // Return an array of monitors locked by this frame in the youngest to oldest order
   GrowableArray<MonitorInfo*>* locked_monitors();
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)vmStructs.cpp	1.183 07/05/05 17:07:03 JVM"
+#pragma ident "@(#)vmStructs.cpp	1.184 07/05/17 16:07:08 JVM"
 #endif
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -854,16 +854,6 @@
   /* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must  */
   /* be present there)                                                */
 
-#ifdef JVMPI_SUPPORT
-#define JVMPI_VM_STRUCTS(static_field) \
-  static_field(Universe, _jvmpi_alloc_event_enabled,                 Universe::JVMPIState) \
-  static_field(Universe, _jvmpi_move_event_enabled,                  bool) \
-  static_field(Universe, _jvmpi_jni_global_alloc_event_enabled,      bool) \
-  static_field(Universe, _jvmpi_jni_global_free_event_enabled,       bool) \
-  static_field(Universe, _jvmpi_jni_weak_global_alloc_event_enabled, bool) \
-  static_field(Universe, _jvmpi_jni_weak_global_free_event_enabled,  bool)
-#endif // JVMPI_SUPPORT
-
 //--------------------------------------------------------------------------------
 // VM_TYPES
 //
@@ -1394,12 +1384,6 @@
   /* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must be */
   /* present there)                                                   */
 
-#ifdef JVMPI_SUPPORT
-#define JVMPI_VM_TYPES(declare_type, declare_integer_type) \
-   declare_type(JVMPIDaemonThread, JavaThread) \
-   declare_integer_type(Universe::JVMPIState)
-#endif // JVMPI_SUPPORT
-
 //--------------------------------------------------------------------------------
 // VM_INT_CONSTANTS
 //
@@ -2075,10 +2059,6 @@
              GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
              GENERATE_VM_STRUCT_LAST_ENTRY)
 
-#ifdef JVMPI_SUPPORT
-  JVMPI_VM_STRUCTS(GENERATE_STATIC_VM_STRUCT_ENTRY)
-#endif // JVMPI_SUPPORT
-
   VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_STATIC_VM_STRUCT_ENTRY, \
                  GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, \
@@ -2112,9 +2092,6 @@
            GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY,
            GENERATE_VM_TYPE_LAST_ENTRY)
 
-#ifdef JVMPI_SUPPORT
-  JVMPI_VM_TYPES(GENERATE_VM_TYPE_ENTRY, GENERATE_INTEGER_VM_TYPE_ENTRY)
-#endif // JVMPI_SUPPORT
 
   VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
                GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
@@ -2200,9 +2177,6 @@
              CHECK_NO_OP,
              CHECK_SENTINEL);
 
-#ifdef JVMPI_SUPPORT
-  JVMPI_VM_STRUCTS(CHECK_STATIC_VM_STRUCT_ENTRY);
-#endif // JVMPI_SUPPORT
 
   VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
                  CHECK_STATIC_VM_STRUCT_ENTRY,
@@ -2234,9 +2208,6 @@
            CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY,
            CHECK_SENTINEL);
 
-#ifdef JVMPI_SUPPORT
-  JVMPI_VM_TYPES(CHECK_VM_TYPE_ENTRY, CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
-#endif // JVMPI_SUPPORT
 
   VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
                CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
@@ -2295,9 +2266,6 @@
                         CHECK_NO_OP, \
                         CHECK_NO_OP, \
                         CHECK_SENTINEL));
-#ifdef JVMPI_SUPPORT
-  debug_only(JVMPI_VM_STRUCTS(ENSURE_FIELD_TYPE_PRESENT));
-#endif // JVMPI_SUPPORT
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT, \
                             CHECK_NO_OP, \
--- a/hotspot/src/share/vm/services/threadService.cpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/services/threadService.cpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_SRC
-#pragma ident "@(#)threadService.cpp	1.53 07/05/05 17:07:06 JVM"
+#pragma ident "@(#)threadService.cpp	1.54 07/05/17 16:07:12 JVM"
 #endif
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -817,13 +817,8 @@
           owner_desc = "\n  in JNI, which is held by";
         }
       } else {
-#ifdef JVMPI_SUPPORT
-        // No Java object associated - a JVMTI/JVMPI raw monitor
-        owner_desc = " (JVMTI/JVMPI raw monitor),\n  which is held by";
-#else // !JVMPI_SUPPORT
         // No Java object associated - a JVMTI raw monitor
         owner_desc = " (JVMTI raw monitor),\n  which is held by";
-#endif // JVMPI_SUPPORT
       }
       currentThread = Threads::owning_thread_from_monitor_owner(
         (address)waitingToLockMonitor->owner(), false /* no locking needed */);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/copy.cpp	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,76 @@
+#ifdef USE_PRAGMA_IDENT_SRC
+#pragma ident "@(#)copy.cpp	1.2 07/04/13 10:35:00 JVM"
+#endif
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_copy.cpp.incl"
+
+
+// Copy bytes; larger units are filled atomically if everything is aligned.
+void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
+  address src = (address) from;
+  address dst = (address) to;
+  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;
+
+  // (Note:  We could improve performance by ignoring the low bits of size,
+  // and putting a short cleanup loop after each bulk copy loop.
+  // There are plenty of other ways to make this faster also,
+  // and it's a slippery slope.  For now, let's keep this code simple
+  // since the simplicity helps clarify the atomicity semantics of
+  // this operation.  There are also CPU-specific assembly versions
+  // which may or may not want to include such optimizations.)
+
+  if (bits % sizeof(jlong) == 0) {
+    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
+  } else if (bits % sizeof(jint) == 0) {
+    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
+  } else if (bits % sizeof(jshort) == 0) {
+    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
+  } else {
+    // Not aligned, so no need to be atomic.
+    Copy::conjoint_bytes((void*) src, (void*) dst, size);
+  }
+}
+
+
+// Fill bytes; larger units are filled atomically if everything is aligned.
+void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
+  address dst = (address) to;
+  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
+  if (bits % sizeof(jlong) == 0) {
+    jlong fill = (julong)( (jubyte)value ); // zero-extend
+    if (fill != 0) {
+      fill += fill << 8;
+      fill += fill << 16;
+      fill += fill << 32;
+    }
+    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
+    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
+      *(jlong*)(dst + off) = fill;
+    }
+  } else if (bits % sizeof(jint) == 0) {
+    jint fill = (juint)( (jubyte)value ); // zero-extend
+    if (fill != 0) {
+      fill += fill << 8;
+      fill += fill << 16;
+    }
+    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
+    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
+      *(jint*)(dst + off) = fill;
+    }
+  } else if (bits % sizeof(jshort) == 0) {
+    jshort fill = (jushort)( (jubyte)value ); // zero-extend
+    fill += fill << 8;
+    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
+    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
+      *(jshort*)(dst + off) = fill;
+    }
+  } else {
+    // Not aligned, so no need to be atomic.
+    Copy::fill_to_bytes(dst, size, value);
+  }
+}
--- a/hotspot/src/share/vm/utilities/copy.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/utilities/copy.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)copy.hpp	1.14 07/05/05 17:07:08 JVM"
+#pragma ident "@(#)copy.hpp	1.15 07/05/17 16:07:14 JVM"
 #endif
 /*
  * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
@@ -156,6 +156,12 @@
     pd_conjoint_oops_atomic(from, to, count);
   }
 
+  // Copy a span of memory.  If the span is an integral number of aligned
+  // longs, words, or ints, copy those units atomically.
+  // The largest atomic transfer unit is 8 bytes, or the largest power
+  // of two which divides all of from, to, and size, whichever is smaller.
+  static void conjoint_memory_atomic(void* from, void* to, size_t size);
+
   // bytes,                 conjoint array, atomic on each byte (not that it matters)
   static void arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
     assert_non_zero(count);
@@ -241,6 +247,12 @@
     pd_fill_to_bytes(to, count, value);
   }
 
+  // Fill a span of memory.  If the span is an integral number of aligned
+  // longs, words, or ints, store to those units atomically.
+  // The largest atomic transfer unit is 8 bytes, or the largest power
+  // of two which divides both to and size, whichever is smaller.
+  static void fill_to_memory_atomic(void* to, size_t size, jubyte value = 0);
+
   // Zero-fill methods
 
   // Zero word-aligned words, not atomic on each word
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Tue May 08 19:38:19 2007 +0000
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 #ifdef USE_PRAGMA_IDENT_HDR
-#pragma ident "@(#)globalDefinitions.hpp	1.215 07/05/05 17:07:09 JVM"
+#pragma ident "@(#)globalDefinitions.hpp	1.216 07/05/17 16:07:16 JVM"
 #endif
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
@@ -1086,10 +1086,3 @@
 # endif /* ASSERT */
 
 #define ARRAY_SIZE(array) (sizeof(array)/sizeof((array)[0]))
-
-
-//---------------------------------------------------------------------------
-//
-// Identifies code that supports JVM/PI in preparation for removal (4914266).
-// Phase 2 of 4914266 disables inclusion of JVM/PI code by default.
-// #define JVMPI_SUPPORT
--- a/j2se/make/common/Defs-linux.gmk	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/common/Defs-linux.gmk	Fri May 25 00:49:14 2007 +0000
@@ -24,7 +24,7 @@
 #
 
 #
-# @(#)Defs-linux.gmk	1.163 07/05/05
+# @(#)Defs-linux.gmk	1.164 07/05/08
 #
 # Makefile to specify compiler flags for programs and libraries
 # targeted to Linux.  Should not contain any rules.
@@ -108,17 +108,21 @@
 #    We need this frame pointer to make it easy to walk the stacks.
 #    This should be the default on X86, but ia64 and amd64 may not have this
 #    as the default.
-CFLAGS_REQUIRED_amd64 += -fno-omit-frame-pointer
-CFLAGS_REQUIRED_i586  += -fno-omit-frame-pointer
-CFLAGS_REQUIRED_ia64  += -fno-omit-frame-pointer
-CFLAGS_REQUIRED	       = $(CFLAGS_REQUIRED_$((ARCH))
+CFLAGS_REQUIRED_amd64   += -fno-omit-frame-pointer -D_LITTLE_ENDIAN
+CFLAGS_REQUIRED_i586    += -fno-omit-frame-pointer -D_LITTLE_ENDIAN
+CFLAGS_REQUIRED_ia64    += -fno-omit-frame-pointer -D_LITTLE_ENDIAN
+CFLAGS_REQUIRED_sparcv9 += -m64 -mcpu=v9
+LDFLAGS_COMMON_sparcv9  += -m64 -mcpu=v9
+CFLAGS_REQUIRED_sparc   += -m32 -mcpu=v9
+LDFLAGS_COMMON_sparc    += -m32 -mcpu=v9
+CFLAGS_REQUIRED         =  $(CFLAGS_REQUIRED_$(ARCH))
+LDFLAGS_COMMON          += $(LDFLAGS_COMMON_$(ARCH))
 
 # Add in platform specific optimizations for all opt levels
 CC_HIGHEST_OPT += $(_OPT_$(ARCH))
 CC_HIGHER_OPT  += $(_OPT_$(ARCH))
 CC_LOWER_OPT   += $(_OPT_$(ARCH))
 
-
 # If NO_OPTIMIZATIONS is defined in the environment, turn all optimzations off
 ifdef NO_OPTIMIZATIONS
   CC_HIGHEST_OPT = $(CC_NO_OPT)
@@ -181,7 +185,7 @@
 endif
 
 CPPFLAGS_COMMON = -D$(ARCH) -DARCH='"$(ARCH)"' -DLINUX $(VERSION_DEFINES) \
-		  -D_LARGEFILE64_SOURCE -D_GNU_SOURCE -D_REENTRANT -D_LITTLE_ENDIAN 
+		  -D_LARGEFILE64_SOURCE -D_GNU_SOURCE -D_REENTRANT
 
 ifeq ($(ARCH_DATA_MODEL), 64)
 CPPFLAGS_COMMON += -D_LP64=1
--- a/j2se/make/common/shared/Compiler-gcc.gmk	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/common/shared/Compiler-gcc.gmk	Fri May 25 00:49:14 2007 +0000
@@ -70,19 +70,28 @@
   else
     CXX            = $(COMPILER_PATH)g++
   endif
+  ifneq ("$(findstring sparc,$(ARCH))", "")
+    # sparc or sparcv9
+    REQUIRED_CC_VER = 4.0
+    REQUIRED_GCC_VER = 4.0.*
+  else
   ifeq ($(ARCH_DATA_MODEL), 32)
+    # i586
     REQUIRED_CC_VER = 3.2
     REQUIRED_GCC_VER = 3.2.1*
     REQUIRED_GCC_VER_INT = 3.2.1-7a
   else
-    ifeq ($(ARCH), amd64)
-      REQUIRED_CC_VER = 3.2
-      REQUIRED_GCC_VER = 3.2.*
-    endif
-    ifeq ($(ARCH), ia64)
-      REQUIRED_CC_VER = 3.2
-      REQUIRED_GCC_VER = 2.9[56789].*
-    endif
+  ifeq ($(ARCH), amd64)
+    # amd64
+    REQUIRED_CC_VER = 3.2
+    REQUIRED_GCC_VER = 3.2.*
+  endif
+  ifeq ($(ARCH), ia64)
+    # ia64
+    REQUIRED_CC_VER = 3.2
+    REQUIRED_GCC_VER = 2.9[56789].*
+  endif
+  endif
   endif
   # Option used to create a shared library
   SHARED_LIBRARY_FLAG = -shared -mimpure-text
--- a/j2se/make/common/shared/Platform.gmk	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/common/shared/Platform.gmk	Fri May 25 00:49:14 2007 +0000
@@ -40,7 +40,7 @@
 PLATFORM_SHARED=done
 
 # Possible Input variables:
-#     ARCH_DATA_MODEL             32 or 64
+#     ARCH_DATA_MODEL             32 or 64, default to 32
 #     CROSS_COMPILE_ARCH          windows only: cross compile to ARCH needed
 #                                 (must set ARCH_DATA_MODEL=64 too)
 #     USER, LOGNAME               user name (runs logname, or id if not set)
@@ -58,11 +58,11 @@
 #     OS_VENDOR                   company name
 #     TEMP_DISK                   /tmp or C:/temp
 #     ARCH_DATA_MODEL             32 or 64
-#     ARCH                        sparc, i386, sparcv9, amd64, or ia64
-#     ARCH_FAMILY                 sparc or i386
-#     ARCHPROP                    sparc, i386, or x86
+#     ARCH                        sparc, sparcv9, i586, amd64, or ia64
+#     ARCH_FAMILY                 sparc or i586
+#     ARCHPROP                    sparc or x86
 #     ARCH_VM_SUBDIR              jre/bin, jre/lib/sparc, etc.
-#     LIBARCH                     sparc, i386, sparcv9, or amd64
+#     LIBARCH                     sparc, sparcv9, i386, amd64, or ia64
 #     DEV_NULL                    destination of /dev/null, NUL or /dev/NULL
 #     CLASSPATH_SEPARATOR         separator in classpath, ; or :
 #     LIB_PREFIX                  dynamic or static library prefix, lib or empty
@@ -190,7 +190,7 @@
   OS_VERSION := $(shell uname -r)
   # Arch and OS name/version
   mach := $(shell uname -m)
-  archExpr = case "$(mach)" in  \
+  archExpr = case "$(mach)" in \
                 i[3-9]86) \
                     echo i586 \
                     ;; \
@@ -200,31 +200,49 @@
                 x86_64) \
                     echo amd64 \
                     ;; \
-                sparc*)  \
+                sparc*) \
                     echo sparc \
                     ;; \
                 *) \
-                    echo $(mach)  \
+                    echo $(mach) \
                     ;; \
-        esac
+      esac
   ARCH        := $(shell $(archExpr) )
-  ARCH_FAMILY = $(ARCH)
+  ARCH_FAMILY := $(ARCH)
+
   # Linux builds may be 32-bit or 64-bit data model.
-  ifndef ARCH_DATA_MODEL
-    ifeq ($(ARCH), i586)
+  ifeq ($(ARCH), sparc)
+    # Linux sparc build can be either 32-bit or 64-bit.
+    #   Default to 32, but allow explicit setting to 32 or 64.
+    ifndef ARCH_DATA_MODEL
       ARCH_DATA_MODEL=32
+    endif
+    ifeq ($(ARCH_DATA_MODEL), 32)
+      ARCH=sparc
     else
-      ARCH_DATA_MODEL=64
+      ARCH=sparcv9
+    endif
+  else
+    # i586 is 32-bit, amd64 is 64-bit
+    ifndef ARCH_DATA_MODEL
+      ifeq ($(ARCH), i586)
+        ARCH_DATA_MODEL=32
+      else
+        ARCH_DATA_MODEL=64
+      endif
     endif
   endif
+
   # Need to maintain the jre/lib/i386 location for 32-bit Intel
   ifeq ($(ARCH), i586)
     LIBARCH = i386
   else
     LIBARCH = $(ARCH)
   endif
+
   # Value of Java os.arch property
-  ARCHPROP   = $(LIBARCH)
+  ARCHPROP  = $(LIBARCH)
+
   # Suffix for file bundles used in previous release
   BUNDLE_FILE_SUFFIX=.tar.gz
   # Minimum disk space needed as determined by running 'du -sk' on 
--- a/j2se/make/sun/awt/Makefile	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/sun/awt/Makefile	Fri May 25 00:49:14 2007 +0000
@@ -135,7 +135,7 @@
 
 # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv SOLARIS-SPARC
 # solaris-sparc and solaris-sparcv9 both build 'vis'
-ifeq ($(ARCH_FAMILY), sparc)
+ifeq ("$(PLATFORM)-$(ARCH_FAMILY)", "solaris-sparc")
   FILES_c += $(FILES_2D_vis)
   ASFLAGS += -P
   FILES_s += $(TARGDIR)mlib_v_ImageCopy_blk.s
--- a/j2se/make/sun/awt/mawt.gmk	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/sun/awt/mawt.gmk	Fri May 25 00:49:14 2007 +0000
@@ -141,7 +141,7 @@
 CFLAGS += -DMOTIF_VERSION=$(MOTIF_VERSION)
 
 ifeq ($(STATIC_MOTIF),true)
-    LIBXM = $(MOTIF_LIB)/libXm.a -lXp 
+    LIBXM = $(MOTIF_LIB)/libXm.a -lXp -lXmu
     ifeq ($(PLATFORM), linux)
 	ifeq ($(ARCH_DATA_MODEL), 64)
             LIBXT = -lXt
--- a/j2se/make/sun/jdbc/Makefile	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/sun/jdbc/Makefile	Fri May 25 00:49:14 2007 +0000
@@ -114,7 +114,7 @@
 	$(COMPILE.c) $(CC_OBJECT_OUTPUT_FLAG)$@ $(CFLAGS_GPROF) $<
 $(ODBC_FAKE_LIBRARIES): $(TEMPDIR)/dummyodbc.o
 	@$(prep-target)
-	$(CC) $(SHARED_LIBRARY_FLAG) -o $@ $< $(EXTRA_LIBS)
+	$(CC) $(SHARED_LIBRARY_FLAG) $(LDFLAGS_COMMON) -o $@ $< $(EXTRA_LIBS)
 clean::
 	$(RM) -f $(ODBC_FAKE_LIBRARIES)
 	$(RM) -f $(TEMPDIR)/dummyodbc.c
--- a/j2se/make/templates/bsd-header	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/templates/bsd-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All rights reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
--- a/j2se/make/templates/gpl-cp-header	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/templates/gpl-cp-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All Rights Reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/j2se/make/templates/gpl-header	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/make/templates/gpl-header	Fri May 25 00:49:14 2007 +0000
@@ -1,4 +1,4 @@
-Copyright (c) %year% Sun Microsystems, Inc.  All Rights Reserved.
+Copyright %YEARS% Sun Microsystems, Inc.  All Rights Reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/j2se/src/share/back/ThreadReferenceImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/back/ThreadReferenceImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -651,7 +651,8 @@
                 break;
     
             default:
-                return JDWP_ERROR(INVALID_TAG);
+                error =  AGENT_ERROR_INVALID_TAG;
+                break;
         }
     }
     {
--- a/j2se/src/share/back/util.h	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/back/util.h	Fri May 25 00:49:14 2007 +0000
@@ -289,8 +289,8 @@
 #define jlong_zero       ((jlong) 0)
 #define jlong_one        ((jlong) 1)
 
-#define jlong_to_ptr(a)  ((void*)(a))
-#define ptr_to_jlong(a)  ((jlong)(a))
+#define jlong_to_ptr(a)  ((void*)(intptr_t)(a))
+#define ptr_to_jlong(a)  ((jlong)(intptr_t)(a))
 #define jint_to_jlong(a) ((jlong)(a))
 #define jlong_to_jint(a) ((jint)(a))
 
--- a/j2se/src/share/classes/com/sun/corba/se/impl/activation/CommandHandler.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/corba/se/impl/activation/CommandHandler.java	Fri May 25 00:49:14 2007 +0000
@@ -29,7 +29,7 @@
 import java.io.PrintStream;
 
 /**
- * @version     1.21, 07/05/06
+ * @version     1.21, 07/05/24
  * @author      Rohit Garg
  * @since       JDK1.2
  */
--- a/j2se/src/share/classes/com/sun/jdi/connect/IllegalConnectorArgumentsException.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/jdi/connect/IllegalConnectorArgumentsException.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
  */
 public class IllegalConnectorArgumentsException extends Exception
 {
-    List names;
+    List<String> names;
 
     /**
      * Construct an <code>IllegalConnectorArgumentsException</code>
@@ -50,7 +50,7 @@
     public IllegalConnectorArgumentsException(String s,
                                               String name) {
         super(s);
-        names = new ArrayList(1);
+        names = new ArrayList<String>(1);
         names.add(name);
     }
 
@@ -65,7 +65,7 @@
     public IllegalConnectorArgumentsException(String s, List<String> names) {
         super(s);
 
-        this.names = new ArrayList(names);
+        this.names = new ArrayList<String>(names);
     }
 
     /**
--- a/j2se/src/share/classes/com/sun/script/javascript/ExternalScriptable.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/script/javascript/ExternalScriptable.java	Fri May 25 00:49:14 2007 +0000
@@ -36,6 +36,7 @@
  * @author A. Sundararajan
  * @since 1.6
  */
+
 final class ExternalScriptable implements Scriptable {
     /* Underlying ScriptContext that we use to store
      * named variables of this scope.
@@ -51,7 +52,7 @@
      * to store such variables of this scope. This map is not exposed to
      * JSR 223 API. We can just script objects "as is" and need not convert.
      */
-    private Map indexedProps;
+    private Map<Object, Object> indexedProps;
 
     // my prototype
     private Scriptable prototype;
@@ -59,10 +60,10 @@
     private Scriptable parent;
 
     ExternalScriptable(ScriptContext context) {
-        this(context, new HashMap());
+        this(context, new HashMap<Object, Object>());
     }
 
-    ExternalScriptable(ScriptContext context, Map indexedProps) {
+    ExternalScriptable(ScriptContext context, Map<Object, Object> indexedProps) {
         if (context == null) {
             throw new NullPointerException("context is null");
         }
--- a/j2se/src/share/classes/com/sun/script/javascript/JavaAdapter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/script/javascript/JavaAdapter.java	Fri May 25 00:49:14 2007 +0000
@@ -80,7 +80,7 @@
     public Scriptable construct(Context cx, Scriptable scope, Object[] args)
     throws RhinoException {
         if (args.length == 2) {
-            Class clazz = null;
+            Class<?> clazz = null;
             Object obj1 = args[0];
             if (obj1 instanceof Wrapper) {
                 Object o = ((Wrapper)obj1).unwrap();
--- a/j2se/src/share/classes/com/sun/script/javascript/RhinoClassShutter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/script/javascript/RhinoClassShutter.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
  * @since 1.6
  */
 final class RhinoClassShutter implements ClassShutter {
-    private static Map protectedClasses;
+    private static Map<String, Boolean> protectedClasses;
     private static RhinoClassShutter theInstance;
     
     private RhinoClassShutter() {
@@ -47,7 +47,7 @@
     static synchronized ClassShutter getInstance() {
         if (theInstance == null) {
             theInstance = new RhinoClassShutter();
-            protectedClasses = new HashMap();
+            protectedClasses = new HashMap<String, Boolean>();
             
             // For now, we just have AccessController. Allowing scripts
             // to this class will allow it to execute doPrivileged in
--- a/j2se/src/share/classes/com/sun/script/javascript/RhinoScriptEngine.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/script/javascript/RhinoScriptEngine.java	Fri May 25 00:49:14 2007 +0000
@@ -57,7 +57,7 @@
     /* map used to store indexed properties in engine scope
      * refer to comment on 'indexedProps' in ExternalScriptable.java.
      */
-    private Map indexedProps;
+    private Map<Object, Object> indexedProps;
 
     private ScriptEngineFactory factory;
     private InterfaceImplementor implementor;
@@ -95,7 +95,7 @@
             cx.exit();
         }
       
-        indexedProps = new HashMap();
+        indexedProps = new HashMap<Object, Object>();
  
         //construct object used to implement getInterface
         implementor = new InterfaceImplementor(this) {
--- a/j2se/src/share/classes/com/sun/script/util/InterfaceImplementor.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/script/util/InterfaceImplementor.java	Fri May 25 00:49:14 2007 +0000
@@ -64,7 +64,7 @@
             Object result;
             final Method m = method;
             final Object[] a = args;
-            result = AccessController.doPrivileged(new PrivilegedExceptionAction() {
+            result = AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
                 public Object run() throws Exception {
                     if (thiz == null) {
                         return engine.invokeFunction(m.getName(), a);
--- a/j2se/src/share/classes/com/sun/tools/corba/se/idl/first.set	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/corba/se/idl/first.set	Fri May 25 00:49:14 2007 +0000
@@ -6,7 +6,7 @@
  *    THIS PRODUCT CONTAINS RESTRICTED MATERIALS OF IBM                        
  *    5639-D57, (C) COPYRIGHT International Business Machines Corp., 1997, 1998
  *                                        
- *    @(#) 1.3 @(#)first.set	1.3 04/30/07 19:05:31 [05/06/07 02:10:47]
+ *    @(#) 1.3 @(#)first.set	1.3 04/30/07 19:05:31 [05/24/07 00:42:10]
  */
 
 CORBA IDL
--- a/j2se/src/share/classes/com/sun/tools/corba/se/idl/follow.set	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/corba/se/idl/follow.set	Fri May 25 00:49:14 2007 +0000
@@ -6,7 +6,7 @@
  *    THIS PRODUCT CONTAINS RESTRICTED MATERIALS OF IBM                        
  *    5639-D57, (C) COPYRIGHT International Business Machines Corp., 1997, 1998
  *                                        
- *    @(#) 1.3 @(#)follow.set	1.3 04/30/07 19:05:31 [05/06/07 02:10:47]
+ *    @(#) 1.3 @(#)follow.set	1.3 04/30/07 19:05:31 [05/24/07 00:42:10]
  */
 
 CORBA IDL
--- a/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ChildSession.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ChildSession.java	Fri May 25 00:49:14 2007 +0000
@@ -62,7 +62,8 @@
     }
 	
     public ChildSession(ExecutionManager runtime,
-			LaunchingConnector connector, Map arguments,
+			LaunchingConnector connector, 
+		        Map<String, Connector.Argument> arguments,
 			InputListener input,
 			OutputListener output,
 			OutputListener error,
@@ -180,15 +181,15 @@
 					String cmdLine) {
         VirtualMachineManager manager = Bootstrap.virtualMachineManager();
         LaunchingConnector connector = manager.defaultConnector();
-        Map arguments = connector.defaultArguments();
-        ((Connector.Argument)arguments.get("options")).setValue(userVMArgs);
-        ((Connector.Argument)arguments.get("main")).setValue(cmdLine);
+        Map<String, Connector.Argument> arguments = connector.defaultArguments();
+        arguments.get("options").setValue(userVMArgs);
+        arguments.get("main").setValue(cmdLine);
         return generalGetVM(diagnostics, connector, arguments);
     }
 
     static private VirtualMachine generalGetVM(OutputListener diagnostics,
                                                LaunchingConnector connector, 
-                                               Map arguments) {
+                                               Map<String, Connector.Argument> arguments) {
         VirtualMachine vm = null;
         try {
             diagnostics.putString("Starting child.");
--- a/j2se/src/share/classes/com/sun/tools/example/debug/bdi/EventRequestSpecList.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/bdi/EventRequestSpecList.java	Fri May 25 00:49:14 2007 +0000
@@ -33,8 +33,8 @@
 class EventRequestSpecList {
     
     // all specs
-    private List eventRequestSpecs = Collections.synchronizedList(
-                                                  new ArrayList());
+    private List<EventRequestSpec> eventRequestSpecs = Collections.synchronizedList(
+                                                  new ArrayList<EventRequestSpec>());
 
     final ExecutionManager runtime;
 
@@ -123,10 +123,10 @@
         //### notify delete - here?
     }
 
-    List eventRequestSpecs() {
+    List<EventRequestSpec> eventRequestSpecs() {
         // We need to make a copy to avoid synchronization problems
         synchronized (eventRequestSpecs) {
-            return new ArrayList(eventRequestSpecs);
+            return new ArrayList<EventRequestSpec>(eventRequestSpecs);
         }
     }
 
--- a/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ExecutionManager.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ExecutionManager.java	Fri May 25 00:49:14 2007 +0000
@@ -56,7 +56,7 @@
 
   // Session Listeners
 
-    Vector sessionListeners = new Vector();
+    Vector<SessionListener> sessionListeners = new Vector<SessionListener>();
 
     public void addSessionListener(SessionListener listener) {
 	sessionListeners.add(listener);
@@ -68,7 +68,7 @@
 
   // Spec Listeners
 
-  Vector specListeners = new Vector();
+  Vector<SpecListener> specListeners = new Vector<SpecListener>();
 
     public void addSpecListener(SpecListener cl) {
 	specListeners.add(cl);
@@ -80,7 +80,7 @@
 
     // JDI Listeners
 
-    Vector jdiListeners = new Vector();
+    Vector<JDIListener> jdiListeners = new Vector<JDIListener>();
 
     /**
      * Adds a JDIListener
@@ -105,7 +105,7 @@
 
   // App Echo Listeners 
 
-    private Vector appEchoListeners = new Vector();
+    private Vector<OutputListener> appEchoListeners = new Vector<OutputListener>();
 
     public void addApplicationEchoListener(OutputListener l) {
 	appEchoListeners.addElement(l);
@@ -117,7 +117,7 @@
 
   // App Output Listeners
 
-    private Vector appOutputListeners = new Vector();
+    private Vector<OutputListener> appOutputListeners = new Vector<OutputListener>();
 
     public void addApplicationOutputListener(OutputListener l) {
 	appOutputListeners.addElement(l);
@@ -129,7 +129,7 @@
 
   // App Error Listeners
 
-    private Vector appErrorListeners = new Vector();
+    private Vector<OutputListener> appErrorListeners = new Vector<OutputListener>();
 
     public void addApplicationErrorListener(OutputListener l) {
 	appErrorListeners.addElement(l);
@@ -141,7 +141,7 @@
 
   // Diagnostic Listeners
 
-    private Vector diagnosticsListeners = new Vector();
+    private Vector<OutputListener> diagnosticsListeners = new Vector<OutputListener>();
 
     public void addDiagnosticsListener(OutputListener l) {
 	diagnosticsListeners.addElement(l);
@@ -196,7 +196,7 @@
      * currently loaded classes and interfaces.
      * Array types are not returned.
      */
-    public List allClasses() throws NoSessionException {
+    public List<ReferenceType> allClasses() throws NoSessionException {
 	ensureActiveSession();
 	return vm().allClasses();
     }
@@ -211,7 +211,7 @@
      * multiple class loaders could have loaded a class
      * with the same fully-qualified name.
      */
-    public List findClassesByName(String name) throws NoSessionException {
+    public List<ReferenceType> findClassesByName(String name) throws NoSessionException {
 	ensureActiveSession();
 	return vm().classesByName(name);
     }
@@ -225,10 +225,10 @@
      * component may optionally be a "*" character, designating
      * an arbitrary prefix.
      */
-    public List findClassesMatchingPattern(String pattern)
+    public List<ReferenceType> findClassesMatchingPattern(String pattern)
 						throws NoSessionException {
 	ensureActiveSession();
-	List result = new ArrayList();  //### Is default size OK?
+	List<ReferenceType> result = new ArrayList<ReferenceType>();  //### Is default size OK?
 	if (pattern.startsWith("*.")) {
 	    // Wildcard matches any leading package name.
 	    pattern = pattern.substring(1);
@@ -254,7 +254,7 @@
      * thread terminates.
      */
 
-    public List allThreads() throws NoSessionException {
+    public List<ThreadReference> allThreads() throws NoSessionException {
 	ensureActiveSession();
 	return vm().allThreads();
     }
@@ -266,7 +266,7 @@
      * descendents.
      */
     
-    public List topLevelThreadGroups() throws NoSessionException {
+    public List<ThreadGroupReference> topLevelThreadGroups() throws NoSessionException {
 	ensureActiveSession();
 	return vm().topLevelThreadGroups();
     }
@@ -320,7 +320,7 @@
 	//### Would be cleaner if we could just bring up VM already suspended.
 	if (suspended) {
 	    //### Set breakpoint at 'main(java.lang.String[])'.
-	    List argList = new ArrayList(1);
+	    List<String> argList = new ArrayList<String>(1);
 	    argList.add("java.lang.String[]");
 	    createMethodBreakpoint(className, "main", argList);
 	}
@@ -351,7 +351,7 @@
         VirtualMachineManager mgr = Bootstrap.virtualMachineManager();
         List connectors = mgr.attachingConnectors();
         AttachingConnector connector = (AttachingConnector)connectors.get(0);
-        Map arguments = connector.defaultArguments();
+        Map<String, Connector.Argument> arguments = connector.defaultArguments();
         ((Connector.Argument)arguments.get("port")).setValue(portName);
 
         Session newSession = internalAttach(connector, arguments);
@@ -361,7 +361,7 @@
     }
 
     private Session internalAttach(AttachingConnector connector, 
-                                   Map arguments) {
+                                   Map<String, Connector.Argument> arguments) {
         try {
             VirtualMachine vm = connector.attach(arguments);
 	    return new Session(vm, this, diagnostics);
@@ -376,7 +376,7 @@
     }
 
     private Session internalListen(ListeningConnector connector, 
-                                   Map arguments) {
+                                   Map<String, Connector.Argument> arguments) {
         try {
             VirtualMachine vm = connector.accept(arguments);
 	    return new Session(vm, this, diagnostics);
@@ -395,7 +395,7 @@
      * Connect via user specified arguments
      * @return true on success
      */
-    public boolean explictStart(Connector connector, Map arguments) 
+    public boolean explictStart(Connector connector, Map<String, Connector.Argument> arguments) 
                                            throws VMLaunchFailureException {
         Session newSession = null;
 
@@ -583,9 +583,9 @@
      * ThreadInfo objects -- Allow query of thread status and stack.
      */
 
-    private List threadInfoList = new LinkedList();
+    private List<ThreadInfo> threadInfoList = new LinkedList<ThreadInfo>();
     //### Should be weak! (in the value, not the key)
-    private HashMap threadInfoMap = new HashMap();
+    private HashMap<ThreadReference, ThreadInfo> threadInfoMap = new HashMap<ThreadReference, ThreadInfo>();
 
     public ThreadInfo threadInfo(ThreadReference thread) {
         if (session == null || thread == null) {
@@ -677,11 +677,11 @@
      */
 
     private Object inputLock = new Object();
-    private LinkedList inputBuffer = new LinkedList();
+    private LinkedList<String> inputBuffer = new LinkedList<String>();
 
     private void resetInputBuffer() {
 	synchronized (inputLock) {
-	    inputBuffer = new LinkedList();
+	    inputBuffer = new LinkedList<String>();
 	}
     }
 
--- a/j2se/src/share/classes/com/sun/tools/example/debug/bdi/MethodBreakpointSpec.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/bdi/MethodBreakpointSpec.java	Fri May 25 00:49:14 2007 +0000
@@ -285,9 +285,9 @@
                                                NoSessionException  {
 
         // Normalize the argument string once before looping below.
-        List argTypeNames = null;
+        List<String> argTypeNames = null;
         if (methodArgs() != null) {
-            argTypeNames = new ArrayList(methodArgs().size());
+            argTypeNames = new ArrayList<String>(methodArgs().size());
             Iterator iter = methodArgs().iterator();
             while (iter.hasNext()) {
                 String name = (String)iter.next();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ThreadGroupIterator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ThreadGroupIterator.java	Fri May 25 00:49:14 2007 +0000
@@ -37,14 +37,15 @@
  * @author Robert G. Field
  */
 public class ThreadGroupIterator implements Iterator {
-    private final Stack stack = new Stack();
+    private final Stack<Iterator<ThreadGroupReference>> stack 
+			= new Stack<Iterator<ThreadGroupReference>>();
 
-    public ThreadGroupIterator(List tgl) {
+    public ThreadGroupIterator(List<ThreadGroupReference> tgl) {
         push(tgl);
     }
 
     public ThreadGroupIterator(ThreadGroupReference tg) {
-        List tgl = new ArrayList();
+        List<ThreadGroupReference> tgl = new ArrayList<ThreadGroupReference>();
         tgl.add(tg);
         push(tgl);
     }
@@ -65,7 +66,7 @@
      * empty, there is no top.  This method assures
      * this invariant.
      */
-    private void push(List tgl) {
+    private void push(List<ThreadGroupReference> tgl) {
         stack.push(tgl.iterator());
         while (!stack.isEmpty() && !top().hasNext()) {
             stack.pop();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ThreadIterator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/bdi/ThreadIterator.java	Fri May 25 00:49:14 2007 +0000
@@ -39,7 +39,7 @@
     }
 
     //### make this package access only?
-    public ThreadIterator(List tgl) {
+    public ThreadIterator(List<ThreadGroupReference> tgl) {
         tgi = new ThreadGroupIterator(tgl);
     }
 
--- a/j2se/src/share/classes/com/sun/tools/example/debug/expr/ExpressionParser.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/expr/ExpressionParser.java	Fri May 25 00:49:14 2007 +0000
@@ -31,6 +31,7 @@
 import java.util.List;
 import java.util.ArrayList;
 
+@SuppressWarnings("unchecked")
 public class ExpressionParser implements ExpressionParserConstants {
 
   Stack stack = new Stack();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/expr/LValue.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/expr/LValue.java	Fri May 25 00:49:14 2007 +0000
@@ -57,7 +57,7 @@
                    throws ParseException, InvalidTypeException, 
                           ClassNotLoadedException;
 
-    abstract void invokeWith(List arguments) throws ParseException;
+    abstract void invokeWith(List<Value> arguments) throws ParseException;
 
     void setValue(Value value) throws ParseException {
         try {
@@ -115,7 +115,7 @@
 
             ThreadReference thread = frame.thread();
             LValue toStringMember = memberLValue("toString", thread);
-            toStringMember.invokeWith(new ArrayList());
+            toStringMember.invokeWith(new ArrayList<Value>());
             return toStringMember.interiorGetValue();
         }
         return vv;
@@ -205,7 +205,7 @@
         return list;
     }
 
-    static List primitiveTypeNames = new ArrayList();
+    static List<String> primitiveTypeNames = new ArrayList<String>();
     static {
         primitiveTypeNames.add("boolean");
         primitiveTypeNames.add("byte");
@@ -434,7 +434,7 @@
             jdiValue = val;
         }
 
-        void invokeWith(List arguments) throws ParseException {
+        void invokeWith(List<Value> arguments) throws ParseException {
             throw new ParseException(var.name() + " is not a method");
         }
     }
@@ -445,7 +445,7 @@
         final Field matchingField;
         final List overloads;
         Method matchingMethod = null;
-        List methodArguments = null;
+        List<Value> methodArguments = null;
 
         LValueInstanceMember(Value value, 
                             String memberName, 
@@ -497,7 +497,7 @@
             jdiValue = val;
         }
 
-        void invokeWith(List arguments) throws ParseException {
+        void invokeWith(List<Value> arguments) throws ParseException {
             if (matchingMethod != null) {
                 throw new ParseException("Invalid consecutive invocations");
             }
@@ -512,7 +512,7 @@
         final Field matchingField;
         final List overloads;
         Method matchingMethod = null;
-        List methodArguments = null;
+        List<Value> methodArguments = null;
 
         LValueStaticMember(ReferenceType refType, 
                           String memberName,
@@ -564,7 +564,7 @@
             jdiValue = val;
         }
 
-        void invokeWith(List arguments) throws ParseException {
+        void invokeWith(List<Value> arguments) throws ParseException {
             if (matchingMethod != null) {
                 throw new ParseException("Invalid consecutive invocations");
             }
@@ -602,7 +602,7 @@
             throw new ParseException("Cannot set constant: " + value);
         }
 
-        void invokeWith(List arguments) throws ParseException {
+        void invokeWith(List<Value> arguments) throws ParseException {
             throw new ParseException("Array element is not a method");
         }
     }
@@ -633,7 +633,7 @@
             jdiValue = val;
         }
 
-        void invokeWith(List arguments) throws ParseException {
+        void invokeWith(List<Value> arguments) throws ParseException {
             throw new ParseException("Array element is not a method");
         }
     }
@@ -656,7 +656,7 @@
             throw new ParseException("Cannot set constant: " + value);
         }
 
-        void invokeWith(List arguments) throws ParseException {
+        void invokeWith(List<Value> arguments) throws ParseException {
             throw new ParseException("Constant is not a method");
         }
     }
@@ -764,7 +764,7 @@
 
     static LValue makeNewObject(VirtualMachine vm, 
                                  ExpressionParser.GetFrame frameGetter, 
-                                String className, List arguments) throws ParseException {
+                                String className, List<Value> arguments) throws ParseException {
         List classes = vm.classesByName(className);
         if (classes.size() == 0) {
             throw new ParseException("No class named: " + className);
@@ -783,7 +783,7 @@
         }
 
         ClassType classType = (ClassType)refType;
-        List methods = new ArrayList(classType.methods()); // writable
+        List<Method> methods = new ArrayList<Method>(classType.methods()); // writable
         Iterator iter = methods.iterator();
         while (iter.hasNext()) {
             Method method = (Method)iter.next();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/CommandInterpreter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/CommandInterpreter.java	Fri May 25 00:49:14 2007 +0000
@@ -73,7 +73,7 @@
     private ThreadReference[] threads() throws NoSessionException {
         if (threads == null) {
             ThreadIterator ti = new ThreadIterator(getDefaultThreadGroup());
-            List tlist = new ArrayList();
+            List<ThreadReference> tlist = new ArrayList<ThreadReference>();
             while (ti.hasNext()) {
                 tlist.add(ti.nextThread());
             }
@@ -861,7 +861,7 @@
                 }
                 String methodName = token.substring(idot + 1);
                 String classId = token.substring(0, idot);
-                List argumentList = null;
+                List<String> argumentList = null;
                 if (rest != null) {
                     if (!rest.startsWith("(") || !rest.endsWith(")")) {
 			//### Should throw exception with error message
@@ -872,7 +872,7 @@
                     // Trim the parens
 		    //### What about spaces in arglist?
                     rest = rest.substring(1, rest.length() - 1);
-                    argumentList = new ArrayList();
+                    argumentList = new ArrayList<String>();
                     t = new StringTokenizer(rest, ",");
                     while (t.hasMoreTokens()) {
                         argumentList.add(t.nextToken());
@@ -930,7 +930,7 @@
             if (!iter.hasNext()) {
                 env.notice("No breakpoints set.");
             } else { 
-                List toDelete = new ArrayList();
+                List<BreakpointSpec> toDelete = new ArrayList<BreakpointSpec>();
                 while (iter.hasNext()) {
                     BreakpointSpec spec = (BreakpointSpec)iter.next();
                     if (spec.equals(bpSpec)) {
@@ -941,8 +941,7 @@
                 if (toDelete.size() <= 1) {
                     env.notice("No matching breakpoint set.");
                 }
-                for (Iterator it = toDelete.iterator(); it.hasNext();) {
-                    BreakpointSpec spec = (BreakpointSpec)it.next();
+		for (BreakpointSpec spec : toDelete) {
                     runtime.delete(spec);
                 }                    
             }
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/ContextManager.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/ContextManager.java	Fri May 25 00:49:14 2007 +0000
@@ -46,7 +46,7 @@
 
     private boolean verbose;
 
-    private Vector contextListeners = new Vector();
+    private Vector<ContextListener> contextListeners = new Vector<ContextListener>();
 
     public ContextManager(Environment env) {
 	classManager = env.getClassManager();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/JDBFileFilter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/JDBFileFilter.java	Fri May 25 00:49:14 2007 +0000
@@ -58,7 +58,7 @@
     private static String TYPE_UNKNOWN = "Type Unknown";
     private static String HIDDEN_FILE = "Hidden File";
 
-    private Hashtable filters = null;
+    private Hashtable<String, JDBFileFilter> filters = null;
     private String description = null;
     private String fullDescription = null;
     private boolean useExtensionsInDescription = true;
@@ -70,7 +70,7 @@
      * @see #addExtension
      */
     public JDBFileFilter() {
-	this.filters = new Hashtable();
+	this.filters = new Hashtable<String, JDBFileFilter>();
     }
 
     /**
@@ -181,7 +181,7 @@
      */
     public void addExtension(String extension) {
 	if(filters == null) {
-	    filters = new Hashtable(5);
+	    filters = new Hashtable<String, JDBFileFilter>(5);
 	}
 	filters.put(extension.toLowerCase(), this);
 	fullDescription = null;
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/JDBMenuBar.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/JDBMenuBar.java	Fri May 25 00:49:14 2007 +0000
@@ -129,7 +129,7 @@
         Frame frame = JOptionPane.getRootFrame();
         JDialog dialog = new JDialog(frame, "Specify Breakpoint");
         Container contents = dialog.getContentPane();
-        Vector classes = new Vector();
+        Vector<String> classes = new Vector<String>();
         classes.add("Foo");
         classes.add("Bar");
         JList list = new JList(classes);
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/LaunchTool.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/LaunchTool.java	Fri May 25 00:49:14 2007 +0000
@@ -132,7 +132,7 @@
         final ButtonGroup radioGroup = new ButtonGroup();
         VirtualMachineManager manager = Bootstrap.virtualMachineManager();
         List all = manager.allConnectors();
-        Map modelToConnector = new HashMap(all.size(), 0.5f);
+        Map<ButtonModel, Connector> modelToConnector = new HashMap<ButtonModel, Connector>(all.size(), 0.5f);
 
         dialog.setModal(true);
         dialog.setTitle("Select Connector Type");
@@ -172,7 +172,7 @@
 
     private void configureAndConnect(final Connector connector) {
         final JDialog dialog = new JDialog();
-        final Map args = connector.defaultArguments();
+        final Map<String, Connector.Argument> args = connector.defaultArguments();
         
         dialog.setModal(true);
         dialog.setTitle("Connector Arguments");
@@ -187,7 +187,7 @@
 
         //        guts.add(new JLabel(connector.description()));
 
-        final List argReps = new ArrayList(args.size());
+        final List<ArgRep> argReps = new ArrayList<ArgRep>(args.size());
         for (Iterator it = args.values().iterator(); it.hasNext(); ) {
             Object arg = it.next();
             ArgRep ar;
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/MonitorListModel.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/MonitorListModel.java	Fri May 25 00:49:14 2007 +0000
@@ -31,7 +31,7 @@
 
 public class MonitorListModel extends AbstractListModel {
 
-    private final List monitors = new ArrayList();
+    private final List<String> monitors = new ArrayList<String>();
 	
     MonitorListModel(Environment env) {
 	
@@ -66,7 +66,7 @@
         fireIntervalRemoved(this, index, index);
     }
 
-    public List monitors() {
+    public List<String> monitors() {
         return Collections.unmodifiableList(monitors);
     }
 
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/SearchPath.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/SearchPath.java	Fri May 25 00:49:14 2007 +0000
@@ -37,7 +37,7 @@
     public SearchPath(String searchPath) {
 	//### Should check searchpath for well-formedness.
 	StringTokenizer st = new StringTokenizer(searchPath, File.pathSeparator);
-	List dlist = new ArrayList();
+	List<String> dlist = new ArrayList<String>();
 	while (st.hasMoreTokens()) {
 	    dlist.add(st.nextToken());
 	}
@@ -75,7 +75,7 @@
 	// the one corresponding to the earliest entry on the
 	// classpath is retained.  This is the one that will be
 	// found if we later do a 'resolve'.
-	SortedSet s = new TreeSet();  // sorted, no duplicates
+	SortedSet<String> s = new TreeSet<String>();  // sorted, no duplicates
         for (int i = 0; i < pathArray.length; i++) {
             File path = new File(pathArray[i], relativeDirName);
             if (path.exists()) {
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/SourceManager.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/SourceManager.java	Fri May 25 00:49:14 2007 +0000
@@ -42,12 +42,12 @@
     //### TODO: The source cache should be aged, and some cap
     //### put on memory consumption by source files loaded into core.
 
-    private List sourceList;
+    private List<SourceModel> sourceList;
     private SearchPath sourcePath;
 
-    private Vector sourceListeners = new Vector();
+    private Vector<SourceListener> sourceListeners = new Vector<SourceListener>();
 
-    private Map classToSource = new HashMap();
+    private Map<ReferenceType, SourceModel> classToSource = new HashMap<ReferenceType, SourceModel>();
 
     private Environment env;
 
@@ -62,7 +62,7 @@
     
     public SourceManager(Environment env, SearchPath sourcePath) {
         this.env = env;
-	this.sourceList = new LinkedList();
+	this.sourceList = new LinkedList<SourceModel>();
 	this.sourcePath = sourcePath;
         env.getExecutionManager().addJDIListener(classListener);
     }
@@ -73,9 +73,9 @@
     public void setSourcePath(SearchPath sp) {
 	sourcePath = sp;
 	// Old cached sources are now invalid.
-	sourceList = new LinkedList();
+	sourceList = new LinkedList<SourceModel>();
 	notifySourcepathChanged();
-        classToSource = new HashMap();
+        classToSource = new HashMap<ReferenceType, SourceModel>();
     }
 
     public void addSourceListener(SourceListener l) {
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/SourceModel.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/SourceModel.java	Fri May 25 00:49:14 2007 +0000
@@ -45,7 +45,7 @@
 
     boolean isActuallySource = true;
 
-    private List classes = new ArrayList();
+    private List<ReferenceType> classes = new ArrayList<ReferenceType>();
 
     private Environment env;
 
@@ -58,7 +58,7 @@
     /**
      * List of Line.
      */
-    private List sourceLines = null;
+    private List<Line> sourceLines = null;
 
     public static class Line {
         public String text;
@@ -95,7 +95,7 @@
 
     private void setMessage(String message) {
         isActuallySource = false;
-        sourceLines = new ArrayList();
+        sourceLines = new ArrayList<Line>();
         sourceLines.add(new Line(message));
     }
 
@@ -133,7 +133,7 @@
 	if (index >= sourceLines.size() || index < 0) {
 	    return null;
 	} else {
-	    return (Line)sourceLines.get(index);  
+	    return sourceLines.get(index);  
 	}
     }
 
@@ -160,7 +160,7 @@
      * @return List of currently known {@link com.sun.jdi.ReferenceType} 
      * in this source file.
      */
-    public List referenceTypes() {
+    public List<ReferenceType> referenceTypes() {
         return Collections.unmodifiableList(classes);
     }
 
@@ -213,7 +213,7 @@
     }
 
     private void rawInit() throws IOException {
-	sourceLines = new ArrayList();
+	sourceLines = new ArrayList<Line>();
 	BufferedReader reader = sourceReader();
 	try {
 	    String line = reader.readLine();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/gui/ThreadTreeTool.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/gui/ThreadTreeTool.java	Fri May 25 00:49:14 2007 +0000
@@ -112,11 +112,11 @@
         //### remove listeners on exit!
     }
 
-    HashMap threadTable = new HashMap();
+    HashMap<ThreadReference, List<String>> threadTable = new HashMap<ThreadReference, List<String>>();
 
-    private List threadPath(ThreadReference thread) {
+    private List<String> threadPath(ThreadReference thread) {
 	// May exit abnormally if VM disconnects.
-	List l = new ArrayList();
+	List<String> l = new ArrayList<String>();
 	l.add(0, thread.name());
 	ThreadGroupReference group = thread.threadGroup();
 	while (group != null) {
@@ -164,7 +164,7 @@
 	    root = createThreadTree(HEADING);
 	    treeModel = new DefaultTreeModel(root);
 	    tree.setModel(treeModel);
-	    threadTable = new HashMap();
+	    threadTable = new HashMap<ThreadReference, List<String>>();
 	}
 
     }
@@ -227,7 +227,7 @@
 	    if (threadTable.get(thread) == null) {
 		// Add thread only if not already present.
 		try {
-		    List path = threadPath(thread);
+		    List<String> path = threadPath(thread);
 		    // May not get here due to exception.
 		    // If we get here, we are committed.
 		    // We must not leave the tree partially updated.
@@ -280,7 +280,7 @@
 	}
 
 	public void removeThread(ThreadReference thread) {
-	    List threadPath = (List)threadTable.get(thread);
+	    List<String> threadPath = threadTable.get(thread);
 	    // Only remove thread if we recorded it in table.
 	    // Original add may have failed due to VM disconnect.
 	    if (threadPath != null) {
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/BreakpointSpec.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/BreakpointSpec.java	Fri May 25 00:49:14 2007 +0000
@@ -328,9 +328,9 @@
                                                NoSuchMethodException {
 
         // Normalize the argument string once before looping below.
-        List argTypeNames = null;
+        List<String> argTypeNames = null;
         if (methodArgs() != null) {
-            argTypeNames = new ArrayList(methodArgs().size());
+            argTypeNames = new ArrayList<String>(methodArgs().size());
             Iterator iter = methodArgs().iterator();
             while (iter.hasNext()) {
                 String name = (String)iter.next();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/Commands.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/Commands.java	Fri May 25 00:49:14 2007 +0000
@@ -585,8 +585,8 @@
         MessageOutput.println("The load command is no longer supported.");
     }
 
-    private List allThreads(ThreadGroupReference group) {
-        List list = new ArrayList();
+    private List<ThreadReference> allThreads(ThreadGroupReference group) {
+        List<ThreadReference> list = new ArrayList<ThreadReference>();
         list.addAll(group.threads());
         Iterator iter = group.threadGroups().iterator();
         while (iter.hasNext()) {
@@ -1118,7 +1118,7 @@
                 }
                 String methodName = token.substring(idot + 1);
                 String classId = token.substring(0, idot);
-                List argumentList = null;
+                List<String> argumentList = null;
                 if (rest != null) {
                     if (!rest.startsWith("(") || !rest.endsWith(")")) {
                         MessageOutput.println("Invalid method specification:",
@@ -1129,7 +1129,7 @@
                     // Trim the parens
                     rest = rest.substring(1, rest.length() - 1);
 
-                    argumentList = new ArrayList();
+                    argumentList = new ArrayList<String>();
                     t = new StringTokenizer(rest, ",");
                     while (t.hasMoreTokens()) {
                         argumentList.add(t.nextToken());
@@ -1209,8 +1209,8 @@
         }
     }
 
-    private List parseWatchpointSpec(StringTokenizer t) {
-        List list = new ArrayList();
+    private List<EventRequestSpec> parseWatchpointSpec(StringTokenizer t) {
+        List<EventRequestSpec> list = new ArrayList<EventRequestSpec>();
         boolean access = false;
         boolean modification = false;
         int suspendPolicy = EventRequest.SUSPEND_ALL;
@@ -1614,7 +1614,7 @@
             if (frame == null) {
                 throw new AbsentInformationException();
             }
-            List vars = frame.visibleVariables();
+            List<LocalVariable> vars = frame.visibleVariables();
     
             if (vars.size() == 0) {
                 MessageOutput.println("No local variables");
@@ -2074,7 +2074,7 @@
                              new Object [] {fileName, exc.toString()});
                 return;
             }
-            Map map = new HashMap();
+            Map<ReferenceType, byte[]> map = new HashMap<ReferenceType, byte[]>();
             map.put(refType, bytes);
             try {
                 Env.vm().redefineClasses(map);
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/Env.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/Env.java	Fri May 25 00:49:14 2007 +0000
@@ -41,12 +41,12 @@
     private static VMConnection connection;
 
     private static SourceMapper sourceMapper = new SourceMapper("");
-    private static List excludes;
+    private static List<String> excludes;
 
     private static final int SOURCE_CACHE_SIZE = 5;
-    private static List sourceCache = new LinkedList();
+    private static List<SourceCode> sourceCache = new LinkedList<SourceCode>();
 
-    private static HashMap savedValues = new HashMap();
+    private static HashMap<String, Value> savedValues = new HashMap<String, Value>();
     private static Method atExitMethod;
 
     static void init(String connectSpec, boolean openNow, int flags) {
@@ -98,7 +98,7 @@
         return sourceMapper.getSourcePath();
     }
 
-    static private List excludes() {
+    static private List<String> excludes() {
         if (excludes == null) {
             setExcludes("java.*, javax.*, sun.*, com.sun.*");
         }
@@ -142,7 +142,7 @@
 
     static void setExcludes(String excludeString) {
         StringTokenizer t = new StringTokenizer(excludeString, " ,;");
-        List list = new ArrayList();
+        List<String> list = new ArrayList<String>();
         while (t.hasMoreTokens()) {
             list.add(t.nextToken());
         }
@@ -303,7 +303,7 @@
 
     static class SourceCode {
         private String fileName;
-        private List sourceLines = new ArrayList();
+        private List<String> sourceLines = new ArrayList<String>();
 
         SourceCode(String fileName, BufferedReader reader)  throws IOException {
             this.fileName = fileName;
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/EventRequestSpec.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/EventRequestSpec.java	Fri May 25 00:49:14 2007 +0000
@@ -100,7 +100,7 @@
                  * Note: Class patterns apply only to ExceptionRequests,
                  * so that is all we need to examine.
                  */
-                ArrayList deleteList = new ArrayList();
+                ArrayList<ExceptionRequest> deleteList = new ArrayList<ExceptionRequest>();
                 Iterator iter =
                     Env.vm().eventRequestManager().exceptionRequests().iterator();
                 while (iter.hasNext()) {
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/EventRequestSpecList.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/EventRequestSpecList.java	Fri May 25 00:49:14 2007 +0000
@@ -42,8 +42,8 @@
     private static final int statusError = 3;
     
     // all specs
-    private List eventRequestSpecs = Collections.synchronizedList(
-                                                  new ArrayList());
+    private List<EventRequestSpec> eventRequestSpecs = Collections.synchronizedList(
+                                                  new ArrayList<EventRequestSpec>());
 
     EventRequestSpecList() {
     }
@@ -164,10 +164,10 @@
         }
     }
 
-    List eventRequestSpecs() {
+    List<EventRequestSpec> eventRequestSpecs() {
        // We need to make a copy to avoid synchronization problems
         synchronized (eventRequestSpecs) {
-            return new ArrayList(eventRequestSpecs);
+            return new ArrayList<EventRequestSpec>(eventRequestSpecs);
         }
     }
 }
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/MessageOutput.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/MessageOutput.java	Fri May 25 00:49:14 2007 +0000
@@ -33,7 +33,7 @@
  * the only class that should be printing directly or otherwise
  * accessing System.[out,err].
  *
- * @version     @(#) MessageOutput.java 1.14 07/05/05 02:10:49
+ * @version     @(#) MessageOutput.java 1.14 07/05/05 00:42:13
  * @bug 4348376
  * @author Tim Bell
  */
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/SourceMapper.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/SourceMapper.java	Fri May 25 00:49:14 2007 +0000
@@ -44,7 +44,7 @@
          * sourcepath can arrive from the debugee as a List.
          * (via PathSearchingVirtualMachine.classPath())
          */
-        List dirList = new ArrayList();
+        List<String> dirList = new ArrayList<String>();
         Iterator iter = sourcepath.iterator();
         while (iter.hasNext()) {
             String element = (String)iter.next();
@@ -69,7 +69,7 @@
          */
         StringTokenizer st = new StringTokenizer(sourcepath,
                                                  File.pathSeparator);
-        List dirList = new ArrayList();
+        List<String> dirList = new ArrayList<String>();
         while (st.hasMoreTokens()) {
             String s = st.nextToken();
             //XXX remove .jar and .zip files; we want only directories on
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/TTY.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/TTY.java	Fri May 25 00:49:14 2007 +0000
@@ -39,7 +39,7 @@
     /**
      * List of Strings to execute at each stop.
      */
-    private List monitorCommands = new ArrayList();
+    private List<String> monitorCommands = new ArrayList<String>();
     private int monitorCount = 0;
 
     /**
@@ -601,7 +601,7 @@
     void readCommand(StringTokenizer t) {
         if (t.hasMoreTokens()) {
             String cmdfname = t.nextToken();
-            if (!readCommandFile(cmdfname)) {
+            if (!readCommandFile(new File(cmdfname))) {
                 MessageOutput.println("Could not open:", cmdfname);
             }
         } else {
@@ -610,16 +610,15 @@
     }
 
     /**
-     * Read and execute a command file.  Return true if the
-     * file could be opened.
+     * Read and execute a command file.  Return true if the file was read
+     * else false.
      */
-    boolean readCommandFile(String filename) {
-        File f = new File(filename);
+    boolean readCommandFile(File f) {
         BufferedReader inFile = null;
         try {
             if (f.canRead()) {
-                MessageOutput.println("*** Reading commands from", f.getCanonicalPath());
                 // Process initial commands.
+                MessageOutput.println("*** Reading commands from", f.getPath());
                 inFile = new BufferedReader(new FileReader(f));
                 String ln;
                 while ((ln = inFile.readLine()) != null) {
@@ -641,6 +640,35 @@
         return inFile != null;
     }
 
+    /**
+     * Try to read commands from dir/fname, unless
+     * the canonical path passed in is the same as that
+     * for dir/fname.
+     * Return null if that file doesn't exist,
+     * else return the canonical path of that file.
+     */
+    String readStartupCommandFile(String dir, String fname, String canonPath) {
+        File dotInitFile = new File(dir, fname);
+        if (!dotInitFile.exists()) {
+            return null;
+        }
+
+        String myCanonFile;
+        try {
+            myCanonFile = dotInitFile.getCanonicalPath();
+        } catch (IOException ee) {
+            MessageOutput.println("Could not open:", dotInitFile.getPath());
+            return null;
+        }
+        if (canonPath == null || !canonPath.equals(myCanonFile)) {
+            if (!readCommandFile(dotInitFile)) { 
+                MessageOutput.println("Could not open:", dotInitFile.getPath());
+            }
+        }
+        return myCanonFile;
+    }
+
+
     public TTY() throws Exception {
 
         MessageOutput.println("Initializing progname", progname);
@@ -662,22 +690,38 @@
             Thread.currentThread().setPriority(Thread.NORM_PRIORITY);
     
             /*
-             * Try reading user's home startup file. Handle Unix and 
-             * and Win32 conventions for the names of these files. 
+             * Read start up files.  This mimics the behavior
+             * of gdb which will read both ~/.gdbinit and then
+             * ./.gdbinit if they exist.  We have the twist that
+             * we allow two different names, so we do this:
+             *  if ~/jdb.ini exists, 
+             *      read it
+             *  else if ~/.jdbrc exists, 
+             *      read it
+             *
+             *  if ./jdb.ini exists, 
+             *      if it hasn't been read, read it
+             *      It could have been read above because ~ == .
+             *      or because of symlinks, ...
+             *  else if ./.jdbrc exists
+             *      if it hasn't been read, read it
              */
-            if (!readCommandFile(System.getProperty("user.home") + 
-                                 File.separator + "jdb.ini")) {
-                readCommandFile(System.getProperty("user.home") + 
-                                File.separator + ".jdbrc");
-            }
-    
-            // Try startup file in local directory
-            if (!readCommandFile(System.getProperty("user.dir") + 
-                                 File.separator + "jdb.ini")) {
-                readCommandFile(System.getProperty("user.dir") + 
-                                File.separator + ".jdbrc");
-            }
-    
+            {
+                String userHome = System.getProperty("user.home");
+                String canonPath;
+
+                if ((canonPath = readStartupCommandFile(userHome, "jdb.ini", null)) == null) {
+                    // Doesn't exist, try alternate spelling
+                    canonPath = readStartupCommandFile(userHome, ".jdbrc", null);
+                }
+
+                String userDir = System.getProperty("user.dir");
+                if (readStartupCommandFile(userDir, "jdb.ini", canonPath) == null) {
+                    // Doesn't exist, try alternate spelling
+                    readStartupCommandFile(userDir, ".jdbrc", canonPath);
+                }
+            }                
+
             // Process interactive commands.
             MessageOutput.printPrompt();
             while (true) {
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadGroupIterator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadGroupIterator.java	Fri May 25 00:49:14 2007 +0000
@@ -37,14 +37,14 @@
  * @author Robert G. Field
  */
 class ThreadGroupIterator implements Iterator {
-    private final Stack stack = new Stack();
+    private final Stack<Iterator<ThreadGroupReference>> stack = new Stack<Iterator<ThreadGroupReference>>();
 
-    ThreadGroupIterator(List tgl) {
+    ThreadGroupIterator(List<ThreadGroupReference> tgl) {
         push(tgl);
     }
 
     ThreadGroupIterator(ThreadGroupReference tg) {
-        List tgl = new ArrayList();
+        List<ThreadGroupReference> tgl = new ArrayList<ThreadGroupReference>();
         tgl.add(tg);
         push(tgl);
     }
@@ -63,7 +63,7 @@
      * empty, there is no top.  This method assures
      * this invariant.
      */
-    private void push(List tgl) {
+    private void push(List<ThreadGroupReference> tgl) {
         stack.push(tgl.iterator());
         while (!stack.isEmpty() && !top().hasNext()) {
             stack.pop();
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadInfo.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadInfo.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
 class ThreadInfo {
     // This is a list of all known ThreadInfo objects. It survives 
     // ThreadInfo.invalidateAll, unlike the other static fields below. 
-    private static List threads = Collections.synchronizedList(new ArrayList());
+    private static List<ThreadInfo> threads = Collections.synchronizedList(new ArrayList<ThreadInfo>());
     private static boolean gotInitialThreads = false;
 
     private static ThreadInfo current = null;
@@ -101,11 +101,11 @@
         threads.remove(getThreadInfo(thread));
     }
 
-    static List threads() {
+    static List<ThreadInfo> threads() {
         synchronized(threads) {
             initThreads();
             // Make a copy to allow iteration without synchronization
-            return new ArrayList(threads);
+            return new ArrayList<ThreadInfo>(threads);
         }
     }
 
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadIterator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/ThreadIterator.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
         tgi = new ThreadGroupIterator(tg);
     }
 
-    ThreadIterator(List tgl) {
+    ThreadIterator(List<ThreadGroupReference> tgl) {
         tgi = new ThreadGroupIterator(tgl);
     }
 
--- a/j2se/src/share/classes/com/sun/tools/example/debug/tty/VMConnection.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/debug/tty/VMConnection.java	Fri May 25 00:49:14 2007 +0000
@@ -43,7 +43,7 @@
     private int outputCompleteCount = 0;
 
     private final Connector connector;
-    private final Map connectorArgs;
+    private final Map<String, com.sun.jdi.connect.Connector.Argument> connectorArgs;
     private final int traceFlags;
 
     synchronized void notifyOutputComplete() {
@@ -72,8 +72,8 @@
         return null;
     }
 
-    private Map parseConnectorArgs(Connector connector, String argString) {
-        Map arguments = connector.defaultArguments();
+    private Map <String, com.sun.jdi.connect.Connector.Argument> parseConnectorArgs(Connector connector, String argString) {
+        Map<String, com.sun.jdi.connect.Connector.Argument> arguments = connector.defaultArguments();
 
         /*
          * We are parsing strings of the form:
--- a/j2se/src/share/classes/com/sun/tools/example/doc/index.html	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/doc/index.html	Fri May 25 00:49:14 2007 +0000
@@ -79,7 +79,7 @@
     <address><a href="mailto:java-debugger@java.sun.com">java-debugger@java.sun.com</a></address>
     </P>
 <P>
-<FONT SIZE=-1>@(#) index.html 1.2 01/06/04 02:10:49</FONT>
+<FONT SIZE=-1>@(#) index.html 1.2 01/06/04 00:42:13</FONT>
 </P>
 <!-- Created: Mon Feb  7 18:56:28 PST 2000 -->
 </body>
--- a/j2se/src/share/classes/com/sun/tools/example/doc/javadt.html	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/doc/javadt.html	Fri May 25 00:49:14 2007 +0000
@@ -169,7 +169,7 @@
     <address><a href="mailto:java-debugger@java.sun.com">java-debugger@java.sun.com</a></address>
     </P>
 <P>
-<FONT SIZE=-1>@(#) javadt.html 1.3 07/04/04 02:10:49</FONT>
+<FONT SIZE=-1>@(#) javadt.html 1.3 07/04/04 00:42:13</FONT>
 </P>
 </BODY>
 </HTML>
--- a/j2se/src/share/classes/com/sun/tools/example/doc/jdb.html	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/doc/jdb.html	Fri May 25 00:49:14 2007 +0000
@@ -99,7 +99,7 @@
     <address><a href="mailto:java-debugger@java.sun.com">java-debugger@java.sun.com</a></address>
     </P>
 <P>
-<FONT SIZE=-1>@(#) jdb.html 1.3 07/04/04 02:10:49</FONT>
+<FONT SIZE=-1>@(#) jdb.html 1.3 07/04/04 00:42:13</FONT>
 </P>
   </BODY>
 </HTML>
--- a/j2se/src/share/classes/com/sun/tools/example/doc/trace.html	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/doc/trace.html	Fri May 25 00:49:14 2007 +0000
@@ -66,7 +66,7 @@
     <address><a href="mailto:java-debugger@java.sun.com">java-debugger@java.sun.com</a></address>
     </P>
 <P>
-<FONT SIZE=-1>@(#) trace.html 1.4 07/04/04 02:10:49</FONT>
+<FONT SIZE=-1>@(#) trace.html 1.4 07/04/04 00:42:13</FONT>
 </P>
 </BODY>
 </HTML>
--- a/j2se/src/share/classes/com/sun/tools/example/trace/EventThread.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/trace/EventThread.java	Fri May 25 00:49:14 2007 +0000
@@ -35,7 +35,7 @@
 /**
  * This class processes incoming JDI events and displays them
  *
- * @version     @(#) EventThread.java 1.12 07/05/05 02:10:49
+ * @version     @(#) EventThread.java 1.12 07/05/05 00:42:13
  * @author Robert Field
  */
 public class EventThread extends Thread {
--- a/j2se/src/share/classes/com/sun/tools/example/trace/StreamRedirectThread.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/trace/StreamRedirectThread.java	Fri May 25 00:49:14 2007 +0000
@@ -31,7 +31,7 @@
  * StreamRedirectThread is a thread which copies it's input to
  * it's output and terminates when it completes.
  *
- * @version     @(#) StreamRedirectThread.java 1.12 07/05/05 02:10:49
+ * @version     @(#) StreamRedirectThread.java 1.12 07/05/05 00:42:13
  * @author Robert Field
  */
 class StreamRedirectThread extends Thread {
--- a/j2se/src/share/classes/com/sun/tools/example/trace/Trace.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/example/trace/Trace.java	Fri May 25 00:49:14 2007 +0000
@@ -42,7 +42,7 @@
  * See "java Trace -help".
  * It is a simple example of the use of the Java Debug Interface.
  *
- * @version     @(#) Trace.java 1.12 07/05/05 02:10:49
+ * @version     @(#) Trace.java 1.12 07/05/05 00:42:13
  * @author Robert Field
  */
 public class Trace {
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/model/JavaClass.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/model/JavaClass.java	Fri May 25 00:49:14 2007 +0000
@@ -50,7 +50,7 @@
 
 /**
  *
- * @version     @(#)JavaClass.java	1.18 07/05/05 17:12:42
+ * @version     @(#)JavaClass.java	1.19 07/05/09 17:45:57
  * @author      Bill Foote
  */
 
@@ -77,7 +77,7 @@
     private JavaClass[] subclasses = EMPTY_CLASS_ARRAY;
 
     // my instances
-    private Vector instances = new Vector();
+    private Vector<JavaHeapObject> instances = new Vector<JavaHeapObject>();
 
     // Who I belong to.  Set on resolve.
     private Snapshot mySnapshot;
@@ -314,11 +314,11 @@
      * Includes superclass fields
      */
     public JavaField[] getFieldsForInstance() {
-	Vector v = new Vector();
+	Vector<JavaField> v = new Vector<JavaField>();
 	addFields(v);
 	JavaField[] result = new JavaField[v.size()];
 	for (int i = 0; i < v.size(); i++) {
-	    result[i] = (JavaField) v.elementAt(i);
+	    result[i] =  v.elementAt(i);
 	}
 	return result;
     }
@@ -488,7 +488,7 @@
     }
 
     // Internals only below this point
-    private void addFields(Vector v) {
+    private void addFields(Vector<JavaField> v) {
 	if (superclass != null) {
 	    ((JavaClass) superclass).addFields(v);
 	}
@@ -497,7 +497,7 @@
 	}
     }
 
-    private void addSubclassInstances(Vector v) {
+    private void addSubclassInstances(Vector<JavaHeapObject> v) {
 	for (int i = 0; i < subclasses.length; i++) {
 	    subclasses[i].addSubclassInstances(v);
 	}
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/model/ReachableExcludesImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/model/ReachableExcludesImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -59,7 +59,7 @@
  * transitive closure of objects reachable from a given object, allowing
  * some kind of real determination of the "size" of that object.
  *
- * @version     1.1, 03/06/98 [jhat @(#)ReachableExcludesImpl.java	1.7 07/05/05]
+ * @version     1.1, 03/06/98 [jhat @(#)ReachableExcludesImpl.java	1.8 07/05/09]
  * @author      Bill Foote
  */
 public class ReachableExcludesImpl implements ReachableExcludes {
@@ -89,7 +89,7 @@
 
     private void readFile() {
 	long lm = excludesFile.lastModified();
-	Hashtable m = new Hashtable();
+	Hashtable<String, String> m = new Hashtable<String, String>();
 
 	try {
 	    BufferedReader r = new BufferedReader(new InputStreamReader(
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/model/ReachableObjects.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/model/ReachableObjects.java	Fri May 25 00:49:14 2007 +0000
@@ -59,9 +59,9 @@
                             final ReachableExcludes excludes) {
         this.root = root;
 
-	final Hashtable bag = new Hashtable();
-	final Hashtable fieldsExcluded = new Hashtable();  // Bag<String>
-	final Hashtable fieldsUsed = new Hashtable();	// Bag<String>
+	final Hashtable<JavaHeapObject, JavaHeapObject> bag = new Hashtable<JavaHeapObject, JavaHeapObject>();
+	final Hashtable<String, String> fieldsExcluded = new Hashtable<String, String>();  //Bag<String>
+	final Hashtable<String, String> fieldsUsed = new Hashtable<String, String>();	// Bag<String>
 	JavaHeapObjectVisitor visitor = new AbstractJavaHeapObjectVisitor() {
 	    public void visit(JavaHeapObject t) {
 		// Size is zero for things like integer fields
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/model/Snapshot.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/model/Snapshot.java	Fri May 25 00:49:14 2007 +0000
@@ -50,7 +50,7 @@
 
 /**
  *
- * @version     1.26, 10/08/98 [jhat @(#)Snapshot.java	1.21 07/05/05]
+ * @version     1.26, 10/08/98 [jhat @(#)Snapshot.java	1.22 07/05/09]
  * @author      Bill Foote
  */
 
@@ -417,7 +417,7 @@
         JavaClass clazz = findClass("java.lang.ref.Finalizer");
         JavaObject queue = (JavaObject) clazz.getStaticField("queue");
         JavaThing tmp = queue.getField("head");
-        Vector finalizables = new Vector();
+        Vector<JavaHeapObject> finalizables = new Vector<JavaHeapObject>();
         if (tmp != getNullThing()) {
             JavaObject head = (JavaObject) tmp;
             while (true) {
@@ -450,16 +450,16 @@
 
     public ReferenceChain[] 
     rootsetReferencesTo(JavaHeapObject target, boolean includeWeak) {
-	Vector fifo = new Vector();  // This is slow... A real fifo would help
+	Vector<ReferenceChain> fifo = new Vector<ReferenceChain>();  // This is slow... A real fifo would help
 	    // Must be a fifo to go breadth-first
-	Hashtable visited = new Hashtable();
+	Hashtable<JavaHeapObject, JavaHeapObject> visited = new Hashtable<JavaHeapObject, JavaHeapObject>();
 	// Objects are added here right after being added to fifo.
-	Vector result = new Vector();
+	Vector<ReferenceChain> result = new Vector<ReferenceChain>();
 	visited.put(target, target);
 	fifo.addElement(new ReferenceChain(target, null));
 
 	while (fifo.size() > 0) {
-	    ReferenceChain chain = (ReferenceChain) fifo.elementAt(0);
+	    ReferenceChain chain = fifo.elementAt(0);
 	    fifo.removeElementAt(0);
 	    JavaHeapObject curr = chain.getObj();
 	    if (curr.getRoot() != null) {
@@ -481,7 +481,7 @@
 
 	ReferenceChain[] realResult = new ReferenceChain[result.size()];
 	for (int i = 0; i < result.size(); i++) {
-	    realResult[i] = (ReferenceChain) result.elementAt(i);
+	    realResult[i] =  result.elementAt(i);
 	}
 	return realResult;
     }
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/oql/OQLEngine.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/oql/OQLEngine.java	Fri May 25 00:49:14 2007 +0000
@@ -51,14 +51,14 @@
 /**
  * This is Object Query Language Interpreter
  *
- * @author A. Sundararajan [jhat @(#)OQLEngine.java	1.14 07/05/05]
+ * @author A. Sundararajan [jhat @(#)OQLEngine.java	1.15 07/05/09]
  */
 public class OQLEngine {
     static {
         try {
             // Do we have javax.script support?
             // create ScriptEngineManager
-            Class managerClass = Class.forName("javax.script.ScriptEngineManager");
+            Class<?> managerClass = Class.forName("javax.script.ScriptEngineManager");
             Object manager = managerClass.newInstance();
  
             // create JavaScript engine
@@ -272,7 +272,7 @@
         this.snapshot = snapshot;
         try {
             // create ScriptEngineManager
-            Class managerClass = Class.forName("javax.script.ScriptEngineManager");
+            Class<?> managerClass = Class.forName("javax.script.ScriptEngineManager");
             Object manager = managerClass.newInstance();
  
             // create JavaScript engine
@@ -282,14 +282,14 @@
 
             // initialize engine with init file (hat.js)
             InputStream strm = getInitStream();
-            Class engineClass = Class.forName("javax.script.ScriptEngine");   
+            Class<?> engineClass = Class.forName("javax.script.ScriptEngine");   
             evalMethod = engineClass.getMethod("eval",
                                 new Class[] { Reader.class });
             evalMethod.invoke(engine, new Object[] {new InputStreamReader(strm)});
  
             // initialize ScriptEngine.eval(String) and
             // Invocable.invokeFunction(String, Object[]) methods.
-            Class invocableClass = Class.forName("javax.script.Invocable");
+            Class<?> invocableClass = Class.forName("javax.script.Invocable");
  
             evalMethod = engineClass.getMethod("eval",
                                   new Class[] { String.class });
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/parser/HprofReader.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/parser/HprofReader.java	Fri May 25 00:49:14 2007 +0000
@@ -51,7 +51,7 @@
 /**
  * Object that's used to read a hprof file.
  *
- * @version     1.20, 06/03/99 [jhat @(#)HprofReader.java	1.15 07/05/05]
+ * @version     1.20, 06/03/99 [jhat @(#)HprofReader.java	1.16 07/05/09]
  * @author      Bill Foote
  */
 
@@ -137,28 +137,28 @@
     private boolean callStack;	// If true, read the call stack of objects
 
     private int identifierSize;		// Size, in bytes, of identifiers.
-    private Hashtable names;		// Hashtable<Integer, String>
+    private Hashtable<Long, String> names;	
 
     // Hashtable<Integer, ThreadObject>, used to map the thread sequence number
     // (aka "serial number") to the thread object ID for 
     // HPROF_GC_ROOT_THREAD_OBJ.  ThreadObject is a trivial inner class,
     // at the end of this file.
-    private Hashtable threadObjects;
+    private Hashtable<Integer, ThreadObject> threadObjects;
 
-    // Hashtable<Integer, String>, maps class object ID to class name
+    // Hashtable<Long, String>, maps class object ID to class name
     // (with / converted to .)
-    private Hashtable classNameFromObjectID;
+    private Hashtable<Long, String> classNameFromObjectID;
 
     // Hashtable<Integer, Integer>, maps class serial # to class object ID
-    private Hashtable classNameFromSerialNo;
+    private Hashtable<Integer, String> classNameFromSerialNo;
 
-    // Hashtable<Integer, StackFrame> maps stack frame ID to StackFrame.
+    // Hashtable<Long, StackFrame> maps stack frame ID to StackFrame.
     // Null if we're not tracking them.
-    private Hashtable stackFrames;
+    private Hashtable<Long, StackFrame> stackFrames;
 
     // Hashtable<Integer, StackTrace> maps stack frame ID to StackTrace
     // Null if we're not tracking them.
-    private Hashtable stackTraces;
+    private Hashtable<Integer, StackTrace> stackTraces;
 
     private Snapshot snapshot;
 
@@ -171,13 +171,13 @@
 	this.dumpsToSkip = dumpNumber - 1;
 	this.callStack = callStack;
 	this.debugLevel = debugLevel;
-	names = new Hashtable();
-	threadObjects = new Hashtable(43);
-	classNameFromObjectID = new Hashtable();
+	names = new Hashtable<Long, String>();
+	threadObjects = new Hashtable<Integer, ThreadObject>(43);
+	classNameFromObjectID = new Hashtable<Long, String>();
 	if (callStack) {
-	    stackFrames = new Hashtable(43);
-	    stackTraces = new Hashtable(43);
-	    classNameFromSerialNo = new Hashtable();
+	    stackFrames = new Hashtable<Long, StackFrame>(43);
+	    stackTraces = new Hashtable<Integer, StackTrace>(43);
+	    classNameFromSerialNo = new Hashtable<Integer, String>();
 	}
     }
 
@@ -306,8 +306,7 @@
 			String methodSig = getNameFromID(readID());
 			String sourceFile = getNameFromID(readID());
 			int classSer = in.readInt();
-			String className = (String)
-			      classNameFromSerialNo.get(new Integer(classSer));
+			String className = classNameFromSerialNo.get(new Integer(classSer));
 			int lineNumber = in.readInt();
 			if (lineNumber < StackFrame.LINE_NUMBER_NATIVE) {
 			    warn("Weird stack frame line number:  " + lineNumber);
@@ -329,8 +328,7 @@
 			StackFrame[] frames = new StackFrame[in.readInt()];
 			for (int i = 0; i < frames.length; i++) {
 			    long fid = readID();
-			    frames[i] = (StackFrame) 
-				    stackFrames.get(new Long(fid));
+			    frames[i] = stackFrames.get(new Long(fid));
 			    if (frames[i] == null) {
 				throw new IOException("Stack frame " + toHex(fid) + " not found");
 			    }
@@ -621,8 +619,7 @@
 
     private ThreadObject getThreadObjectFromSequence(int threadSeq) 
 	    throws IOException {
-	ThreadObject to = (ThreadObject) 
-		threadObjects.get(new Integer(threadSeq));
+	ThreadObject to = threadObjects.get(new Integer(threadSeq));
 	if (to == null) {
 	    throw new IOException("Thread " + threadSeq + 
 			          " not found for JNI local ref");
@@ -638,7 +635,7 @@
 	if (id.longValue() == 0L) {
 	    return "";
 	}
-	String result = (String) names.get(id);
+	String result = names.get(id);
 	if (result == null) {
 	    warn("Name not found at " + toHex(id.longValue()));
 	    return "unresolved name " + toHex(id.longValue());
@@ -650,7 +647,7 @@
 	if (stackTraces == null) {
 	    return null;
 	}
-	StackTrace result = (StackTrace) stackTraces.get(new Integer(ser));
+	StackTrace result = stackTraces.get(new Integer(ser));
 	if (result == null) {
 	    warn("Stack trace not found for serial # " + ser);
 	}
@@ -715,7 +712,7 @@
 	    String signature = "" + ((char) type);
 	    fields[i] = new JavaField(fieldName, signature);
 	}
-	String name = (String) classNameFromObjectID.get(new Long(id));
+	String name = classNameFromObjectID.get(new Long(id));
 	if (name == null) {
 	    warn("Class name not found for " + toHex(id));
             name = "unknown-name@" + toHex(id);
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/server/FinalizerSummaryQuery.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/server/FinalizerSummaryQuery.java	Fri May 25 00:49:14 2007 +0000
@@ -87,7 +87,7 @@
 
     private void printFinalizerSummary(Enumeration objs) {
         int count = 0;
-        Map map = new HashMap();
+        Map<JavaClass, HistogramElement> map = new HashMap<JavaClass, HistogramElement>();
 
         while (objs.hasMoreElements()) {
             JavaHeapObject obj = (JavaHeapObject) objs.nextElement();
@@ -96,7 +96,7 @@
             if (! map.containsKey(clazz)) {
                 map.put(clazz, new HistogramElement(clazz));
             }
-            HistogramElement element = (HistogramElement) map.get(clazz);
+            HistogramElement element = map.get(clazz);
             element.updateCount();
         }
 
@@ -119,10 +119,9 @@
         // calculate and print histogram
         HistogramElement[] elements = new HistogramElement[map.size()];
         map.values().toArray(elements);
-        Arrays.sort(elements, new Comparator() {
-                    public int compare(Object o1, Object o2) {
-                        return ((HistogramElement)o1).compare( 
-                                          (HistogramElement)o2);
+        Arrays.sort(elements, new Comparator<HistogramElement>() {
+                    public int compare(HistogramElement o1, HistogramElement o2) {
+                        return o1.compare(o2);
                     }
                 });
 
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/server/HistogramQuery.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/server/HistogramQuery.java	Fri May 25 00:49:14 2007 +0000
@@ -50,36 +50,30 @@
 /**
  * Prints histogram sortable by class name, count and size.
  *
- * @author A. Sundararajan [jhat @(#)HistogramQuery.java	1.8 07/05/05]
+ * @author A. Sundararajan [jhat @(#)HistogramQuery.java	1.9 07/05/09]
  */
 public class HistogramQuery extends QueryHandler {
     public void run() {
         JavaClass[] classes = snapshot.getClassesArray();
-        Comparator comparator;
+        Comparator<JavaClass> comparator;
         if (query.equals("count")) {
-            comparator = new Comparator() {
-                public int compare(Object o1, Object o2) {
-                    JavaClass first = (JavaClass) o1;
-                    JavaClass second = (JavaClass) o2;
+            comparator = new Comparator<JavaClass>() {
+                public int compare(JavaClass first, JavaClass second) {
                     long diff = (second.getInstancesCount(false) -
                              first.getInstancesCount(false));
                     return (diff == 0)? 0: ((diff < 0)? -1 : + 1);
                 }
             };
         } else if (query.equals("class")) {
-            comparator = new Comparator() {
-                public int compare(Object o1, Object o2) {
-                    JavaClass first = (JavaClass) o1;
-                    JavaClass second = (JavaClass) o2;
+            comparator = new Comparator<JavaClass>() {
+                public int compare(JavaClass first, JavaClass second) {
                     return first.getName().compareTo(second.getName()); 
                 }
             };
         } else {
             // default sort is by total size
-            comparator = new Comparator() {
-                public int compare(Object o1, Object o2) {
-                    JavaClass first = (JavaClass) o1;
-                    JavaClass second = (JavaClass) o2;
+            comparator = new Comparator<JavaClass>() {
+                public int compare(JavaClass first, JavaClass second) {
                     long diff = (second.getTotalInstanceSize() -
                              first.getTotalInstanceSize());
                     return (diff == 0)? 0: ((diff < 0)? -1 : + 1);
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/server/PlatformClasses.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/server/PlatformClasses.java	Fri May 25 00:49:14 2007 +0000
@@ -56,7 +56,7 @@
  * class or not.  It's a platform class if its name starts with one of
  * the prefixes to be found in /com/sun/tools/hat/resources/platform_names.txt.
  *
- * @version ? [jhat @(#)PlatformClasses.java	1.9 07/05/05]
+ * @version ? [jhat @(#)PlatformClasses.java	1.10 07/05/09]
  * @author      Bill Foote
  */
 
@@ -67,7 +67,7 @@
 
     public static synchronized String[] getNames() {
 	if (names == null) {
-	    LinkedList list = new LinkedList();
+	    LinkedList<String> list = new LinkedList<String>();
 	    InputStream str 
 		= PlatformClasses.class
 		    .getResourceAsStream("/com/sun/tools/hat/resources/platform_names.txt");
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/server/RefsByTypeQuery.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/server/RefsByTypeQuery.java	Fri May 25 00:49:14 2007 +0000
@@ -49,7 +49,7 @@
 /**
  * References by type summary
  *
- * @author A. Sundararajan [jhat @(#)RefsByTypeQuery.java	1.8 07/05/05]
+ * @author A. Sundararajan [jhat @(#)RefsByTypeQuery.java	1.9 07/05/09]
  */
 public class RefsByTypeQuery extends QueryHandler {
     public void run() {
@@ -57,8 +57,8 @@
         if (clazz == null) {
             error("class not found: " + query);
         } else {
-            Map referrersStat = new HashMap(); // <JavaClass, Long>
-            final Map refereesStat = new HashMap(); // <JavaClass, Long>
+            Map<JavaClass, Long> referrersStat = new HashMap<JavaClass, Long>(); 
+            final Map<JavaClass, Long> refereesStat = new HashMap<JavaClass, Long>(); 
             Enumeration instances = clazz.getInstances(false);
             while (instances.hasMoreElements()) {
                 JavaHeapObject instance = (JavaHeapObject) instances.nextElement();
@@ -73,7 +73,7 @@
                          System.out.println("null class for " + ref);
                          continue;
                     }
-                    Long count = (Long) referrersStat.get(cl);
+                    Long count = referrersStat.get(cl);
                     if (count == null) { 
                         count = new Long(1);
                     } else {
@@ -85,7 +85,7 @@
                     new AbstractJavaHeapObjectVisitor() {
                         public void visit(JavaHeapObject obj) {
                             JavaClass cl = obj.getClazz();
-                            Long count = (Long) refereesStat.get(cl);
+                            Long count = refereesStat.get(cl);
                             if (count == null) { 
                                 count = new Long(1);
                             } else {
@@ -119,17 +119,15 @@
         }  // clazz != null
     } // run
 
-    private void print(final Map map) {
+    private void print(final Map<JavaClass, Long> map) {
         out.println("<table border='1' align='center'>");
-        Set keys = map.keySet();
+        Set<JavaClass> keys = map.keySet();
         JavaClass[] classes = new JavaClass[keys.size()];
         keys.toArray(classes);
-        Arrays.sort(classes, new Comparator() {
-            public int compare(Object o1, Object o2) {
-                JavaClass first = (JavaClass) o1;
-                JavaClass second = (JavaClass) o2;
-                Long count1 = (Long)map.get(first);
-                Long count2 = (Long)map.get(second);
+        Arrays.sort(classes, new Comparator<JavaClass>() {
+            public int compare(JavaClass first, JavaClass second) {
+                Long count1 = map.get(first);
+                Long count2 = map.get(second);
                 return count2.compareTo(count1);
             }
         });
--- a/j2se/src/share/classes/com/sun/tools/hat/internal/util/VectorSorter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/hat/internal/util/VectorSorter.java	Fri May 25 00:49:14 2007 +0000
@@ -56,7 +56,7 @@
  *  });
  * </pre>
  *
- * @version     1.5, 03/06/98 [jhat @(#)VectorSorter.java	1.8 07/05/05]
+ * @version     1.5, 03/06/98 [jhat @(#)VectorSorter.java	1.9 07/05/09]
  * @author      Bill Foote
  */
 
@@ -66,7 +66,7 @@
     /**
      * Sort the given vector, using c for comparison
     **/
-    static public void sort(Vector v, Comparer c)  {
+    static public void sort(Vector<Object> v, Comparer c)  {
 	quickSort(v, c, 0, v.size()-1);
     }
 
@@ -74,7 +74,7 @@
     /**
      * Sort a vector of strings, using String.compareTo()
     **/
-    static public void sortVectorOfStrings(Vector v) {
+    static public void sortVectorOfStrings(Vector<Object> v) {
 	sort(v, new Comparer() {
 	    public int compare(Object lhs, Object rhs) {
 		return ((String) lhs).compareTo((String) rhs);
@@ -83,7 +83,7 @@
     }
 
 
-    static private void swap(Vector v, int a, int b) {
+    static private void swap(Vector<Object> v, int a, int b) {
 	Object tmp = v.elementAt(a);
 	v.setElementAt(v.elementAt(b), a);
 	v.setElementAt(tmp, b);
@@ -97,7 +97,7 @@
     // in O(n log n).  It's well-behaved if the list is already sorted,
     // or nearly so.
     //
-    static private void quickSort(Vector v, Comparer c, int from, int to) {
+    static private void quickSort(Vector<Object> v, Comparer c, int from, int to) {
 	if (to <= from)
 	    return;
 	int mid = (from + to) / 2;
--- a/j2se/src/share/classes/com/sun/tools/javac/jvm/ClassWriter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/javac/jvm/ClassWriter.java	Fri May 25 00:49:14 2007 +0000
@@ -55,7 +55,7 @@
  *  This code and its internal interfaces are subject to change or
  *  deletion without notice.</b>
  */
-@Version("@(#)ClassWriter.java	1.130 07/05/05")
+@Version("@(#)ClassWriter.java	1.130 07/05/06")
 public class ClassWriter extends ClassFile {
     protected static final Context.Key<ClassWriter> classWriterKey =
         new Context.Key<ClassWriter>();
@@ -907,7 +907,8 @@
              l.nonEmpty();
              l = l.tail) {
             ClassSymbol inner = l.head;
-            char flags = (char) adjustFlags(inner.flags_field);
+            char flags = (char) adjustFlags(inner.flags_field);            	    
+	    if ((flags & INTERFACE) != 0) flags |= ABSTRACT; // Interfaces are always ABSTRACT
             if (dumpInnerClassModifiers) {
                 log.errWriter.println("INNERCLASS  " + inner.name);
                 log.errWriter.println("---" + flagNames(flags));
--- a/j2se/src/share/classes/com/sun/tools/jdi/AbstractLauncher.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/AbstractLauncher.java	Fri May 25 00:49:14 2007 +0000
@@ -70,7 +70,7 @@
                                                         true);
         String quoted = null;
         String pending = null;
-        List tokenList = new ArrayList();
+        List<String> tokenList = new ArrayList<String>();
         while (tokenizer.hasMoreTokens()) {
             String token = tokenizer.nextToken();
             if (quoted != null) {
--- a/j2se/src/share/classes/com/sun/tools/jdi/ArrayReferenceImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ArrayReferenceImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -29,6 +29,7 @@
 
 import java.util.List;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Iterator;
 
 public class ArrayReferenceImpl extends ObjectReferenceImpl
@@ -73,7 +74,7 @@
         return (Value)list.get(0);
     }
 
-    public List getValues() {
+    public List<Value> getValues() {
         return getValues(0, -1);
     }
     
@@ -100,19 +101,23 @@
         }
     }
 
-    public List getValues(int index, int length) {
+    @SuppressWarnings("unchecked")
+    private static <T> T cast(Object x) {
+	return (T)x;
+    }
+
+    public List<Value> getValues(int index, int length) {
         if (length == -1) { // -1 means the rest of the array
            length = length() - index;
         }
         validateArrayAccess(index, length);
         if (length == 0) {
-            return new ArrayList();
+            return new ArrayList<Value>();
         }
 
-        List vals;
+        List<Value> vals;
         try {
-            vals = JDWP.ArrayReference.GetValues.
-                process(vm, this, index, length).values;
+	    vals = cast(JDWP.ArrayReference.GetValues.process(vm, this, index, length).values);
         } catch (JDWPException exc) {
             throw exc.toJDIException();
         }
@@ -123,7 +128,7 @@
     public void setValue(int index, Value value)
             throws InvalidTypeException,
                    ClassNotLoadedException {
-        List list = new ArrayList(1);
+        List<Value> list = new ArrayList<Value>(1);
         list.add(value);
         setValues(index, list, 0, 1);
     }
--- a/j2se/src/share/classes/com/sun/tools/jdi/ArrayTypeImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ArrayTypeImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -65,8 +65,8 @@
         // arrays don't have methods
     }
 
-    public List allMethods() {
-        return new ArrayList(0);   // arrays don't have methods
+    public List<Method> allMethods() {
+        return new ArrayList<Method>(0);   // arrays don't have methods
     }
 
     /*
@@ -147,8 +147,8 @@
         }
     }
 
-    List inheritedTypes() {
-        return new ArrayList(0);
+    List<ReferenceType> inheritedTypes() {
+        return new ArrayList<ReferenceType>(0);
     }
 
     void getModifiers() {
--- a/j2se/src/share/classes/com/sun/tools/jdi/ClassLoaderReferenceImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ClassLoaderReferenceImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -33,7 +33,7 @@
 
     // This is cached only while the VM is suspended
     private static class Cache extends ObjectReferenceImpl.Cache {
-        List visibleClasses = null;
+        List<ReferenceType> visibleClasses = null;
     }
 
     protected ObjectReferenceImpl.Cache newCache() {
@@ -49,11 +49,9 @@
         return "ClassLoaderReference " + uniqueID();
     }
 
-    public List definedClasses() {
-        ArrayList definedClasses = new ArrayList();
-        Iterator iter = vm.allClasses().iterator();
-        while (iter.hasNext()) {
-            ReferenceType type = (ReferenceType)iter.next();
+    public List<ReferenceType> definedClasses() {
+        ArrayList<ReferenceType> definedClasses = new ArrayList<ReferenceType>();
+	for (ReferenceType type :  vm.allClasses()) {
             if (type.isPrepared() &&
 		equals(type.classLoader())) {
                 definedClasses.add(type);
@@ -62,8 +60,8 @@
         return definedClasses;
     }
 
-    public List visibleClasses() {
-        List classes = null;
+    public List<ReferenceType> visibleClasses() {
+        List<ReferenceType> classes = null;
         try {
             Cache local = (Cache)getCache();
 
@@ -74,7 +72,7 @@
                 JDWP.ClassLoaderReference.VisibleClasses.ClassInfo[] 
                   jdwpClasses = JDWP.ClassLoaderReference.VisibleClasses.
                                             process(vm, this).classes;
-                classes = new ArrayList(jdwpClasses.length);
+                classes = new ArrayList<ReferenceType>(jdwpClasses.length);
                 for (int i = 0; i < jdwpClasses.length; ++i) {
                     classes.add(vm.referenceType(jdwpClasses[i].typeID, 
                                                  jdwpClasses[i].refTypeTag));
@@ -96,7 +94,7 @@
     }
 
     Type findType(String signature) throws ClassNotLoadedException {
-        List types = visibleClasses();
+        List<ReferenceType> types = visibleClasses();
         Iterator iter = types.iterator();
         while (iter.hasNext()) {
             ReferenceType type = (ReferenceType)iter.next();
--- a/j2se/src/share/classes/com/sun/tools/jdi/ClassTypeImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ClassTypeImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -34,9 +34,8 @@
 {
     private boolean cachedSuperclass = false;
     private ClassType superclass = null;
-    private Map lineMapper = null;
     private int lastLine = -1;
-    private List interfaces = null;
+    private List<InterfaceType> interfaces = null;
 
     protected ClassTypeImpl(VirtualMachine aVm,long aRef) {
         super(aVm, aRef);
@@ -66,15 +65,15 @@
         return superclass;
     }
 
-    public List interfaces()  {
+    public List<InterfaceType> interfaces()  {
         if (interfaces == null) {
             interfaces = getInterfaces();
         }
         return interfaces;
     }
 
-    void addInterfaces(List list) {
-        List immediate = interfaces();
+    void addInterfaces(List<InterfaceType> list) {
+        List<InterfaceType> immediate = interfaces();
         list.addAll(interfaces());
 
         Iterator iter = immediate.iterator();
@@ -89,15 +88,15 @@
         }
     }
 
-    public List allInterfaces()  {
-        List all = new ArrayList();
+    public List<InterfaceType> allInterfaces()  {
+        List<InterfaceType> all = new ArrayList<InterfaceType>();
         addInterfaces(all);
         return all;
     }
 
-    public List subclasses() {
-        List all = vm.allClasses();
-        List subs = new ArrayList();
+    public List<ClassType> subclasses() {
+        List<ReferenceType> all = vm.allClasses();
+        List<ClassType> subs = new ArrayList<ClassType>();
         Iterator iter = all.iterator();
         while (iter.hasNext()) {
             ReferenceType refType = (ReferenceType)iter.next();
@@ -105,7 +104,7 @@
                 ClassType clazz = (ClassType)refType;
                 ClassType superclass = clazz.superclass();
                 if ((superclass != null) && superclass.equals(this)) {
-                    subs.add(refType);
+                    subs.add((ClassType)refType);
                 }
             }
         }
@@ -222,7 +221,7 @@
 
         validateMethodInvocation(method);
 
-        List arguments = method.validateAndPrepareArgumentsForInvoke(origArguments);
+        List<? extends Value> arguments = method.validateAndPrepareArgumentsForInvoke(origArguments);
 
 	ValueImpl[] args = (ValueImpl[])arguments.toArray(new ValueImpl[0]);
 	JDWP.ClassType.InvokeMethod ret;
@@ -270,7 +269,7 @@
 
         validateConstructorInvocation(method);
 
-        List arguments = method.validateAndPrepareArgumentsForInvoke(
+        List<Value> arguments = method.validateAndPrepareArgumentsForInvoke(
                                                        origArguments);
 	ValueImpl[] args = (ValueImpl[])arguments.toArray(new ValueImpl[0]);
 	JDWP.ClassType.NewInstance ret = null;
@@ -318,8 +317,8 @@
        return method;
    }
    
-   public List allMethods() {
-        ArrayList list = new ArrayList(methods());
+   public List<Method> allMethods() {
+        ArrayList<Method> list = new ArrayList<Method>(methods());
 
         ClassType clazz = superclass();
         while (clazz != null) {
@@ -340,11 +339,14 @@
         return list;
     }
 
-    List inheritedTypes() {
-        List inherited = new ArrayList(interfaces());
+    List<ReferenceType> inheritedTypes() {
+        List<ReferenceType> inherited = new ArrayList<ReferenceType>();
         if (superclass() != null) {
-            inherited.add(0, superclass()); /* insert at front */
+            inherited.add(0, (ReferenceType)superclass()); /* insert at front */
         }
+	for (ReferenceType rt : interfaces()) {
+ 	    inherited.add(rt);
+	}
         return inherited;
     }
 
@@ -388,7 +390,7 @@
         } 
     }
 
-    void addVisibleMethods(Map methodMap) {
+    void addVisibleMethods(Map<String, Method> methodMap) {
         /*
          * Add methods from 
          * parent types first, so that the methods in this class will
@@ -416,7 +418,7 @@
         } else if ((superclazz != null) && superclazz.isAssignableTo(type)) {
             return true;
         } else {
-            List interfaces = interfaces();
+            List<InterfaceType> interfaces = interfaces();
             Iterator iter = interfaces.iterator();
             while (iter.hasNext()) {
                 InterfaceTypeImpl interfaze = (InterfaceTypeImpl)iter.next();
--- a/j2se/src/share/classes/com/sun/tools/jdi/ConcreteMethodImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ConcreteMethodImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -48,8 +48,8 @@
      */
     static private class SoftLocationXRefs {
         final String stratumID;   // The stratum of this information
-        final Map lineMapper;     // Maps line number to location(s)
-        final List lineLocations; // List of locations ordered by code index
+        final Map<Integer, List<Location>> lineMapper;     // Maps line number to location(s)
+        final List<Location> lineLocations; // List of locations ordered by code index
 
         /*
          * Note: these do not necessarily correspond to 
@@ -60,7 +60,7 @@
         final int lowestLine;
         final int highestLine;
     
-        SoftLocationXRefs(String stratumID, Map lineMapper, List lineLocations, 
+        SoftLocationXRefs(String stratumID, Map<Integer, List<Location>> lineMapper, List<Location> lineLocations, 
                      int lowestLine, int highestLine) {
             this.stratumID = stratumID;
             this.lineMapper = Collections.unmodifiableMap(lineMapper);
@@ -72,13 +72,13 @@
     }
 
     private Location location = null;
-    private SoftReference softBaseLocationXRefsRef;
-    private SoftReference softOtherLocationXRefsRef;
-    private SoftReference variablesRef = null;
+    private SoftReference<SoftLocationXRefs> softBaseLocationXRefsRef;
+    private SoftReference<SoftLocationXRefs> softOtherLocationXRefsRef;
+    private SoftReference<List<LocalVariable>> variablesRef = null;
     private boolean absentVariableInformation = false;
     private long firstIndex = -1;
     private long lastIndex = -1;
-    private SoftReference bytecodesRef = null;
+    private SoftReference<byte[]> bytecodesRef = null;
     private int argSlotCount = -1;
 
     ConcreteMethodImpl(VirtualMachine vm, ReferenceTypeImpl declaringType, 
@@ -98,7 +98,7 @@
         return location;
     }
     
-    List sourceNameFilter(List list, 
+    List<Location> sourceNameFilter(List<Location> list, 
                           SDE.Stratum stratum, 
                           String sourceName)
                             throws AbsentInformationException {
@@ -106,11 +106,9 @@
             return list;
         } else {
             /* needs sourceName filteration */
-            List locs = new ArrayList();
-            Iterator it = list.iterator();
-            while (it.hasNext()) {
-                LocationImpl loc = (LocationImpl)it.next();
-                if (loc.sourceName(stratum).equals(sourceName)) {
+            List<Location> locs = new ArrayList<Location>();
+	    for (Location loc : list) {
+                if (((LocationImpl)loc).sourceName(stratum).equals(sourceName)) {
                     locs.add(loc);
                 }
             }
@@ -118,10 +116,10 @@
         }
     }
 
-    List allLineLocations(SDE.Stratum stratum,
+    List<Location> allLineLocations(SDE.Stratum stratum,
                           String sourceName)
                             throws AbsentInformationException {
-        List lineLocations = getLocations(stratum).lineLocations;
+        List<Location> lineLocations = getLocations(stratum).lineLocations;
 
         if (lineLocations.size() == 0) {
             throw new AbsentInformationException();
@@ -131,7 +129,7 @@
           sourceNameFilter(lineLocations, stratum, sourceName));
     }
 
-    List locationsOfLine(SDE.Stratum stratum,
+    List<Location> locationsOfLine(SDE.Stratum stratum,
                          String sourceName,
                          int lineNumber) 
                             throws AbsentInformationException {
@@ -145,11 +143,10 @@
          * Find the locations which match the line number
          * passed in.
          */
-        List list = (List)info.lineMapper.get(
-                                  new Integer(lineNumber));
+        List<Location> list = info.lineMapper.get(new Integer(lineNumber));
 
         if (list == null) {
-            list = new ArrayList(0);
+            list = new ArrayList<Location>(0);
         }
         return Collections.unmodifiableList(
           sourceNameFilter(list, stratum, sourceName));
@@ -186,7 +183,7 @@
                     "Location with invalid code index");
         }
 
-        List lineLocations = getLocations(stratum).lineLocations;
+        List<Location> lineLocations = getLocations(stratum).lineLocations;
 
         /*
          * Check for absent line numbers.
@@ -216,14 +213,14 @@
     }
 
 
-    public List variables() throws AbsentInformationException {
+    public List<LocalVariable> variables() throws AbsentInformationException {
         return getVariables();
     }
 
-    public List variablesByName(String name) throws AbsentInformationException {
-        List variables = getVariables();
+    public List<LocalVariable> variablesByName(String name) throws AbsentInformationException {
+        List<LocalVariable> variables = getVariables();
         
-        List retList = new ArrayList(2);
+        List<LocalVariable> retList = new ArrayList<LocalVariable>(2);
         Iterator iter = variables.iterator();
         while(iter.hasNext()) {
             LocalVariable variable = (LocalVariable)iter.next();
@@ -234,10 +231,10 @@
         return retList;
     }
 
-    public List arguments() throws AbsentInformationException {
-        List variables = getVariables();
+    public List<LocalVariable> arguments() throws AbsentInformationException {
+        List<LocalVariable> variables = getVariables();
         
-        List retList = new ArrayList(variables.size());
+        List<LocalVariable> retList = new ArrayList<LocalVariable>(variables.size());
         Iterator iter = variables.iterator();
         while(iter.hasNext()) {
             LocalVariable variable = (LocalVariable)iter.next();
@@ -258,7 +255,7 @@
             } catch (JDWPException exc) {
                 throw exc.toJDIException();
             }
-            bytecodesRef = new SoftReference(bytecodes);
+            bytecodesRef = new SoftReference<byte[]>(bytecodes);
         }
         /*
          * Arrays are always modifiable, so it is a little unsafe
@@ -287,8 +284,8 @@
             return info;
         }
 
-        List lineLocations = new ArrayList();
-        Map lineMapper = new HashMap();
+        List<Location> lineLocations = new ArrayList<Location>();
+        Map<Integer, List<Location>> lineMapper = new HashMap<Integer, List<Location>>();
         int lowestLine = -1;
         int highestLine = -1;
         SDE.LineStratum lastLineStratum = null;
@@ -333,9 +330,9 @@
     
                 // Add to the line -> locations map
                 Integer key = new Integer(lineNumber);
-                List mappedLocs = (List)lineMapper.get(key);
+                List<Location> mappedLocs = lineMapper.get(key);
                 if (mappedLocs == null) {
-                    mappedLocs = new ArrayList(1);
+                    mappedLocs = new ArrayList<Location>(1);
                     lineMapper.put(key, mappedLocs);
                 }
                 mappedLocs.add(loc);
@@ -345,7 +342,7 @@
         info = new SoftLocationXRefs(stratumID,
                                 lineMapper, lineLocations, 
                                 lowestLine, highestLine);
-        softOtherLocationXRefsRef = new SoftReference(info);
+        softOtherLocationXRefsRef = new SoftReference<SoftLocationXRefs>(info);
         return info;
     }
 
@@ -369,8 +366,8 @@
 
         int count  = lntab.lines.length;
 
-        List lineLocations = new ArrayList(count);
-        Map lineMapper = new HashMap();
+        List<Location> lineLocations = new ArrayList<Location>(count);
+        Map<Integer, List<Location>>lineMapper = new HashMap<Integer, List<Location>>();
         int lowestLine = -1;
         int highestLine = -1;
         for (int i = 0; i < count; i++) {
@@ -403,9 +400,9 @@
     
                 // Add to the line -> locations map
                 Integer key = new Integer(lineNumber);
-                List mappedLocs = (List)lineMapper.get(key);
+                List<Location> mappedLocs = lineMapper.get(key);
                 if (mappedLocs == null) {
-                    mappedLocs = new ArrayList(1);
+                    mappedLocs = new ArrayList<Location>(1);
                     lineMapper.put(key, mappedLocs);
                 }
                 mappedLocs.add(loc);
@@ -426,7 +423,7 @@
              * method start with no line info
              */
             if (count > 0) {
-                location = (Location)lineLocations.get(0);
+                location = lineLocations.get(0);
             } else {
                 location = new LocationImpl(virtualMachine(), this, 
                                             firstIndex); 
@@ -436,11 +433,11 @@
         info = new SoftLocationXRefs(SDE.BASE_STRATUM_NAME,
                                 lineMapper, lineLocations, 
                                 lowestLine, highestLine);
-        softBaseLocationXRefsRef = new SoftReference(info);
+        softBaseLocationXRefsRef = new SoftReference<SoftLocationXRefs>(info);
         return info;
     }
 
-    private List getVariables1_4() throws AbsentInformationException {
+    private List<LocalVariable> getVariables1_4() throws AbsentInformationException {
         JDWP.Method.VariableTable vartab = null;
         try {
             vartab = JDWP.Method.VariableTable.
@@ -457,7 +454,7 @@
         // Get the number of slots used by argument variables
         argSlotCount = vartab.argCnt;
         int count = vartab.slots.length;
-        List variables = new ArrayList(count);
+        List<LocalVariable> variables = new ArrayList<LocalVariable>(count);
         for (int i=0; i<count; i++) {
             JDWP.Method.VariableTable.SlotInfo si = vartab.slots[i];
 
@@ -482,7 +479,7 @@
         return variables;
     }
 
-    private List getVariables1() throws AbsentInformationException {
+    private List<LocalVariable> getVariables1() throws AbsentInformationException {
 
         if (!vm.canGet1_5LanguageFeatures()) {
             return getVariables1_4();
@@ -504,7 +501,7 @@
         // Get the number of slots used by argument variables
         argSlotCount = vartab.argCnt;
         int count = vartab.slots.length;
-        List variables = new ArrayList(count);
+        List<LocalVariable> variables = new ArrayList<LocalVariable>(count);
         for (int i=0; i<count; i++) {
             JDWP.Method.VariableTableWithGeneric.SlotInfo si = vartab.slots[i];
 
@@ -530,19 +527,19 @@
         return variables;
     }
 
-    private List getVariables() throws AbsentInformationException {
+    private List<LocalVariable> getVariables() throws AbsentInformationException {
         if (absentVariableInformation) {
             throw new AbsentInformationException();
         }
 
-        List variables = (variablesRef == null) ? null :
-                                     (List)variablesRef.get();
+        List<LocalVariable> variables = (variablesRef == null) ? null :
+                                     	variablesRef.get();
         if (variables != null) {
             return variables;
         }
         variables = getVariables1();
         variables = Collections.unmodifiableList(variables);
-        variablesRef = new SoftReference(variables);
+        variablesRef = new SoftReference<List<LocalVariable>>(variables);
         return variables;
     }
 }
--- a/j2se/src/share/classes/com/sun/tools/jdi/ConnectorImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ConnectorImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -39,20 +39,20 @@
 import java.io.Serializable;
 
 abstract class ConnectorImpl implements Connector {
-    Map defaultArguments = new LinkedHashMap();
+    Map<String,Argument> defaultArguments = new java.util.LinkedHashMap<String,Argument>();
 
     // Used by BooleanArgument
     static String trueString = null;
     static String falseString;
 
-    public Map defaultArguments() {
-        Map defaults = new LinkedHashMap();
+    public Map<String,Argument> defaultArguments() {
+        Map<String,Argument> defaults = new java.util.LinkedHashMap<String,Argument>();
         Collection values = defaultArguments.values();
 
         Iterator iter = values.iterator();
         while (iter.hasNext()) {
             ArgumentImpl argument = (ArgumentImpl)iter.next();
-            defaults.put(argument.name(), argument.clone());
+            defaults.put(argument.name(), (Argument)argument.clone());
         }
         return defaults;
     }
@@ -88,7 +88,7 @@
 
     void addSelectedArgument(String name, String label, String description, 
                              String defaultValue, boolean mustSpecify,
-                             List list) {
+                             List<String> list) {
         defaultArguments.put(name, 
                              new SelectedArgumentImpl(name, label, 
                                                       description, 
@@ -397,21 +397,20 @@
     class SelectedArgumentImpl extends ConnectorImpl.ArgumentImpl 
                               implements Connector.SelectedArgument {
 
-        private final List choices;
+        private final List<String> choices;
 
         SelectedArgumentImpl(String name, String label, String description, 
                              String value,
-                             boolean mustSpecify, List choices) {
+                             boolean mustSpecify, List<String> choices) {
             super(name, label, description, value, mustSpecify);
-            this.choices = Collections.unmodifiableList(
-                                           new ArrayList(choices));
+            this.choices = Collections.unmodifiableList(new ArrayList<String>(choices));
         }
 
         /**
          * Return the possible values for the argument
          * @return {@link List} of {@link String}
          */
-        public List choices() {
+        public List<String> choices() {
             return choices;
         }
 
--- a/j2se/src/share/classes/com/sun/tools/jdi/EventQueueImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/EventQueueImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -37,7 +37,7 @@
      * Note this is not a synchronized list. Iteration/update should be
      * protected through the 'this' monitor.
      */
-    LinkedList eventSets = new LinkedList();
+    LinkedList<EventSet> eventSets = new LinkedList<EventSet>();
 
     TargetVM target;
     boolean closed = false;
--- a/j2se/src/share/classes/com/sun/tools/jdi/EventRequestManagerImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/EventRequestManagerImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -27,6 +27,7 @@
 
 import com.sun.jdi.*;
 import com.sun.jdi.request.*;
+import com.sun.tools.jdi.JDWP;
 
 import java.util.*;
 
@@ -35,6 +36,10 @@
  * etc.
  * It include implementations of all the request interfaces..
  */
+// Warnings from the List filters and List[] requestLists fields are hard to
+// fix because generic arrays are not supported. Remove this @SuppressWarnings
+// once the warnings from List filters and List[] requestLists are fixed.
+@SuppressWarnings("unchecked")
 class EventRequestManagerImpl extends MirrorImpl
                                        implements EventRequestManager
 {
@@ -91,7 +96,7 @@
         boolean isEnabled = false;
         boolean deleted = false;
         byte suspendPolicy = JDWP.SuspendPolicy.ALL;
-        private Map clientProperties = null;
+        private Map<Object, Object> clientProperties = null;
     
         EventRequestImpl() {
             super(EventRequestManagerImpl.this.vm);
@@ -216,9 +221,9 @@
          * @see #putProperty
          * @see #getProperty
          */
-        private Map getProperties() {
+        private Map<Object, Object> getProperties() {
             if (clientProperties == null) {
-                clientProperties = new HashMap(2);
+                clientProperties = new HashMap<Object, Object>(2);
             }
             return clientProperties;
         }
@@ -863,68 +868,68 @@
 	}
     }
   
-    public List stepRequests() {
+    public List<StepRequest> stepRequests() {
         return unmodifiableRequestList(JDWP.EventKind.SINGLE_STEP);
     }
 
-    public List classPrepareRequests() {
+    public List<ClassPrepareRequest> classPrepareRequests() {
         return unmodifiableRequestList(JDWP.EventKind.CLASS_PREPARE);
     }
 
-    public List classUnloadRequests() {
+    public List<ClassUnloadRequest> classUnloadRequests() {
         return unmodifiableRequestList(JDWP.EventKind.CLASS_UNLOAD);
     }
 
-    public List threadStartRequests() {
+    public List<ThreadStartRequest> threadStartRequests() {
         return unmodifiableRequestList(JDWP.EventKind.THREAD_START);
     }
 
-    public List threadDeathRequests() {
+    public List<ThreadDeathRequest> threadDeathRequests() {
         return unmodifiableRequestList(JDWP.EventKind.THREAD_DEATH);
     }
 
-    public List exceptionRequests() {
+    public List<ExceptionRequest> exceptionRequests() {
         return unmodifiableRequestList(JDWP.EventKind.EXCEPTION);
     }
 
-    public List breakpointRequests() {
+    public List<BreakpointRequest> breakpointRequests() {
         return unmodifiableRequestList(JDWP.EventKind.BREAKPOINT);
     }
 
-    public List accessWatchpointRequests() {
+    public List<AccessWatchpointRequest> accessWatchpointRequests() {
         return unmodifiableRequestList(JDWP.EventKind.FIELD_ACCESS);
     }
 
-    public List modificationWatchpointRequests() {
+    public List<ModificationWatchpointRequest> modificationWatchpointRequests() {
         return unmodifiableRequestList(JDWP.EventKind.FIELD_MODIFICATION);
     }
 
-    public List methodEntryRequests() {
+    public List<MethodEntryRequest> methodEntryRequests() {
         return unmodifiableRequestList(JDWP.EventKind.METHOD_ENTRY);
     }
 
-    public List methodExitRequests() {
+    public List<MethodExitRequest> methodExitRequests() {
         return unmodifiableRequestList(
                                EventRequestManagerImpl.methodExitEventCmd);
     }
 
-    public List monitorContendedEnterRequests() {
+    public List<MonitorContendedEnterRequest> monitorContendedEnterRequests() {
         return unmodifiableRequestList(JDWP.EventKind.MONITOR_CONTENDED_ENTER);
     }
 
-    public List monitorContendedEnteredRequests() {
+    public List<MonitorContendedEnteredRequest> monitorContendedEnteredRequests() {
         return unmodifiableRequestList(JDWP.EventKind.MONITOR_CONTENDED_ENTERED);
     }
 
-    public List monitorWaitRequests() {
+    public List<MonitorWaitRequest> monitorWaitRequests() {
         return unmodifiableRequestList(JDWP.EventKind.MONITOR_WAIT);
     }
 
-    public List monitorWaitedRequests() {
+    public List<MonitorWaitedRequest> monitorWaitedRequests() {
         return unmodifiableRequestList(JDWP.EventKind.MONITOR_WAITED);
     }
 
-    public List vmDeathRequests() {
+    public List<VMDeathRequest> vmDeathRequests() {
         return unmodifiableRequestList(JDWP.EventKind.VM_DEATH);
     }
 
@@ -943,7 +948,7 @@
         return null;
     }
         
-    List requestList(int eventCmd) {
+    List<? extends EventRequest>  requestList(int eventCmd) {
         return requestLists[eventCmd];
     }
 
--- a/j2se/src/share/classes/com/sun/tools/jdi/FieldImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/FieldImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -27,7 +27,6 @@
 
 import com.sun.jdi.*;
 
-import java.util.List;
 
 public class FieldImpl extends TypeComponentImpl 
                        implements Field, ValueContainer {
--- a/j2se/src/share/classes/com/sun/tools/jdi/GenericListeningConnector.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/GenericListeningConnector.java	Fri May 25 00:49:14 2007 +0000
@@ -45,7 +45,7 @@
     static final String ARG_ADDRESS = "address";
     static final String ARG_TIMEOUT = "timeout";
 
-    Map listenMap;
+    Map<Map<String,? extends Connector.Argument>, TransportService.ListenKey>  listenMap;
     TransportService transportService;
     Transport transport;
 
@@ -81,7 +81,7 @@
                 false,
                 0, Integer.MAX_VALUE);
 
-	listenMap = new HashMap(10);
+	listenMap = new HashMap<Map<String,? extends Connector.Argument>,TransportService.ListenKey>(10);
     }
 
     /**
@@ -101,14 +101,13 @@
 	return new GenericListeningConnector(ts, true);
     }
 
-    public String startListening(String address, Map args) 
+    public String startListening(String address, Map<String,? extends Connector.Argument> args) 
 	throws IOException, IllegalConnectorArgumentsException
     {
-	TransportService.ListenKey listener =
-            (TransportService.ListenKey)listenMap.get(args);
+	TransportService.ListenKey listener = listenMap.get(args);
 	if (listener != null) {
 	   throw new IllegalConnectorArgumentsException("Already listening",
-               new ArrayList(args.keySet())); 
+               new ArrayList<String>(args.keySet())); 
 	}
 
 	listener = transportService.startListening(address);
@@ -127,11 +126,10 @@
     public void stopListening(Map<String,? extends Connector.Argument> args)
 	throws IOException, IllegalConnectorArgumentsException
     {
-	TransportService.ListenKey listener = 
-	    (TransportService.ListenKey)listenMap.get(args);
+	TransportService.ListenKey listener = listenMap.get(args);
 	if (listener == null) {
            throw new IllegalConnectorArgumentsException("Not listening", 
-               new ArrayList(args.keySet()));
+               new ArrayList<String>(args.keySet()));
         }
 	transportService.stopListening(listener);
 	listenMap.remove(args);
@@ -147,8 +145,7 @@
 	    timeout = Integer.decode(ts).intValue();
         }
 
-	TransportService.ListenKey listener =
-            (TransportService.ListenKey)listenMap.get(args);
+	TransportService.ListenKey listener = listenMap.get(args);
 	Connection connection;
 	if (listener != null) {
 	    connection = transportService.accept(listener, timeout, 0);
@@ -159,7 +156,7 @@
 	     * once-off accept
 	     */
 	     startListening(args);
-	     listener = (TransportService.ListenKey)listenMap.get(args);
+	     listener = listenMap.get(args);
 	     assert listener != null;
 	     connection = transportService.accept(listener, timeout, 0);
 	     stopListening(args);
--- a/j2se/src/share/classes/com/sun/tools/jdi/InterfaceTypeImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/InterfaceTypeImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -37,29 +37,26 @@
 public class InterfaceTypeImpl extends ReferenceTypeImpl
                                implements InterfaceType {
 
-    private SoftReference superinterfacesRef = null;
+    private SoftReference<List<InterfaceType>> superinterfacesRef = null;
 
     protected InterfaceTypeImpl(VirtualMachine aVm,long aRef) {
         super(aVm, aRef);
     }
 
-    public List superinterfaces() {
-        List superinterfaces = (superinterfacesRef == null) ? null :
-                                     (List)superinterfacesRef.get();
+    public List<InterfaceType> superinterfaces() {
+        List<InterfaceType> superinterfaces = (superinterfacesRef == null) ? null :
+                                     superinterfacesRef.get();
         if (superinterfaces == null) {
             superinterfaces = getInterfaces();
             superinterfaces = Collections.unmodifiableList(superinterfaces);
-            superinterfacesRef = new SoftReference(superinterfaces);
+            superinterfacesRef = new SoftReference<List<InterfaceType>>(superinterfaces);
         }
         return superinterfaces;
     }
 
-    public List subinterfaces() {
-        List all = vm.allClasses();
-        List subs = new ArrayList();
-        Iterator iter = all.iterator();
-        while (iter.hasNext()) {
-            ReferenceType refType = (ReferenceType)iter.next();
+    public List<InterfaceType> subinterfaces() {
+        List<InterfaceType> subs = new ArrayList<InterfaceType>();
+        for (ReferenceType refType : vm.allClasses()) {
             if (refType instanceof InterfaceType) {
                 InterfaceType interfaze = (InterfaceType)refType;
                 if (interfaze.isPrepared() && interfaze.superinterfaces().contains(this)) {
@@ -67,16 +64,12 @@
                 }
             }
         }
-
         return subs;
     }
 
-    public List implementors() {
-        List all = vm.allClasses();
-        List implementors = new ArrayList();
-        Iterator iter = all.iterator();
-        while (iter.hasNext()) {
-            ReferenceType refType = (ReferenceType)iter.next();
+    public List<ClassType> implementors() {
+        List<ClassType> implementors = new ArrayList<ClassType>();
+        for (ReferenceType refType : vm.allClasses()) {
             if (refType instanceof ClassType) {
                 ClassType clazz = (ClassType)refType;
                 if (clazz.isPrepared() && clazz.interfaces().contains(this)) {
@@ -84,50 +77,44 @@
                 }
             }
         }
-
         return implementors;
     }
 
-    void addVisibleMethods(Map methodMap) {
+    void addVisibleMethods(Map<String, Method> methodMap) {
         /*
          * Add methods from 
          * parent types first, so that the methods in this class will
          * overwrite them in the hash table
          */
 
-        Iterator iter = superinterfaces().iterator();
-        while (iter.hasNext()) {
-            InterfaceTypeImpl interfaze = (InterfaceTypeImpl)iter.next();
-            interfaze.addVisibleMethods(methodMap);
+	for (InterfaceType interfaze : superinterfaces()) {
+            ((InterfaceTypeImpl)interfaze).addVisibleMethods(methodMap);
         }
 
         addToMethodMap(methodMap, methods());
     }
 
-    public List allMethods() {
-        ArrayList list = new ArrayList(methods());
+    public List<Method> allMethods() {
+        ArrayList<Method> list = new ArrayList<Method>(methods());
         
         /*
          * It's more efficient if don't do this 
          * recursively.
          */
-        List interfaces = allSuperinterfaces();
-        Iterator iter = interfaces.iterator();
-        while (iter.hasNext()) {
-            InterfaceType interfaze = (InterfaceType)iter.next();
+        for (InterfaceType interfaze : allSuperinterfaces()) {
             list.addAll(interfaze.methods());
         }
 
         return list;
     }
 
-    List allSuperinterfaces() {
-        ArrayList list = new ArrayList();
+    List<InterfaceType> allSuperinterfaces() {
+        ArrayList<InterfaceType> list = new ArrayList<InterfaceType>();
         addSuperinterfaces(list);
         return list;
     }
 
-    void addSuperinterfaces(List list) {
+    void addSuperinterfaces(List<InterfaceType> list) {
         /*
          * This code is a little strange because it 
          * builds the list with a more suitable order than the
@@ -140,7 +127,7 @@
          * Get a list of direct superinterfaces that's not already in the 
          * list being built.
          */
-        List immediate = new ArrayList(superinterfaces());
+        List<InterfaceType> immediate = new ArrayList<InterfaceType>(superinterfaces());
         Iterator iter = immediate.iterator();
         while (iter.hasNext()) {
             InterfaceType interfaze = (InterfaceType)iter.next();
@@ -170,13 +157,9 @@
         if (this.equals(type)) {
             return true;
         } else {
-
             // Try superinterfaces.
-            List supers = superinterfaces();
-            Iterator iter = supers.iterator();
-            while (iter.hasNext()) {
-                InterfaceTypeImpl interfaze = (InterfaceTypeImpl)iter.next();
-                if (interfaze.isAssignableTo(type)) {
+            for (InterfaceType interfaze : superinterfaces()) {
+                if (((InterfaceTypeImpl)interfaze).isAssignableTo(type)) {
                     return true;
                 }
             }
@@ -185,7 +168,7 @@
         }
     }
 
-    List inheritedTypes() {
+    List<InterfaceType> inheritedTypes() {
         return superinterfaces();
     }
 
--- a/j2se/src/share/classes/com/sun/tools/jdi/JNITypeParser.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/JNITypeParser.java	Fri May 25 00:49:14 2007 +0000
@@ -35,8 +35,8 @@
     static final char SIGNATURE_ENDFUNC = ')';
 
     private String signature;
-    private List typeNameList;
-    private List signatureList;
+    private List<String> typeNameList;
+    private List<String> signatureList;
     private int currentIndex;
 
     JNITypeParser(String signature) {
@@ -85,7 +85,7 @@
         return (String)typeNameList().get(typeNameList().size()-1);
     }
 
-    List argumentTypeNames() {
+    List<String> argumentTypeNames() {
         return typeNameList().subList(0, typeNameList().size() - 1);
     }
         
@@ -93,7 +93,7 @@
         return (String)signatureList().get(signatureList().size()-1);
     }
 
-    List argumentSignatures() {
+    List<String> argumentSignatures() {
         return signatureList().subList(0, signatureList().size() - 1);
     }
 
@@ -110,9 +110,9 @@
         return signature().substring(level);
     }
         
-    private synchronized List signatureList() {
+    private synchronized List<String> signatureList() {
         if (signatureList == null) {
-            signatureList = new ArrayList(10);
+            signatureList = new ArrayList<String>(10);
             String elem;
             
             currentIndex = 0;
@@ -129,9 +129,9 @@
         return signatureList;
     }
 
-    private synchronized List typeNameList() {
+    private synchronized List<String> typeNameList() {
         if (typeNameList == null) {
-            typeNameList = new ArrayList(10);
+            typeNameList = new ArrayList<String>(10);
             String elem;
             
             currentIndex = 0;
--- a/j2se/src/share/classes/com/sun/tools/jdi/MethodImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/MethodImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -37,11 +37,11 @@
     private JNITypeParser signatureParser;
     abstract int argSlotCount() throws AbsentInformationException;
 
-    abstract List allLineLocations(SDE.Stratum stratum,
+    abstract List<Location> allLineLocations(SDE.Stratum stratum,
                                    String sourceName)
                            throws AbsentInformationException;
 
-    abstract List locationsOfLine(SDE.Stratum stratum,
+    abstract List<Location> locationsOfLine(SDE.Stratum stratum,
                                   String sourceName,
                                   int lineNumber)
                            throws AbsentInformationException;
@@ -91,25 +91,25 @@
         return (int)ref();
     }
 
-    public final List allLineLocations()
+    public final List<Location> allLineLocations()
                            throws AbsentInformationException {
         return allLineLocations(vm.getDefaultStratum(), null);
     }
 
-    public List allLineLocations(String stratumID,
+    public List<Location> allLineLocations(String stratumID,
                                  String sourceName)
                            throws AbsentInformationException {
         return allLineLocations(declaringType.stratum(stratumID),
                                 sourceName);
     }
 
-    public final List locationsOfLine(int lineNumber)
+    public final List<Location> locationsOfLine(int lineNumber)
                            throws AbsentInformationException {
         return locationsOfLine(vm.getDefaultStratum(), 
                                null, lineNumber);
     }
 
-    public List locationsOfLine(String stratumID,
+    public List<Location> locationsOfLine(String stratumID,
                                 String sourceName,
                                 int lineNumber)
                            throws AbsentInformationException {
@@ -148,11 +148,11 @@
         return enclosing.findType(signature);
     }
 
-    public List argumentTypeNames() {
+    public List<String> argumentTypeNames() {
         return signatureParser.argumentTypeNames();
     }
 
-    public List argumentSignatures() {
+    public List<String> argumentSignatures() {
         return signatureParser.argumentSignatures();
     }
 
@@ -162,9 +162,9 @@
         return enclosing.findType(signature);
     }
 
-    public List argumentTypes() throws ClassNotLoadedException {
+    public List<Type> argumentTypes() throws ClassNotLoadedException {
         int size = argumentSignatures().size();
-        ArrayList types = new ArrayList(size);
+        ArrayList<Type> types = new ArrayList<Type>(size);
         for (int i = 0; i < size; i++) {
             Type type = argumentType(i);
             types.add(type);
@@ -289,9 +289,9 @@
      *     - delete arguments(n+1), ...
      * NOTE that this might modify the input list.
      */
-    void handleVarArgs(List arguments) 
+    void handleVarArgs(List<Value> arguments) 
         throws ClassNotLoadedException, InvalidTypeException {
-        List paramTypes = this.argumentTypes();
+        List<Type> paramTypes = this.argumentTypes();
         ArrayType lastParamType = (ArrayType)paramTypes.get(paramTypes.size() - 1);
         Type componentType = lastParamType.componentType();
         int argCount = arguments.size();
@@ -351,10 +351,10 @@
     /*
      * The output list will be different than the input list.
      */
-    List validateAndPrepareArgumentsForInvoke(List origArguments) 
+    List<Value> validateAndPrepareArgumentsForInvoke(List<? extends Value> origArguments) 
                          throws ClassNotLoadedException, InvalidTypeException {
         
-        List arguments = new ArrayList(origArguments);
+        List<Value> arguments = new ArrayList<Value>(origArguments);
         if (isVarArgs()) {
             handleVarArgs(arguments);
         }
--- a/j2se/src/share/classes/com/sun/tools/jdi/NonConcreteMethodImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/NonConcreteMethodImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -64,41 +64,41 @@
         return location;
     }
 
-    public List allLineLocations(String stratumID,
+    public List<Location> allLineLocations(String stratumID,
                                  String sourceName) {
-        return new ArrayList(0);
+        return new ArrayList<Location>(0);
     }
 
-    public List allLineLocations(SDE.Stratum stratum,
+    public List<Location> allLineLocations(SDE.Stratum stratum,
                                  String sourceName) {
-        return new ArrayList(0);
+        return new ArrayList<Location>(0);
     }
 
-    public List locationsOfLine(String stratumID,
+    public List<Location> locationsOfLine(String stratumID,
                                 String sourceName,
                                 int lineNumber) {
-        return new ArrayList(0);
+        return new ArrayList<Location>(0);
     }
 
-    public List locationsOfLine(SDE.Stratum stratum,
+    public List<Location> locationsOfLine(SDE.Stratum stratum,
                                 String sourceName,
                                 int lineNumber) {
-        return new ArrayList(0);
+        return new ArrayList<Location>(0);
     }
 
     public Location locationOfCodeIndex(long codeIndex) {
         return null;
     }
 
-    public List variables() throws AbsentInformationException {
+    public List<LocalVariable> variables() throws AbsentInformationException {
         throw new AbsentInformationException();
     }
 
-    public List variablesByName(String name) throws AbsentInformationException {
+    public List<LocalVariable> variablesByName(String name) throws AbsentInformationException {
         throw new AbsentInformationException();
     }
 
-    public List arguments() throws AbsentInformationException {
+    public List<LocalVariable> arguments() throws AbsentInformationException {
         throw new AbsentInformationException();
     }
     
--- a/j2se/src/share/classes/com/sun/tools/jdi/ObjectReferenceImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ObjectReferenceImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -161,7 +161,7 @@
     }
 
     public Value getValue(Field sig) {
-        List list = new ArrayList(1);
+        List<Field> list = new ArrayList<Field>(1);
         list.add(sig);
         Map map = getValues(list);
         return(Value)map.get(sig);
@@ -170,9 +170,9 @@
     public Map<Field,Value> getValues(List<? extends Field> theFields) {
         validateMirrors(theFields);
 
-        List staticFields = new ArrayList(0);
+        List<Field> staticFields = new ArrayList<Field>(0);
         int size = theFields.size();
-        List instanceFields = new ArrayList(size);
+        List<Field> instanceFields = new ArrayList<Field>(size);
 
         for (int i=0; i<size; i++) {
             Field field = (Field)theFields.get(i);
@@ -190,11 +190,11 @@
             }
         }
 
-        Map map;
+        Map<Field, Value> map;
         if (staticFields.size() > 0) {
             map = referenceType().getValues(staticFields);
         } else {
-            map = new HashMap(size);
+            map = new HashMap<Field, Value>(size);
         }
 
         size = instanceFields.size();
@@ -380,7 +380,7 @@
 
         validateMethodInvocation(method, options);
 
-        List arguments = method.validateAndPrepareArgumentsForInvoke(
+        List<Value> arguments = method.validateAndPrepareArgumentsForInvoke(
                                                   origArguments);
 
 	ValueImpl[] args = (ValueImpl[])arguments.toArray(new ValueImpl[0]);
@@ -503,8 +503,8 @@
         return info;
     }
 
-    public List waitingThreads() throws IncompatibleThreadStateException {
-        return Arrays.asList(jdwpMonitorInfo().waiters);
+    public List<ThreadReference> waitingThreads() throws IncompatibleThreadStateException {
+        return Arrays.asList((ThreadReference[])jdwpMonitorInfo().waiters);
     }
 
     public ThreadReference owningThread() throws IncompatibleThreadStateException {
@@ -516,7 +516,7 @@
     }
 
 
-    public List referringObjects(long maxReferrers) {
+    public List<ObjectReference> referringObjects(long maxReferrers) {
         if (!vm.canGetInstanceInfo()) {
             throw new UnsupportedOperationException(
                 "target does not support getting referring objects");
@@ -532,7 +532,7 @@
         // JDWP can't currently handle more than this (in mustang)
 
         try {
-            return Arrays.asList(JDWP.ObjectReference.ReferringObjects.
+            return Arrays.asList((ObjectReference[])JDWP.ObjectReference.ReferringObjects.
                                 process(vm, this, intMax).referringObjects);
         } catch (JDWPException exc) {
             throw exc.toJDIException();
--- a/j2se/src/share/classes/com/sun/tools/jdi/ObsoleteMethodImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ObsoleteMethodImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -54,20 +54,20 @@
         throw new ClassNotLoadedException("type unknown");
     }
 
-    public List argumentTypeNames() {
-        return new ArrayList();
+    public List<String> argumentTypeNames() {
+        return new ArrayList<String>();
     }
 
-    public List argumentSignatures() {
-        return new ArrayList();
+    public List<String> argumentSignatures() {
+        return new ArrayList<String>();
     }
 
     Type argumentType(int index) throws ClassNotLoadedException {
         throw new ClassNotLoadedException("type unknown");
     }
 
-    public List argumentTypes() throws ClassNotLoadedException {
-        return new ArrayList();
+    public List<Type> argumentTypes() throws ClassNotLoadedException {
+        return new ArrayList<Type>();
     }
 
 }
--- a/j2se/src/share/classes/com/sun/tools/jdi/PacketStream.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/PacketStream.java	Fri May 25 00:49:14 2007 +0000
@@ -577,10 +577,10 @@
         return array;
     }
 
-    List readArrayRegion() {
+    List<Value> readArrayRegion() {
         byte typeKey = readByte();
         int length = readInt();
-        List list = new ArrayList(length);
+        List<Value> list = new ArrayList<Value>(length);
         boolean gettingObjects = isObjectTag(typeKey);
         for (int i = 0; i < length; i++) {
             /*
@@ -599,10 +599,10 @@
         return list;
     }
 
-    void writeArrayRegion(List srcValues) {
+    void writeArrayRegion(List<Value> srcValues) {
         writeInt(srcValues.size());
         for (int i = 0; i < srcValues.size(); i++) {
-            ValueImpl value = (ValueImpl)srcValues.get(i);
+            Value value = srcValues.get(i);
             writeUntaggedValue(value);
         }
     }
--- a/j2se/src/share/classes/com/sun/tools/jdi/ReferenceTypeImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ReferenceTypeImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -40,9 +40,9 @@
     private String baseSourceDir = null;
     private String baseSourcePath = null;
     protected int modifiers = -1;
-    private SoftReference fieldsRef = null;
-    private SoftReference methodsRef = null;
-    private SoftReference sdeRef = null;
+    private SoftReference<List<Field>> fieldsRef = null;
+    private SoftReference<List<Method>> methodsRef = null;
+    private SoftReference<SDE> sdeRef = null;
 
     private boolean isClassLoaderCached = false;
     private ClassLoaderReference classLoader = null;
@@ -302,18 +302,17 @@
         return (status & JDWP.ClassStatus.ERROR) != 0;
     }
 
-    public List fields() {
-        List fields = (fieldsRef == null) ? null : (List)fieldsRef.get();
+    public List<Field> fields() {
+        List<Field> fields = (fieldsRef == null) ? null : fieldsRef.get();
         if (fields == null) {
             if (vm.canGet1_5LanguageFeatures()) {
                 JDWP.ReferenceType.FieldsWithGeneric.FieldInfo[] jdwpFields;
                 try {
-                    jdwpFields = JDWP.ReferenceType.FieldsWithGeneric.
-                        process(vm, this).declared;
+                    jdwpFields = JDWP.ReferenceType.FieldsWithGeneric.process(vm, this).declared;
                 } catch (JDWPException exc) {
                     throw exc.toJDIException();
                 }
-                fields = new ArrayList(jdwpFields.length);
+                fields = new ArrayList<Field>(jdwpFields.length);
                 for (int i=0; i<jdwpFields.length; i++) {
                     JDWP.ReferenceType.FieldsWithGeneric.FieldInfo fi
                         = jdwpFields[i];
@@ -332,7 +331,7 @@
                 } catch (JDWPException exc) {
                     throw exc.toJDIException();
                 }
-                fields = new ArrayList(jdwpFields.length);
+                fields = new ArrayList<Field>(jdwpFields.length);
                 for (int i=0; i<jdwpFields.length; i++) {
                     JDWP.ReferenceType.Fields.FieldInfo fi = jdwpFields[i];
 
@@ -345,15 +344,15 @@
             }
 
             fields = Collections.unmodifiableList(fields);
-            fieldsRef = new SoftReference(fields);
+            fieldsRef = new SoftReference<List<Field>>(fields);
         }
         return fields;
     }
 
-    abstract List inheritedTypes();
+    abstract List<? extends ReferenceType> inheritedTypes();
 
-    void addVisibleFields(List visibleList, Map visibleTable, List ambiguousNames) {
-        List list = visibleFields();
+    void addVisibleFields(List<Field> visibleList, Map<String, Field> visibleTable, List<String> ambiguousNames) {
+        List<Field> list = visibleFields();
         Iterator iter = list.iterator();
         while (iter.hasNext()) {
             Field field = (Field)iter.next();
@@ -374,21 +373,21 @@
         }
     }
 
-    public List visibleFields() {
+    public List<Field> visibleFields() {
         /*
          * Maintain two different collections of visible fields. The 
          * list maintains a reasonable order for return. The
          * hash map provides an efficient way to lookup visible fields
          * by name, important for finding hidden or ambiguous fields.
          */
-        List visibleList = new ArrayList();
-        Map  visibleTable = new HashMap();
+        List<Field> visibleList = new ArrayList<Field>();
+        Map<String, Field>  visibleTable = new HashMap<String, Field>();
 
         /* Track fields removed from above collection due to ambiguity */
-        List ambiguousNames = new ArrayList();
+        List<String> ambiguousNames = new ArrayList<String>();
 
         /* Add inherited, visible fields */
-        List types = inheritedTypes();
+        List<? extends ReferenceType> types = inheritedTypes();
         Iterator iter = types.iterator();
         while (iter.hasNext()) {
             /*
@@ -402,7 +401,7 @@
          * Insert fields from this type, removing any inherited fields they
          * hide. 
          */
-        List retList = new ArrayList(fields());
+        List<Field> retList = new ArrayList<Field>(fields());
         iter = retList.iterator();
         while (iter.hasNext()) {
             Field field = (Field)iter.next();
@@ -415,16 +414,16 @@
         return retList;
     }
 
-    void addAllFields(List fieldList, Set typeSet) {
+    void addAllFields(List<Field> fieldList, Set<ReferenceType> typeSet) {
         /* Continue the recursion only if this type is new */
         if (!typeSet.contains(this)) {
-            typeSet.add(this);
+            typeSet.add((ReferenceType)this);
 
             /* Add local fields */
             fieldList.addAll(fields());
 
             /* Add inherited fields */
-            List types = inheritedTypes();
+            List<? extends ReferenceType> types = inheritedTypes();
             Iterator iter = types.iterator();
             while (iter.hasNext()) {
                 ReferenceTypeImpl type = (ReferenceTypeImpl)iter.next();
@@ -432,9 +431,9 @@
             }
         }
     }
-    public List allFields() {
-        List fieldList = new ArrayList();
-        Set typeSet = new HashSet();
+    public List<Field> allFields() {
+        List<Field> fieldList = new ArrayList<Field>();
+        Set<ReferenceType> typeSet = new HashSet<ReferenceType>();
         addAllFields(fieldList, typeSet);
         return fieldList;
     }
@@ -456,8 +455,8 @@
         return null;
     }
 
-    public List methods() {
-        List methods = (methodsRef == null) ? null : (List)methodsRef.get();
+    public List<Method> methods() {
+        List<Method> methods = (methodsRef == null) ? null : methodsRef.get();
         if (methods == null) {
             if (!vm.canGet1_5LanguageFeatures()) {
                 methods = methods1_4();
@@ -469,7 +468,7 @@
                 } catch (JDWPException exc) {
                     throw exc.toJDIException();
                 }
-                methods = new ArrayList(declared.length);
+                methods = new ArrayList<Method>(declared.length);
                 for (int i=0; i<declared.length; i++) {
                     JDWP.ReferenceType.MethodsWithGeneric.MethodInfo 
                         mi = declared[i];
@@ -483,13 +482,13 @@
                 }
             }
             methods = Collections.unmodifiableList(methods);
-            methodsRef = new SoftReference(methods);
+            methodsRef = new SoftReference<List<Method>>(methods);
         }
         return methods;
     }
 
-    private List methods1_4() {
-        List methods;
+    private List<Method> methods1_4() {
+        List<Method> methods;
         JDWP.ReferenceType.Methods.MethodInfo[] declared;
         try {
             declared = JDWP.ReferenceType.Methods.
@@ -497,7 +496,7 @@
         } catch (JDWPException exc) {
             throw exc.toJDIException();
         }
-        methods = new ArrayList(declared.length);
+        methods = new ArrayList<Method>(declared.length);
         for (int i=0; i<declared.length; i++) {
             JDWP.ReferenceType.Methods.MethodInfo mi = declared[i];
 
@@ -515,7 +514,7 @@
      * Utility method used by subclasses to build lists of visible
      * methods.
      */
-    void addToMethodMap(Map methodMap, List methodList) {
+    void addToMethodMap(Map<String, Method> methodMap, List<Method> methodList) {
         Iterator iter = methodList.iterator();
         while (iter.hasNext()) {
             Method method = (Method)iter.next();
@@ -523,15 +522,15 @@
         }
     }
 
-    abstract void addVisibleMethods(Map methodMap);
+    abstract void addVisibleMethods(Map<String, Method> methodMap);
 
-    public List visibleMethods() {
+    public List<Method> visibleMethods() {
         /*
          * Build a collection of all visible methods. The hash
          * map allows us to do this efficiently by keying on the
          * concatenation of name and signature.
          */
-        Map map = new HashMap();
+        Map<String, Method> map = new HashMap<String, Method>();
         addVisibleMethods(map);
 
         /*
@@ -540,16 +539,16 @@
          * So, start over with allMethods() and use the hash map
          * to filter that ordered collection.
          */
-        List list = allMethods();
+        List<Method> list = allMethods();
         list.retainAll(map.values());
         return list;
     }
 
-    abstract public List allMethods();
+    abstract public List<Method> allMethods();
 
-    public List methodsByName(String name) {
-        List methods = visibleMethods();
-        ArrayList retList = new ArrayList(methods.size());
+    public List<Method> methodsByName(String name) {
+        List<Method> methods = visibleMethods();
+        ArrayList<Method> retList = new ArrayList<Method>(methods.size());
         Iterator iter = methods.iterator();
         while (iter.hasNext()) {
             Method candidate = (Method)iter.next();
@@ -561,9 +560,9 @@
         return retList;
     }
 
-    public List methodsByName(String name, String signature) {
-        List methods = visibleMethods();
-        ArrayList retList = new ArrayList(methods.size());
+    public List<Method> methodsByName(String name, String signature) {
+        List<Method> methods = visibleMethods();
+        ArrayList<Method> retList = new ArrayList<Method>(methods.size());
         Iterator iter = methods.iterator();
         while (iter.hasNext()) {
             Method candidate = (Method)iter.next();
@@ -576,7 +575,7 @@
         return retList;
     }
 
-    List getInterfaces() {
+    List<InterfaceType> getInterfaces() {
         InterfaceTypeImpl[] intfs;
 	try {
 	    intfs = JDWP.ReferenceType.Interfaces.
@@ -584,12 +583,12 @@
 	} catch (JDWPException exc) {
 	    throw exc.toJDIException();
 	}
-        return Arrays.asList(intfs);
+        return Arrays.asList((InterfaceType[])intfs);
     }
 
-    public List nestedTypes() {
+    public List<ReferenceType> nestedTypes() {
         List all = vm.allClasses();
-        List nested = new ArrayList();
+        List<ReferenceType> nested = new ArrayList<ReferenceType>();
         String outername = name();
         int outerlen = outername.length();
         Iterator iter = all.iterator();
@@ -609,7 +608,7 @@
     }
 
     public Value getValue(Field sig) {
-        List list = new ArrayList(1);
+        List<Field> list = new ArrayList<Field>(1);
         list.add(sig);
         Map map = getValues(list);
         return(Value)map.get(sig);
@@ -658,7 +657,7 @@
                                          field.ref());
         }
 
-        Map map = new HashMap(size);
+        Map<Field, Value> map = new HashMap<Field, Value>(size);
 
 	ValueImpl[] values;
 	try {
@@ -710,22 +709,22 @@
         return (String)(sourceNames(vm.getDefaultStratum()).get(0));
     }
 
-    public List sourceNames(String stratumID)
+    public List<String> sourceNames(String stratumID)
                                 throws AbsentInformationException {
         SDE.Stratum stratum = stratum(stratumID);
         if (stratum.isJava()) {
-            List result = new ArrayList(1);
+            List<String> result = new ArrayList<String>(1);
             result.add(baseSourceName());
             return result;
         }
         return stratum.sourceNames(this);
     }
 
-    public List sourcePaths(String stratumID)
+    public List<String> sourcePaths(String stratumID)
                                 throws AbsentInformationException {
         SDE.Stratum stratum = stratum(stratumID);
         if (stratum.isJava()) {
-            List result = new ArrayList(1);
+            List<String> result = new ArrayList<String>(1);
             result.add(baseSourceDir() + baseSourceName());
             return result;
         }
@@ -805,7 +804,7 @@
                     process(vm, this).extension;
             } catch (JDWPException exc) {
                 if (exc.errorCode() != JDWP.Error.ABSENT_INFORMATION) {
-                    sdeRef = new SoftReference(NO_SDE_INFO_MARK);
+                    sdeRef = new SoftReference<SDE>(NO_SDE_INFO_MARK);
                     throw exc.toJDIException();
                 }
             }
@@ -814,17 +813,17 @@
             } else {
                 sde = new SDE(extension);
             }
-            sdeRef = new SoftReference(sde);
+            sdeRef = new SoftReference<SDE>(sde);
         }
         return sde;
     }
 
-    public List availableStrata() {
+    public List<String> availableStrata() {
         SDE sde = sourceDebugExtensionInfo();
         if (sde.isValid()) {
             return sde.availableStrata();
         } else {
-            List strata = new ArrayList();
+            List<String> strata = new ArrayList<String>();
             strata.add(SDE.BASE_STRATUM_NAME);
             return strata;
         }
@@ -849,16 +848,16 @@
         return modifiers;
     }        
 
-    public List allLineLocations()
+    public List<Location> allLineLocations()
                             throws AbsentInformationException {
         return allLineLocations(vm.getDefaultStratum(), null);
     }
 
-    public List allLineLocations(String stratumID, String sourceName)
+    public List<Location> allLineLocations(String stratumID, String sourceName)
                             throws AbsentInformationException {
         boolean someAbsent = false; // A method that should have info, didn't
         SDE.Stratum stratum = stratum(stratumID);
-        List list = new ArrayList();  // location list
+        List<Location> list = new ArrayList<Location>();  // location list
 
         for (Iterator iter = methods().iterator(); iter.hasNext(); ) {
             MethodImpl method = (MethodImpl)iter.next();
@@ -880,14 +879,14 @@
         return list;
     }
 
-    public List locationsOfLine(int lineNumber)
+    public List<Location> locationsOfLine(int lineNumber)
                            throws AbsentInformationException {
         return locationsOfLine(vm.getDefaultStratum(),
                                null,
                                lineNumber);
     }
 
-    public List locationsOfLine(String stratumID,
+    public List<Location> locationsOfLine(String stratumID,
                                 String sourceName,
                                 int lineNumber)
                            throws AbsentInformationException {
@@ -895,10 +894,10 @@
         boolean someAbsent = false; 
         // A method that should have info, did
         boolean somePresent = false; 
-        List methods = methods();
+        List<Method> methods = methods();
         SDE.Stratum stratum = stratum(stratumID);
 
-        List list = new ArrayList();
+        List<Location> list = new ArrayList<Location>();
 
         Iterator iter = methods.iterator();
         while(iter.hasNext()) {
@@ -924,7 +923,7 @@
         return list;
     }
 
-    public List instances(long maxInstances) {
+    public List<ObjectReference> instances(long maxInstances) {
         if (!vm.canGetInstanceInfo()) {
             throw new UnsupportedOperationException(
                 "target does not support getting instances");
@@ -939,8 +938,9 @@
         // JDWP can't currently handle more than this (in mustang)
 
         try {
-            return Arrays.asList(JDWP.ReferenceType.Instances.
-                process(vm, this, intMax).instances);
+            return Arrays.asList(
+		(ObjectReference[])JDWP.ReferenceType.Instances.
+                	process(vm, this, intMax).instances);
         } catch (JDWPException exc) {
             throw exc.toJDIException();
         }
@@ -1013,7 +1013,7 @@
             byte[] cpbytes;
             constanPoolCount = jdwpCPool.count;
             cpbytes = jdwpCPool.bytes;
-            constantPoolBytesRef = new SoftReference(cpbytes);
+            constantPoolBytesRef = new SoftReference<byte[]>(cpbytes);
             constantPoolInfoGotten = true;
         }
     }
--- a/j2se/src/share/classes/com/sun/tools/jdi/SDE.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/SDE.java	Fri May 25 00:49:14 2007 +0000
@@ -110,12 +110,12 @@
          * is always a terminator stratum.
          * Default sourceName (the first one) must be first.
          */
-        List sourceNames(ReferenceTypeImpl refType) {
+        List<String> sourceNames(ReferenceTypeImpl refType) {
             int i;
             int fileIndexStart = stratumTable[sti].fileIndex;
             /* one past end */
             int fileIndexEnd = stratumTable[sti+1].fileIndex; 
-            List result = new ArrayList(fileIndexEnd - fileIndexStart);
+            List<String> result = new ArrayList<String>(fileIndexEnd - fileIndexStart);
             for (i = fileIndexStart; i < fileIndexEnd; ++i) {
                 result.add(fileTable[i].sourceName);
             }
@@ -129,12 +129,12 @@
          * is always a terminator stratum.
          * Default sourcePath (the first one) must be first.
          */
-        List sourcePaths(ReferenceTypeImpl refType) {
+        List<String> sourcePaths(ReferenceTypeImpl refType) {
             int i;
             int fileIndexStart = stratumTable[sti].fileIndex;
             /* one past end */
             int fileIndexEnd = stratumTable[sti+1].fileIndex; 
-            List result = new ArrayList(fileIndexEnd - fileIndexStart);
+            List<String> result = new ArrayList<String>(fileIndexEnd - fileIndexStart);
             for (i = fileIndexStart; i < fileIndexEnd; ++i) {
                 result.add(fileTable[i].getSourcePath(refType));
             }
@@ -364,8 +364,8 @@
         return new Stratum(sti);
     }
 
-    List availableStrata() {
-        List strata = new ArrayList();
+    List<String> availableStrata() {
+        List<String> strata = new ArrayList<String>();
 
         for (int i = 0; i < (stratumIndex-1); ++i) {
             StratumTableRecord rec = stratumTable[i];
--- a/j2se/src/share/classes/com/sun/tools/jdi/StackFrameImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/StackFrameImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -46,7 +46,7 @@
     private final ThreadReferenceImpl thread;
     private final long id;
     private final Location location;
-    private Map visibleVariables =  null;
+    private Map<String, LocalVariable> visibleVariables =  null;
     private ObjectReference thisObject = null;
 
     StackFrameImpl(VirtualMachine vm, ThreadReferenceImpl thread, 
@@ -156,17 +156,15 @@
      */
     private void createVisibleVariables() throws AbsentInformationException {
         if (visibleVariables == null) {
-            List allVariables = location.method().variables();
-            Map map = new HashMap(allVariables.size());
-        
-            Iterator iter = allVariables.iterator();
-            while (iter.hasNext()) {
-                LocalVariableImpl variable = (LocalVariableImpl)iter.next();
+            List<LocalVariable> allVariables = location.method().variables();
+            Map<String, LocalVariable> map = new HashMap<String, LocalVariable>(allVariables.size());
+	    
+	    for (LocalVariable variable : allVariables) {
                 String name = variable.name();
                 if (variable.isVisible(this)) {
                     LocalVariable existing = (LocalVariable)map.get(name);
                     if ((existing == null) || 
-                        variable.hides(existing)) {
+                        ((LocalVariableImpl)variable).hides(existing)) {
                         map.put(name, variable);
                     }
                 }
@@ -179,10 +177,10 @@
      * Return the list of visible variable in the frame.
      * Need not be synchronized since it cannot be provably stale.
      */
-    public List visibleVariables() throws AbsentInformationException {
+    public List<LocalVariable> visibleVariables() throws AbsentInformationException {
         validateStackFrame();
         createVisibleVariables();
-        List mapAsList = new ArrayList(visibleVariables.values());
+        List<LocalVariable> mapAsList = new ArrayList<LocalVariable>(visibleVariables.values());
         Collections.sort(mapAsList);
         return mapAsList;
     }
@@ -194,17 +192,16 @@
     public LocalVariable visibleVariableByName(String name) throws AbsentInformationException  {
         validateStackFrame();
         createVisibleVariables();
-        return (LocalVariable)visibleVariables.get(name);
+        return visibleVariables.get(name);
     }
 
     public Value getValue(LocalVariable variable) {
-        List list = new ArrayList(1);
+        List<LocalVariable> list = new ArrayList<LocalVariable>(1);
         list.add(variable);
-        Map map = getValues(list);
-        return (Value)map.get(variable);
+        return getValues(list).get(variable);
     }
 
-    public Map getValues(List<? extends LocalVariable> variables) {      
+    public Map<LocalVariable, Value> getValues(List<? extends LocalVariable> variables) {      
         validateStackFrame();
         validateMirrors(variables);
 
@@ -249,7 +246,7 @@
             throw new InternalException(
                       "Wrong number of values returned from target VM");
         }
-        Map map = new HashMap(count);
+        Map<LocalVariable, Value> map = new HashMap<LocalVariable, Value>(count);
         for (int i=0; i<count; ++i) {
             LocalVariableImpl variable = (LocalVariableImpl)variables.get(i);
             map.put(variable, values[i]);
@@ -318,7 +315,7 @@
         }
     }
 
-    public List getArgumentValues() {
+    public List<Value> getArgumentValues() {
         validateStackFrame();
         MethodImpl mmm = (MethodImpl)location.method();
         List<String> argSigs = mmm.argumentSignatures();
@@ -366,7 +363,7 @@
             throw new InternalException(
                       "Wrong number of values returned from target VM");
         }
-        return Arrays.asList(values);
+        return Arrays.asList((Value[])values);
     }
 
     void pop() throws IncompatibleThreadStateException {
--- a/j2se/src/share/classes/com/sun/tools/jdi/TargetVM.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/TargetVM.java	Fri May 25 00:49:14 2007 +0000
@@ -26,6 +26,7 @@
 package com.sun.tools.jdi;
 
 import com.sun.jdi.*;
+import com.sun.jdi.event.*;
 import com.sun.jdi.connect.spi.Connection;
 import com.sun.jdi.event.EventSet;
 
@@ -33,9 +34,9 @@
 import java.io.IOException;
 
 public class TargetVM implements Runnable {
-    private Map waitingQueue = new HashMap(32,0.75f);
+    private Map<String, Packet> waitingQueue = new HashMap<String, Packet>(32,0.75f);
     private boolean shouldListen = true;
-    private List eventQueues = Collections.synchronizedList(new ArrayList(2));
+    private List<EventQueue> eventQueues = Collections.synchronizedList(new ArrayList<EventQueue>(2));
     private VirtualMachineImpl vm;
     private Connection connection;
     private Thread readerThread; 
--- a/j2se/src/share/classes/com/sun/tools/jdi/ThreadGroupReferenceImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ThreadGroupReferenceImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -138,12 +138,12 @@
         return kids;
     }
 
-    public List threads() {
-        return Arrays.asList(kids().childThreads);
+    public List<ThreadReference> threads() {
+        return Arrays.asList((ThreadReference[])kids().childThreads);
     }
 
-    public List threadGroups() {
-        return Arrays.asList(kids().childGroups);
+    public List<ThreadGroupReference> threadGroups() {
+        return Arrays.asList((ThreadGroupReference[])kids().childGroups);
     }
 
     public String toString() {
--- a/j2se/src/share/classes/com/sun/tools/jdi/ThreadReferenceImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/ThreadReferenceImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -42,12 +42,12 @@
     private static class Cache extends ObjectReferenceImpl.Cache {
         String name = null;
         JDWP.ThreadReference.Status status = null;
-        List frames = null;
+        List<StackFrame> frames = null;
         int framesStart = -1;
         int framesLength = 0;
         int frameCount = -1;
-        List ownedMonitors = null;
-        List ownedMonitorsInfo = null;
+        List<ObjectReference> ownedMonitors = null;
+        List<MonitorInfo> ownedMonitorsInfo = null;
         ObjectReference contendedMonitor = null;
         boolean triedCurrentContended = false;
     }
@@ -57,7 +57,7 @@
     }
 
     // Listeners - synchronized on vm.state() 
-    private List listeners = new ArrayList();
+    private List<WeakReference<ThreadListener>> listeners = new ArrayList<WeakReference<ThreadListener>>();
 
     ThreadReferenceImpl(VirtualMachine aVm, long aRef) {
         super(aVm,aRef);
@@ -287,7 +287,7 @@
         return frameCount;
     }
 
-    public List frames() throws IncompatibleThreadStateException  {
+    public List<StackFrame> frames() throws IncompatibleThreadStateException  {
         return privateFrames(0, -1);
     }
 
@@ -317,7 +317,7 @@
         return ((start + length) <= (local.framesStart + local.framesLength));
     }
 
-    public List frames(int start, int length) 
+    public List<StackFrame> frames(int start, int length) 
                               throws IncompatibleThreadStateException  {
         if (length < 0) {
             throw new IndexOutOfBoundsException(
@@ -330,9 +330,9 @@
      * Private version of frames() allows "-1" to specify all 
      * remaining frames.
      */
-    private List privateFrames(int start, int length) 
+    private List<StackFrame> privateFrames(int start, int length) 
                               throws IncompatibleThreadStateException  {
-        List frames = null;
+        List<StackFrame> frames = null;
         try {
             Cache local = (Cache)getCache();
 
@@ -344,7 +344,7 @@
                     = JDWP.ThreadReference.Frames.
                           process(vm, this, start, length).frames;
                 int count = jdwpFrames.length;
-                frames = new ArrayList(count);
+                frames = new ArrayList<StackFrame>(count);
 
                 // Lock must be held while creating stack frames.
                 // so that a resume will not resume a partially 
@@ -388,8 +388,8 @@
         return Collections.unmodifiableList(frames);
     }
 
-    public List ownedMonitors()  throws IncompatibleThreadStateException  {
-        List monitors = null;
+    public List<ObjectReference> ownedMonitors()  throws IncompatibleThreadStateException  {
+        List<ObjectReference> monitors = null;
         try {
             Cache local = (Cache)getCache();
 
@@ -398,7 +398,7 @@
             }
             if (monitors == null) {
                 monitors = Arrays.asList(
-                                 JDWP.ThreadReference.OwnedMonitors.
+                                 (ObjectReference[])JDWP.ThreadReference.OwnedMonitors.
                                          process(vm, this).owned);
                 if (local != null) {
                     local.ownedMonitors = monitors;
@@ -454,8 +454,8 @@
         return monitor;
     }
 
-    public List ownedMonitorsAndFrames()  throws IncompatibleThreadStateException  {
-        List monitors = null;
+    public List<MonitorInfo> ownedMonitorsAndFrames()  throws IncompatibleThreadStateException  {
+        List<MonitorInfo> monitors = null;
         try {
             Cache local = (Cache)getCache();
 
@@ -466,7 +466,7 @@
                 JDWP.ThreadReference.OwnedMonitorsStackDepthInfo.monitor[] minfo;
                 minfo = JDWP.ThreadReference.OwnedMonitorsStackDepthInfo.process(vm, this).owned;
 
-                monitors = new ArrayList(minfo.length);
+                monitors = new ArrayList<MonitorInfo>(minfo.length);
 
                 for (int i=0; i < minfo.length; i++) {
                     JDWP.ThreadReference.OwnedMonitorsStackDepthInfo.monitor mi =
@@ -559,7 +559,7 @@
 
     void addListener(ThreadListener listener) {
         synchronized (vm.state()) {
-            listeners.add(new WeakReference(listener));
+            listeners.add(new WeakReference<ThreadListener>(listener));
         }
     }
 
--- a/j2se/src/share/classes/com/sun/tools/jdi/VMState.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/VMState.java	Fri May 25 00:49:14 2007 +0000
@@ -34,7 +34,7 @@
     private final VirtualMachineImpl vm;
 
     // Listeners
-    private final List listeners = new ArrayList(); // synchronized (this)
+    private final List<WeakReference> listeners = new ArrayList<WeakReference>(); // synchronized (this)
     private boolean notifyingListeners = false;  // synchronized (this)
 
     /*
@@ -48,8 +48,8 @@
 
     // This is cached only while the VM is suspended
     private static class Cache {
-        List groups = null;  // cached Top Level ThreadGroups
-        List threads = null; // cached Threads
+        List<ThreadGroupReference> groups = null;  // cached Top Level ThreadGroups
+        List<ThreadReference> threads = null; // cached Threads
     }
 
     private Cache cache = null;               // synchronized (this)
@@ -161,7 +161,7 @@
     }
 
     synchronized void addListener(VMListener listener) {
-        listeners.add(new WeakReference(listener));
+        listeners.add(new WeakReference<VMListener>(listener));
     }
 
     synchronized boolean hasListener(VMListener listener) {
@@ -179,8 +179,8 @@
         }
     }
 
-    List allThreads() {
-        List threads = null;
+    List<ThreadReference> allThreads() {
+        List<ThreadReference> threads = null;
         try {
             Cache local = getCache();
 
@@ -189,7 +189,7 @@
                 threads = local.threads;
             }
             if (threads == null) {
-                threads = Arrays.asList(JDWP.VirtualMachine.AllThreads.
+                threads = Arrays.asList((ThreadReference[])JDWP.VirtualMachine.AllThreads.
                                         process(vm).threads);
                 if (local != null) {
                     local.threads = threads;
@@ -206,8 +206,8 @@
     }
 
 
-    List topLevelThreadGroups() {
-        List groups = null;
+    List<ThreadGroupReference> topLevelThreadGroups() {
+        List<ThreadGroupReference> groups = null;
         try {
             Cache local = getCache();
 
@@ -216,7 +216,7 @@
             }
             if (groups == null) {
                 groups = Arrays.asList(
-                                JDWP.VirtualMachine.TopLevelThreadGroups.
+                                (ThreadGroupReference[])JDWP.VirtualMachine.TopLevelThreadGroups.
                                        process(vm).groups);
                 if (local != null) {
                     local.groups = groups;
--- a/j2se/src/share/classes/com/sun/tools/jdi/VirtualMachineImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/VirtualMachineImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -71,8 +71,8 @@
     // Protected by "synchronized(this)". "retrievedAllTypes" may be
     // tested unsynchronized (since once true, it stays true), but must
     // be set synchronously
-    private Map typesByID;
-    private TreeSet typesBySignature;
+    private Map<Long, ReferenceType> typesByID;
+    private TreeSet<ReferenceType> typesBySignature;
     private boolean retrievedAllTypes = false;
 
     // For other languages support
@@ -80,11 +80,11 @@
 
     // ObjectReference cache
     // "objectsByID" protected by "synchronized(this)".
-    private final Map objectsByID = new HashMap();
-    private final ReferenceQueue referenceQueue = new ReferenceQueue();
+    private final Map<Long, SoftObjectReference> objectsByID = new HashMap<Long, SoftObjectReference>();
+    private final ReferenceQueue<ObjectReferenceImpl> referenceQueue = new ReferenceQueue<ObjectReferenceImpl>();
     static private final int DISPOSE_THRESHOLD = 50;
-    private final List batchedDisposeRequests = 
-            Collections.synchronizedList(new ArrayList(DISPOSE_THRESHOLD + 10));
+    private final List<SoftObjectReference> batchedDisposeRequests = 
+            Collections.synchronizedList(new ArrayList<SoftObjectReference>(DISPOSE_THRESHOLD + 10));
 
     // These are cached once for the life of the VM
     private JDWP.VirtualMachine.Version versionInfo;
@@ -264,10 +264,10 @@
         return System.identityHashCode(this);
     }
 
-    public List classesByName(String className) {
+    public List<ReferenceType> classesByName(String className) {
         validateVM();
         String signature = JNITypeParser.typeNameToSignature(className);
-        List list;
+        List<ReferenceType> list;
         if (retrievedAllTypes) {
            list = findReferenceTypes(signature);
         } else {
@@ -276,15 +276,15 @@
         return Collections.unmodifiableList(list);
     }
 
-    public List allClasses() {
+    public List<ReferenceType> allClasses() {
         validateVM();
             
         if (!retrievedAllTypes) {
             retrieveAllClasses();
         }
-        ArrayList a;
+        ArrayList<ReferenceType> a;
         synchronized (this) {
-            a = new ArrayList(typesBySignature);
+            a = new ArrayList<ReferenceType>(typesBySignature);
         }
         return Collections.unmodifiableList(a);
     }
@@ -354,7 +354,7 @@
         }
 
         // Delete any record of the breakpoints
-        List toDelete = new ArrayList();
+        List<BreakpointRequest> toDelete = new ArrayList<BreakpointRequest>();
         EventRequestManager erm = eventRequestManager();
         it = erm.breakpointRequests().iterator();
         while (it.hasNext()) {
@@ -373,12 +373,12 @@
         }
     }
 
-    public List allThreads() {
+    public List<ThreadReference> allThreads() {
         validateVM();
         return state.allThreads();
     }
 
-    public List topLevelThreadGroups() {
+    public List<ThreadGroupReference> topLevelThreadGroups() {
         validateVM();
         return state.topLevelThreadGroups();
     }
@@ -826,12 +826,12 @@
         }
     }
 
-    private synchronized List findReferenceTypes(String signature) {
+    private synchronized List<ReferenceType> findReferenceTypes(String signature) {
         if (typesByID == null) {
-            return new ArrayList(0);
+            return new ArrayList<ReferenceType>(0);
         }
         Iterator iter = typesBySignature.iterator();
-        List list = new ArrayList();
+        List<ReferenceType> list = new ArrayList<ReferenceType>();
         while (iter.hasNext()) {
             ReferenceTypeImpl type = (ReferenceTypeImpl)iter.next();
             int comp = signature.compareTo(type.signature());
@@ -846,8 +846,8 @@
     }
 
     private void initReferenceTypes() {
-        typesByID = new HashMap(300);
-        typesBySignature = new TreeSet();
+        typesByID = new HashMap<Long, ReferenceType>(300);
+        typesBySignature = new TreeSet<ReferenceType>();
     }
 
     ReferenceTypeImpl referenceType(long ref, byte tag) {
@@ -926,7 +926,7 @@
         return capabilitiesNew;
     }
 
-    private List retrieveClassesBySignature(String signature) {
+    private List<ReferenceType> retrieveClassesBySignature(String signature) {
         if ((vm.traceFlags & VirtualMachine.TRACE_REFTYPES) != 0) {
             vm.printTrace("Retrieving matching ReferenceTypes, sig=" + signature);
         }
@@ -939,7 +939,7 @@
         }
 
         int count = cinfos.length;
-        List list = new ArrayList(count);
+        List<ReferenceType> list = new ArrayList<ReferenceType>(count);
 
         // Hold lock during processing to improve performance
         synchronized (this) {
@@ -1250,7 +1250,7 @@
         /*
          * Attempt to retrieve an existing object object reference 
          */
-        SoftObjectReference ref = (SoftObjectReference)objectsByID.get(key);
+        SoftObjectReference ref = objectsByID.get(key);
         if (ref != null) {
             object = ref.object();
         }
@@ -1311,8 +1311,7 @@
         // Handle any queue elements that are not strongly reachable 
         processQueue();
 
-        SoftObjectReference ref = 
-            (SoftObjectReference)objectsByID.remove(new Long(object.ref()));
+        SoftObjectReference ref = objectsByID.remove(new Long(object.ref()));
         if (ref != null) {
             batchForDispose(ref);
         } else {
@@ -1378,11 +1377,11 @@
         return pathInfo;
     }
 
-   public List classPath() {
+   public List<String> classPath() {
        return Arrays.asList(getClasspath().classpaths);
    }
 
-   public List bootClassPath() {
+   public List<String> bootClassPath() {
        return Arrays.asList(getClasspath().bootclasspaths);
    }
 
@@ -1411,12 +1410,12 @@
 	return threadGroupForJDI;
     }
 
-   static private class SoftObjectReference extends SoftReference {
+   static private class SoftObjectReference extends SoftReference<ObjectReferenceImpl> {
        int count;
        Long key;
 
        SoftObjectReference(Long key, ObjectReferenceImpl mirror, 
-                           ReferenceQueue queue) {
+                           ReferenceQueue<ObjectReferenceImpl> queue) {
            super(mirror, queue);
            this.count = 1;
            this.key = key;
--- a/j2se/src/share/classes/com/sun/tools/jdi/VirtualMachineManagerImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdi/VirtualMachineManagerImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -42,10 +42,9 @@
 
 /* Public for use by com.sun.jdi.Bootstrap */
 public class VirtualMachineManagerImpl implements VirtualMachineManagerService {
-    private List connectors = new ArrayList();
+    private List<Connector> connectors = new ArrayList<Connector>();
     private LaunchingConnector defaultConnector = null;
-    private List targets = new ArrayList();
-    private List connectionListeners = new ArrayList();
+    private List<VirtualMachine> targets = new ArrayList<VirtualMachine>();
     private final ThreadGroup mainGroupForJDI;
     private ResourceBundle messages = null;
     private int vmSequenceNumber = 0;
@@ -141,10 +140,8 @@
 	// isn't found then we arbitarly pick the first connector.
 	//
 	boolean found = false;
-	List launchers = launchingConnectors();
-	Iterator i = launchers.iterator();
-	while (i.hasNext()) {
-	    LaunchingConnector lc = (LaunchingConnector)i.next();
+	List<LaunchingConnector> launchers = launchingConnectors();
+	for (LaunchingConnector lc: launchers) {
 	    if (lc.name().equals("com.sun.jdi.CommandLineLaunch")) {
 		setDefaultConnector(lc);
 		found = true;
@@ -152,7 +149,7 @@
 	    }
 	}
 	if (!found && launchers.size() > 0) {
-	    setDefaultConnector((LaunchingConnector)launchers.get(0));
+	    setDefaultConnector(launchers.get(0));
 	}
 
     }
@@ -168,47 +165,41 @@
         defaultConnector = connector;
     }
 
-    public List launchingConnectors() {
-        List launchingConnectors = new ArrayList(connectors.size());
-        Iterator iter = connectors.iterator();
-        while (iter.hasNext()) {
-            Object connector = iter.next();
+    public List<LaunchingConnector> launchingConnectors() {
+        List<LaunchingConnector> launchingConnectors = new ArrayList<LaunchingConnector>(connectors.size());
+	for (Connector connector: connectors) {
             if (connector instanceof LaunchingConnector) {
-                launchingConnectors.add(connector);
+                launchingConnectors.add((LaunchingConnector)connector);
             }
         }
         return Collections.unmodifiableList(launchingConnectors);
     }
 
-    public List attachingConnectors() {
-        List attachingConnectors = new ArrayList(connectors.size());
-        Iterator iter = connectors.iterator();
-        while (iter.hasNext()) {
-            Object connector = iter.next();
+    public List<AttachingConnector> attachingConnectors() {
+        List<AttachingConnector> attachingConnectors = new ArrayList<AttachingConnector>(connectors.size());
+	for (Connector connector: connectors) {
             if (connector instanceof AttachingConnector) {
-                attachingConnectors.add(connector);
+                attachingConnectors.add((AttachingConnector)connector);
             }
         }
         return Collections.unmodifiableList(attachingConnectors);
     }
 
-    public List listeningConnectors() {
-        List listeningConnectors = new ArrayList(connectors.size());
-        Iterator iter = connectors.iterator();
-        while (iter.hasNext()) {
-            Object connector = iter.next();
+    public List<ListeningConnector> listeningConnectors() {
+        List<ListeningConnector> listeningConnectors = new ArrayList<ListeningConnector>(connectors.size());
+	for (Connector connector: connectors) {
             if (connector instanceof ListeningConnector) {
-                listeningConnectors.add(connector);
+                listeningConnectors.add((ListeningConnector)connector);
             }
         }
         return Collections.unmodifiableList(listeningConnectors);
     }
 
-    public List allConnectors() {
+    public List<Connector> allConnectors() {
         return Collections.unmodifiableList(connectors);
     }
 
-    public List connectedVirtualMachines() {
+    public List<VirtualMachine> connectedVirtualMachines() {
         return Collections.unmodifiableList(targets);
     }
 
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/AbstractSimpleNode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/AbstractSimpleNode.java	Fri May 25 00:49:14 2007 +0000
@@ -32,7 +32,7 @@
 
     AbstractSimpleNode() {
         kind = "-simple-";
-        components = new ArrayList();
+        components = new ArrayList<Node>();
     }
 
     void document(PrintWriter writer) {
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/ConstantNode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/ConstantNode.java	Fri May 25 00:49:14 2007 +0000
@@ -31,10 +31,10 @@
 class ConstantNode extends AbstractCommandNode {
     
     ConstantNode() {
-        this(new ArrayList());
+        this(new ArrayList<Node>());
     }
 
-    ConstantNode(List components) {
+    ConstantNode(List<Node> components) {
         this.kind = "Constant";
         this.components = components;
         this.lineno = 0;
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/ConstantSetNode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/ConstantSetNode.java	Fri May 25 00:49:14 2007 +0000
@@ -33,16 +33,16 @@
     /**
      * The mapping between a constant and its value.
      */
-    protected static Map constantMap;
+    protected static Map<String, String> constantMap;
     
     ConstantSetNode(){
         if (constantMap == null) {
-            constantMap = new HashMap();
+            constantMap = new HashMap<String, String>();
         }
     }
     
     void prune() {
-        List addons = new ArrayList();
+        List<Node> addons = new ArrayList<Node>();
 
         for (Iterator it = components.iterator(); it.hasNext(); ) {
             Node node = (Node)it.next();
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/ErrorNode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/ErrorNode.java	Fri May 25 00:49:14 2007 +0000
@@ -33,10 +33,10 @@
     protected static final String NAME_OF_ERROR_TABLE = "Error";
     
     ErrorNode() {
-        this(new ArrayList());
+        this(new ArrayList<Node>());
     }
 
-    ErrorNode(List components) {
+    ErrorNode(List<Node> components) {
         this.kind = "Error";
         this.components = components;
         this.lineno = 0;
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/Node.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/Node.java	Fri May 25 00:49:14 2007 +0000
@@ -32,9 +32,9 @@
 abstract class Node {
 
     String kind;
-    List components;
+    List<Node> components;
     int lineno;
-    List commentList = new ArrayList();
+    List<String> commentList = new ArrayList<String>();
     Node parent = null;
     Context context;
 
@@ -43,7 +43,7 @@
 
     abstract void document(PrintWriter writer);
 
-    void set(String kind, List components, int lineno) {
+    void set(String kind, List<Node> components, int lineno) {
         this.kind = kind;
         this.components = components;
         this.lineno = lineno;
@@ -99,8 +99,8 @@
 
     String comment() {
         StringBuffer comment = new StringBuffer();
-        for (Iterator it = commentList.iterator(); it.hasNext();) {
-            comment.append((String)it.next());
+        for (String st : commentList) {
+            comment.append(st);
         }
         return comment.toString();
     }
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/OutNode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/OutNode.java	Fri May 25 00:49:14 2007 +0000
@@ -32,7 +32,7 @@
 
     String cmdName;
 
-    void set(String kind, List components, int lineno) {
+    void set(String kind, List<Node> components, int lineno) {
         super.set(kind, components, lineno);
         components.add(0, new NameNode("Out"));
     }
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/Parse.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/Parse.java	Fri May 25 00:49:14 2007 +0000
@@ -31,7 +31,7 @@
 class Parse {
 
     final StreamTokenizer izer;
-    final Map kindMap = new HashMap();
+    final Map<String, Node> kindMap = new HashMap<String, Node>();
 
     Parse(Reader reader) {
         izer = new StreamTokenizer(new BufferedReader(reader));
@@ -90,7 +90,7 @@
     }
 
     RootNode items() throws IOException {
-        List list = new ArrayList();
+        List<Node> list = new ArrayList<Node>();
         
         while (izer.nextToken() != StreamTokenizer.TT_EOF) {
             izer.pushBack();
@@ -131,13 +131,13 @@
             case '(': {
                 if (izer.nextToken() == StreamTokenizer.TT_WORD) {
                     String kind = izer.sval;
-                    List list = new ArrayList();
+                    List<Node> list = new ArrayList<Node>();
         
                     while (izer.nextToken() != ')') {
                         izer.pushBack();
                         list.add(item());
                     }
-                    Node proto = (Node)(kindMap.get(kind));
+                    Node proto = kindMap.get(kind);
                     if (proto == null) {
                         error("Invalid kind: " + kind);
                         return null;
--- a/j2se/src/share/classes/com/sun/tools/jdwpgen/ReplyNode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/com/sun/tools/jdwpgen/ReplyNode.java	Fri May 25 00:49:14 2007 +0000
@@ -32,7 +32,7 @@
 
     String cmdName;
 
-    void set(String kind, List components, int lineno) {
+    void set(String kind, List<Node> components, int lineno) {
         super.set(kind, components, lineno);
         components.add(0, new NameNode(kind));
     }
--- a/j2se/src/share/classes/java/lang/management/ManagementFactory.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/lang/management/ManagementFactory.java	Fri May 25 00:49:14 2007 +0000
@@ -282,7 +282,7 @@
  * @see javax.management.MXBean
  *
  * @author  Mandy Chung
- * @version 1.30, 05/05/07 
+ * @version 1.31, 05/09/07 
  * @since   1.5
  */
 public class ManagementFactory {
@@ -618,9 +618,9 @@
         final Class interfaceClass = mxbeanInterface;
         // Only allow MXBean interfaces from rt.jar loaded by the
         // bootstrap class loader
-        final ClassLoader loader = (ClassLoader)
-            AccessController.doPrivileged(new PrivilegedAction() {
-                public Object run() { 
+        final ClassLoader loader = 
+            AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {
+                public ClassLoader run() { 
                     return interfaceClass.getClassLoader();
                 }
             });
--- a/j2se/src/share/classes/java/nio/channels/SocketChannel.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/nio/channels/SocketChannel.java	Fri May 25 00:49:14 2007 +0000
@@ -90,7 +90,7 @@
  *
  * @author Mark Reinhold
  * @author JSR-51 Expert Group
- * @version 1.40, 07/05/05
+ * @version 1.41, 07/05/06
  * @since 1.4
  */
 
@@ -161,7 +161,14 @@
 	throws IOException
     {
 	SocketChannel sc = open();
-	sc.connect(remote);
+	try {
+	    sc.connect(remote);
+	} finally {
+	    if (!sc.isConnected()) {
+		try { sc.close(); } catch (IOException x) { }
+	    }
+	}
+	assert sc.isConnected();
 	return sc;
     }
 
--- a/j2se/src/share/classes/java/security/AccessController.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/security/AccessController.java	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,8 +53,8 @@
  * 
  * <pre>
  * 
- *    FilePermission perm = new FilePermission("/temp/testFile", "read");
- *    AccessController.checkPermission(perm);
+ * FilePermission perm = new FilePermission("/temp/testFile", "read");
+ * AccessController.checkPermission(perm);
  * 
  * </pre>
  *
@@ -71,30 +71,26 @@
  * The <code>checkPermission </code>method determines whether access 
  * is granted or denied based on the following algorithm:
  * 
- * <pre>
- * i = m;
+ *  <pre> {@code
+ * for (int i = m; i > 0; i--) {
  * 
- * while (i > 0) {
+ *     if (caller i's domain does not have the permission)
+ *         throw AccessControlException
  * 
- *      if (caller i's domain does not have the permission)
- *              throw AccessControlException
- * 
- *      else if (caller i is marked as privileged) {
- *              if (a context was specified in the call to doPrivileged) 
- *                 context.checkPermission(permission)
- *              return;
- *      }
- *      i = i - 1;
+ *     else if (caller i is marked as privileged) {
+ *         if (a context was specified in the call to doPrivileged) 
+ *             context.checkPermission(permission)
+ *         return;
+ *     }
  * };
  *
- *    // Next, check the context inherited when
- *    // the thread was created. Whenever a new thread is created, the
- *    // AccessControlContext at that time is
- *    // stored and associated with the new thread, as the "inherited"
- *    // context.
+ * // Next, check the context inherited when the thread was created.
+ * // Whenever a new thread is created, the AccessControlContext at
+ * // that time is stored and associated with the new thread, as the
+ * // "inherited" context.
  * 
  * inheritedContext.checkPermission(permission);
- * </pre>
+ * }</pre>
  * 
  * <p> A caller can be marked as being "privileged" 
  * (see {@link #doPrivileged(PrivilegedAction) doPrivileged} and below). 
@@ -113,23 +109,22 @@
  * don't need to return a value from within the "privileged" block, do 
  * the following:
  *
- * <pre>
- *   somemethod() {
- *        ...normal code here...
- *        AccessController.doPrivileged(new PrivilegedAction<Void>() {
- *            public Void run() {
- *                // privileged code goes here, for example:
- *                System.loadLibrary("awt");
- *                return null; // nothing to return
- *            }
- *        });
-  *       ...normal code here...
- *  }
- * </pre>
+ *  <pre> {@code
+ * somemethod() {
+ *     ...normal code here...
+ *     AccessController.doPrivileged(new PrivilegedAction<Void>() {
+ *         public Void run() {
+ *             // privileged code goes here, for example:
+ *             System.loadLibrary("awt");
+ *             return null; // nothing to return
+ *         }
+ *     });
+ *     ...normal code here...
+ * }}</pre>
  *
  * <p>
  * PrivilegedAction is an interface with a single method, named
- * <code>run</code>, that returns an Object.
+ * <code>run</code>.
  * The above example shows creation of an implementation
  * of that interface; a concrete implementation of the
  * <code>run</code> method is supplied.
@@ -144,19 +139,17 @@
  *
  * <p> If you need to return a value, you can do something like the following:
  *
- * <pre>
- *   somemethod() {
- *        ...normal code here...
- *        String user = AccessController.doPrivileged(
- *          new PrivilegedAction<String>() {
- *            public String run() {
- *                return System.getProperty("user.name");
- *            }
- *          }
- *        );
- *        ...normal code here...
- *  }
- * </pre>
+ *  <pre> {@code
+ * somemethod() {
+ *     ...normal code here...
+ *     String user = AccessController.doPrivileged(
+ *         new PrivilegedAction<String>() {
+ *             public String run() {
+ *                 return System.getProperty("user.name");
+ *             }
+ *         });
+ *     ...normal code here...
+ * }}</pre>
  *
  * <p>If the action performed in your <code>run</code> method could
  * throw a "checked" exception (those listed in the <code>throws</code> clause
@@ -164,26 +157,24 @@
  * <code>PrivilegedExceptionAction</code> interface instead of the
  * <code>PrivilegedAction</code> interface:
  * 
- * <pre>
- *   somemethod() throws FileNotFoundException {
- *        ...normal code here...
- *      try {
- *        FileInputStream fis = AccessController.doPrivileged(
- *          new PrivilegedExceptionAction<FileInputStream>() {
- *            public FileInputStream run() throws FileNotFoundException {
- *                return new FileInputStream("someFile");
- *            }
- *          }
- *        );
- *      } catch (PrivilegedActionException e) {
- *        // e.getException() should be an instance of FileNotFoundException,
- *        // as only "checked" exceptions will be "wrapped" in a
- *        // <code>PrivilegedActionException</code>.
- *        throw (FileNotFoundException) e.getException();
- *      }
- *        ...normal code here...
- *  }
- * </pre>
+ *  <pre> {@code
+ * somemethod() throws FileNotFoundException {
+ *     ...normal code here...
+ *     try {
+ *         FileInputStream fis = AccessController.doPrivileged(
+ *         new PrivilegedExceptionAction<FileInputStream>() {
+ *             public FileInputStream run() throws FileNotFoundException {
+ *                 return new FileInputStream("someFile");
+ *             }
+ *         });
+ *     } catch (PrivilegedActionException e) {
+ *         // e.getException() should be an instance of FileNotFoundException,
+ *         // as only "checked" exceptions will be "wrapped" in a
+ *         // PrivilegedActionException.
+ *         throw (FileNotFoundException) e.getException();
+ *     }
+ *     ...normal code here...
+ * }}</pre>
  * 
  * <p> Be *very* careful in your use of the "privileged" construct, and 
  * always remember to make the privileged code section as small as possible.
@@ -202,7 +193,7 @@
  * 
  * <pre>
  * 
- *   AccessControlContext acc = AccessController.getContext()
+ * AccessControlContext acc = AccessController.getContext()
  * 
  * </pre>
  * 
@@ -216,7 +207,7 @@
  * 
  * <pre>
  * 
- *   acc.checkPermission(permission)
+ * acc.checkPermission(permission)
  * 
  * </pre> 
  *
@@ -224,23 +215,22 @@
  * to check the context against. In these cases you can use the
  * doPrivileged method that takes a context:
  * 
- * <pre>
- *   somemethod() {
- *         AccessController.doPrivileged(new PrivilegedAction<Object>() {
- *              public Object run() {
- *                 // Code goes here. Any permission checks within this
- *                 // run method will require that the intersection of the
- *                 // callers protection domain and the snapshot's
- *                 // context have the desired permission.
- *              }
- *         }, acc);
- *         ...normal code here...
- *   }
- * </pre>
+ *  <pre> {@code
+ * somemethod() {
+ *     AccessController.doPrivileged(new PrivilegedAction<Object>() {
+ *         public Object run() {
+ *             // Code goes here. Any permission checks within this
+ *             // run method will require that the intersection of the
+ *             // callers protection domain and the snapshot's
+ *             // context have the desired permission.
+ *         }
+ *     }, acc);
+ *     ...normal code here...
+ * }}</pre>
  * 
  * @see AccessControlContext
  *
- * @version 1.67 07/05/05
+ * @version 1.68 07/05/17
  * @author Li Gong 
  * @author Roland Schemers
  */
--- a/j2se/src/share/classes/java/util/Arrays.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/util/Arrays.java	Fri May 25 00:49:14 2007 +0000
@@ -50,7 +50,7 @@
  * @author  Josh Bloch
  * @author  Neal Gafter
  * @author  John Rose
- * @version 1.78, 05/05/07
+ * @version 1.79, 05/15/07
  * @since   1.2
  */
 
@@ -407,9 +407,7 @@
         int i = fromIndex, n = toIndex;
         while(i < n) {
             if (a[i] != a[i]) {
-		double swap = a[i];
-                a[i] = a[--n];
-                a[n] = swap;
+		swap(a, i, --n);
             } else {
                 if (a[i]==0 && Double.doubleToLongBits(a[i])==NEG_ZERO_BITS) {
                     a[i] = 0.0d;
@@ -427,7 +425,7 @@
             int j = binarySearch0(a, fromIndex, n, 0.0d); // posn of ANY zero
             do {
                 j--;
-            } while (j>=0 && a[j]==0.0d);
+            } while (j>=fromIndex && a[j]==0.0d);
 
             // j is now one less than the index of the FIRST zero
             for (int k=0; k<numNegZeros; k++)
@@ -451,9 +449,7 @@
         int i = fromIndex, n = toIndex;
         while(i < n) {
             if (a[i] != a[i]) {
-		float swap = a[i];
-                a[i] = a[--n];
-                a[n] = swap;
+		swap(a, i, --n);
             } else {
                 if (a[i]==0 && Float.floatToIntBits(a[i])==NEG_ZERO_BITS) {
                     a[i] = 0.0f;
@@ -471,7 +467,7 @@
             int j = binarySearch0(a, fromIndex, n, 0.0f); // posn of ANY zero
             do {
                 j--;
-            } while (j>=0 && a[j]==0.0f);
+            } while (j>=fromIndex && a[j]==0.0f);
 
             // j is now one less than the index of the FIRST zero
             for (int k=0; k<numNegZeros; k++)
--- a/j2se/src/share/classes/java/util/WeakHashMap.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/util/WeakHashMap.java	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,7 @@
  * @param <K> the type of keys maintained by this map
  * @param <V> the type of mapped values
  *
- * @version	1.45, 05/05/07
+ * @version	1.46, 05/16/07
  * @author      Doug Lea
  * @author      Josh Bloch
  * @author	Mark Reinhold
@@ -144,14 +144,14 @@
     private static final int MAXIMUM_CAPACITY = 1 << 30;
 
     /**
-     * The load fast used when none specified in constructor.
+     * The load factor used when none specified in constructor.
      */
     private static final float DEFAULT_LOAD_FACTOR = 0.75f;
 
     /**
      * The table, resized as necessary. Length MUST Always be a power of two.
      */
-    private Entry[] table;
+    private Entry<K,V>[] table;
 
     /**
      * The number of key-value mappings contained in this weak hash map.
@@ -171,7 +171,7 @@
     /**
      * Reference queue for cleared WeakEntries
      */
-    private final ReferenceQueue<K> queue = new ReferenceQueue<K>();
+    private final ReferenceQueue<Object> queue = new ReferenceQueue<Object>();
 
     /**
      * The number of times this WeakHashMap has been structurally modified.
@@ -184,6 +184,11 @@
      */
     private volatile int modCount;
 
+    @SuppressWarnings("unchecked")
+    private Entry<K,V>[] newTable(int n) {
+	return (Entry<K,V>[]) new Entry[n];
+    }
+
     /**
      * Constructs a new, empty <tt>WeakHashMap</tt> with the given initial
      * capacity and the given load factor.
@@ -206,7 +211,7 @@
         int capacity = 1;
         while (capacity < initialCapacity)
             capacity <<= 1;
-        table = new Entry[capacity];
+        table = newTable(capacity);
         this.loadFactor = loadFactor;
         threshold = (int)(capacity * loadFactor);
     }
@@ -228,8 +233,8 @@
      */
     public WeakHashMap() {
         this.loadFactor = DEFAULT_LOAD_FACTOR;
-        threshold = (int)(DEFAULT_INITIAL_CAPACITY);
-        table = new Entry[DEFAULT_INITIAL_CAPACITY];
+        threshold = DEFAULT_INITIAL_CAPACITY;
+        table = newTable(DEFAULT_INITIAL_CAPACITY);
     }
 
     /**
@@ -259,14 +264,14 @@
      * Use NULL_KEY for key if it is null.
      */
     private static Object maskNull(Object key) {
-        return (key == null ? NULL_KEY : key);
+        return (key == null) ? NULL_KEY : key;
     }
 
     /**
      * Returns internal representation of null key back to caller as null.
      */
-    private static <K> K unmaskNull(Object key) {
-        return (K) (key == NULL_KEY ? null : key);
+    private static Object unmaskNull(Object key) {
+        return (key == NULL_KEY) ? null : key;
     }
 
     /**
@@ -288,35 +293,38 @@
      * Expunges stale entries from the table.
      */
     private void expungeStaleEntries() {
-	Entry<K,V> e;
-        while ( (e = (Entry<K,V>) queue.poll()) != null) {
-            int h = e.hash;
-            int i = indexFor(h, table.length);
+        for (Object x; (x = queue.poll()) != null; ) {
+	    synchronized (queue) {
+		@SuppressWarnings("unchecked")
+		    Entry<K,V> e = (Entry<K,V>) x;
+		int h = e.hash;
+		int i = indexFor(h, table.length);
 
-            Entry<K,V> prev = table[i];
-            Entry<K,V> p = prev;
-            while (p != null) {
-                Entry<K,V> next = p.next;
-                if (p == e) {
-                    if (prev == e)
-                        table[i] = next;
-                    else
-                        prev.next = next;
-                    e.next = null;  // Help GC
-                    e.value = null; //  "   "
-                    size--;
-                    break;
-                }
-                prev = p;
-                p = next;
-            }
-        }
+		Entry<K,V> prev = table[i];
+		Entry<K,V> p = prev;
+		while (p != null) {
+		    Entry<K,V> next = p.next;
+		    if (p == e) {
+			if (prev == e)
+			    table[i] = next;
+			else
+			    prev.next = next;
+			e.next = null;  // Help GC
+			e.value = null; //  "   "
+			size--;
+			break;
+		    }
+		    prev = p;
+		    p = next;
+		}
+	    }
+	}
     }
 
     /**
      * Returns the table after first expunging stale entries.
      */
-    private Entry[] getTable() {
+    private Entry<K,V>[] getTable() {
         expungeStaleEntries();
         return table;
     }
@@ -364,7 +372,7 @@
     public V get(Object key) {
         Object k = maskNull(key);
         int h = HashMap.hash(k.hashCode());
-        Entry[] tab = getTable();
+        Entry<K,V>[] tab = getTable();
         int index = indexFor(h, tab.length);
         Entry<K,V> e = tab[index];
         while (e != null) {
@@ -394,7 +402,7 @@
     Entry<K,V> getEntry(Object key) {
         Object k = maskNull(key);
         int h = HashMap.hash(k.hashCode());
-        Entry[] tab = getTable();
+        Entry<K,V>[] tab = getTable();
         int index = indexFor(h, tab.length);
         Entry<K,V> e = tab[index];
         while (e != null && !(e.hash == h && eq(k, e.get())))
@@ -415,9 +423,9 @@
      *         previously associated <tt>null</tt> with <tt>key</tt>.)
      */
     public V put(K key, V value) {
-        K k = (K) maskNull(key);
+        Object k = maskNull(key);
         int h = HashMap.hash(k.hashCode());
-        Entry[] tab = getTable();
+        Entry<K,V>[] tab = getTable();
         int i = indexFor(h, tab.length);
 
         for (Entry<K,V> e = tab[i]; e != null; e = e.next) {
@@ -452,14 +460,14 @@
      *        is irrelevant).
      */
     void resize(int newCapacity) {
-        Entry[] oldTable = getTable();
+        Entry<K,V>[] oldTable = getTable();
         int oldCapacity = oldTable.length;
         if (oldCapacity == MAXIMUM_CAPACITY) {
             threshold = Integer.MAX_VALUE;
             return;
         }
 
-        Entry[] newTable = new Entry[newCapacity];
+        Entry<K,V>[] newTable = newTable(newCapacity);
         transfer(oldTable, newTable);
         table = newTable;
 
@@ -478,7 +486,7 @@
     }
 
     /** Transfers all entries from src to dest tables */
-    private void transfer(Entry[] src, Entry[] dest) {
+    private void transfer(Entry<K,V>[] src, Entry<K,V>[] dest) {
         for (int j = 0; j < src.length; ++j) {
             Entry<K,V> e = src[j];
             src[j] = null;
@@ -559,7 +567,7 @@
     public V remove(Object key) {
         Object k = maskNull(key);
         int h = HashMap.hash(k.hashCode());
-        Entry[] tab = getTable();
+        Entry<K,V>[] tab = getTable();
         int i = indexFor(h, tab.length);
         Entry<K,V> prev = tab[i];
         Entry<K,V> e = prev;
@@ -588,7 +596,7 @@
     Entry<K,V> removeMapping(Object o) {
         if (!(o instanceof Map.Entry))
             return null;
-        Entry[] tab = getTable();
+        Entry<K,V>[] tab = getTable();
         Map.Entry entry = (Map.Entry)o;
         Object k = maskNull(entry.getKey());
         int h = HashMap.hash(k.hashCode());
@@ -625,7 +633,7 @@
             ;
 
         modCount++;
-        Entry[] tab = table;
+        Entry<K,V>[] tab = table;
         for (int i = 0; i < tab.length; ++i)
             tab[i] = null;
         size = 0;
@@ -649,7 +657,7 @@
 	if (value==null)
             return containsNullValue();
 
-	Entry[] tab = getTable();
+	Entry<K,V>[] tab = getTable();
         for (int i = tab.length ; i-- > 0 ;)
             for (Entry e = tab[i] ; e != null ; e = e.next)
                 if (value.equals(e.value))
@@ -661,7 +669,7 @@
      * Special-case code for containsValue with null argument
      */
     private boolean containsNullValue() {
-	Entry[] tab = getTable();
+	Entry<K,V>[] tab = getTable();
         for (int i = tab.length ; i-- > 0 ;)
             for (Entry e = tab[i] ; e != null ; e = e.next)
                 if (e.value==null)
@@ -673,7 +681,7 @@
      * The entries in this hash table extend WeakReference, using its main ref
      * field as the key.
      */
-    private static class Entry<K,V> extends WeakReference<K> implements Map.Entry<K,V> {
+    private static class Entry<K,V> extends WeakReference<Object> implements Map.Entry<K,V> {
         private V value;
         private final int hash;
         private Entry<K,V> next;
@@ -681,8 +689,8 @@
         /**
          * Creates new entry.
          */
-        Entry(K key, V value,
-	      ReferenceQueue<K> queue,
+        Entry(Object key, V value,
+	      ReferenceQueue<Object> queue,
               int hash, Entry<K,V> next) {
             super(key, queue);
             this.value = value;
@@ -690,8 +698,9 @@
             this.next  = next;
         }
 
-        public K getKey() {
-            return WeakHashMap.<K>unmaskNull(get());
+        @SuppressWarnings("unchecked")
+	public K getKey() {
+            return (K) WeakHashMap.unmaskNull(get());
         }
 
         public V getValue() {
@@ -754,7 +763,7 @@
         }
 
         public boolean hasNext() {
-            Entry[] t = table;
+            Entry<K,V>[] t = table;
 
             while (nextKey == null) {
                 Entry<K,V> e = entry;
--- a/j2se/src/share/classes/java/util/logging/FileHandler.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/util/logging/FileHandler.java	Fri May 25 00:49:14 2007 +0000
@@ -114,7 +114,7 @@
  * Note that the use of unique ids to avoid conflicts is only guaranteed
  * to work reliably when using a local disk file system.
  *
- * @version 1.42, 05/05/07
+ * @version 1.43, 05/09/07
  * @since 1.4
  */
 
@@ -128,7 +128,7 @@
     private FileOutputStream lockStream;
     private File files[];
     private static final int MAX_LOCKS = 100;
-    private static java.util.HashMap locks = new java.util.HashMap();
+    private static java.util.HashMap<String, String> locks = new java.util.HashMap<String, String>();
 
     // A metered stream is a subclass of OutputStream that
     //   (a) forwards all its output to a target stream
@@ -578,7 +578,7 @@
 	    // it is OK to write the target files, even if we are
 	    // currently being called from untrusted code.
             // So it is safe to raise privilege here.
-	    AccessController.doPrivileged(new PrivilegedAction() {
+	    AccessController.doPrivileged(new PrivilegedAction<Object>() {
 		public Object run() {
 		    rotate();
 		    return null;
--- a/j2se/src/share/classes/java/util/logging/Level.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/util/logging/Level.java	Fri May 25 00:49:14 2007 +0000
@@ -55,12 +55,12 @@
  * they maintain the Object uniqueness property across serialization
  * by defining a suitable readResolve method.
  *
- * @version 1.26, 05/05/07
+ * @version 1.27, 05/09/07
  * @since 1.4
  */
 
 public class Level implements java.io.Serializable {
-    private static java.util.ArrayList known = new java.util.ArrayList();
+    private static java.util.ArrayList<Level> known = new java.util.ArrayList<Level>();
     private static String defaultBundle = "sun.util.logging.resources.logging";
 
     /**
@@ -344,7 +344,7 @@
 	// in the current default locale.
 	// This is relatively expensive, but not excessively so.
 	for (int i = 0; i < known.size(); i++) {
-	    Level l = (Level) known.get(i);
+	    Level l =  known.get(i);
 	    if (name.equals(l.getLocalizedName())) {
 		return l;
 	    }
--- a/j2se/src/share/classes/java/util/logging/LogRecord.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/util/logging/LogRecord.java	Fri May 25 00:49:14 2007 +0000
@@ -60,14 +60,14 @@
  *
  * </ul>
  *
- * @version 1.30, 05/05/07
+ * @version 1.31, 05/09/07
  * @since 1.4
  */
 
 public class LogRecord implements java.io.Serializable {
     private static long globalSequenceNumber;
     private static int nextThreadId=10;
-    private static ThreadLocal threadIds = new ThreadLocal();
+    private static ThreadLocal<Integer> threadIds = new ThreadLocal<Integer>();
 
     /**
      * @serial Logging message level
@@ -147,7 +147,7 @@
 	// Assign a thread ID and a unique sequence number.
 	synchronized (LogRecord.class) {
 	    sequenceNumber = globalSequenceNumber++;
-	    Integer id = (Integer)threadIds.get();
+	    Integer id = threadIds.get();
 	    if (id == null) {
 		id = new Integer(nextThreadId++);
 		threadIds.set(id);
--- a/j2se/src/share/classes/java/util/logging/Logger.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/java/util/logging/Logger.java	Fri May 25 00:49:14 2007 +0000
@@ -156,7 +156,7 @@
  * All the other logging methods are implemented as calls on this
  * log(LogRecord) method.
  *
- * @version 1.56, 05/05/07
+ * @version 1.57, 05/09/07
  * @since 1.4
  */
 
@@ -166,7 +166,7 @@
     private static final int offValue = Level.OFF.intValue();
     private LogManager manager;
     private String name;
-    private ArrayList handlers;
+    private ArrayList<Handler> handlers;
     private String resourceBundleName;
     private boolean useParentHandlers = true;
     private Filter filter;
@@ -182,7 +182,7 @@
     // We keep weak references from parents to children, but strong
     // references from children to parents.
     private Logger parent;    // our nearest parent.
-    private ArrayList kids;   // WeakReferences to loggers that have us as parent
+    private ArrayList<WeakReference<Logger>> kids;   // WeakReferences to loggers that have us as parent
     private Level levelObject;
     private volatile int levelValue;  // current effective level value
 
@@ -1201,7 +1201,7 @@
 	handler.getClass();
 	checkAccess();
 	if (handlers == null) {
-	    handlers = new ArrayList();
+	    handlers = new ArrayList<Handler>();
 	}
 	handlers.add(handler);
     }
@@ -1405,9 +1405,9 @@
 	    // Remove ourself from any previous parent.
 	    if (parent != null) {
 		// assert parent.kids != null;
-		for (Iterator iter = parent.kids.iterator(); iter.hasNext(); ) {
-		    WeakReference ref = (WeakReference) iter.next();
-		    Logger kid = (Logger) ref.get();
+		for (Iterator<WeakReference<Logger>> iter = parent.kids.iterator(); iter.hasNext(); ) {
+		    WeakReference<Logger> ref =  iter.next();
+		    Logger kid =  ref.get();
 		    if (kid == this) {
 		        iter.remove();
 			break;
@@ -1419,9 +1419,9 @@
 	    // Set our new parent.
 	    parent = newParent;
 	    if (parent.kids == null) {
-	        parent.kids = new ArrayList(2);
+	        parent.kids = new ArrayList<WeakReference<Logger>>(2);
 	    }
-	    parent.kids.add(new WeakReference(this));
+	    parent.kids.add(new WeakReference<Logger>(this));
 
 	    // As a result of the reparenting, the effective level
 	    // may have changed for us and our children.
@@ -1461,8 +1461,8 @@
 	// Recursively update the level on each of our kids.
 	if (kids != null) {
 	    for (int i = 0; i < kids.size(); i++) {
-	        WeakReference ref = (WeakReference)kids.get(i);
-		Logger kid = (Logger) ref.get();
+	        WeakReference<Logger> ref = kids.get(i);
+		Logger kid =  ref.get();
 		if (kid != null) {
 		    kid.updateEffectiveLevel();
  		}
--- a/j2se/src/share/classes/javax/script/ScriptEngineManager.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/javax/script/ScriptEngineManager.java	Fri May 25 00:49:14 2007 +0000
@@ -93,7 +93,7 @@
         nameAssociations = new HashMap<String, ScriptEngineFactory>();
         extensionAssociations = new HashMap<String, ScriptEngineFactory>();
         mimeTypeAssociations = new HashMap<String, ScriptEngineFactory>();
-        AccessController.doPrivileged(new PrivilegedAction() {
+        AccessController.doPrivileged(new PrivilegedAction<Object>() {
             public Object run() {
                 initEngines(loader);
                 return null;
--- a/j2se/src/share/classes/sun/instrument/InstrumentationImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/instrument/InstrumentationImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -282,7 +282,7 @@
     // Enable or disable Java programming language access checks on a
     // reflected object (for example, a method)
     private static void setAccessible(final AccessibleObject ao, final boolean accessible) {
-        AccessController.doPrivileged(new PrivilegedAction() {
+        AccessController.doPrivileged(new PrivilegedAction<Object>() {
                 public Object run() {
                     ao.setAccessible(accessible);
                     return null;
@@ -297,7 +297,7 @@
             throws Throwable {
                                                                                                                          
         ClassLoader mainAppLoader   = ClassLoader.getSystemClassLoader();
-        Class       javaAgentClass  = mainAppLoader.loadClass(classname);
+        Class<?>    javaAgentClass  = mainAppLoader.loadClass(classname);
 
 	Method m = null;
 	NoSuchMethodException firstExc = null;
--- a/j2se/src/share/classes/sun/jvmstat/monitor/MonitoredHost.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/monitor/MonitoredHost.java	Fri May 25 00:49:14 2007 +0000
@@ -40,7 +40,7 @@
  * for event notification.
  *
  * @author Brian Doherty
- * @version 1.9, 05/05/07
+ * @version 1.10, 05/09/07
  * @since 1.5
  *
  * @see HostIdentifier
@@ -49,7 +49,8 @@
  * @see HostListener
  */
 public abstract class MonitoredHost {
-    private static Map monitoredHosts = new HashMap();
+    private static Map<HostIdentifier, MonitoredHost> monitoredHosts = 
+		new HashMap<HostIdentifier, MonitoredHost>();
 
     /*
      * The monitoring implementation override mechanism. The value of
@@ -185,7 +186,7 @@
         MonitoredHost mh = null;
 
         synchronized(monitoredHosts) {
-            mh = (MonitoredHost)monitoredHosts.get(hostId);
+            mh = monitoredHosts.get(hostId);
             if (mh != null) {
                 if (mh.isErrored()) {
                     monitoredHosts.remove(hostId);
@@ -205,7 +206,7 @@
 
         try {
             // run the constructor taking a single String parameter.
-            Class c = Class.forName(classname);
+            Class<?> c = Class.forName(classname);
 
             Constructor cons = c.getConstructor(
                 new Class[] { hostId.getClass() }
@@ -418,5 +419,5 @@
      *               with this MonitoredHost, or the empty set of none.
      * @throws MonitorException Thrown if monitoring errors occur.
      */
-    public abstract Set activeVms() throws MonitorException;
+    public abstract Set<Integer> activeVms() throws MonitorException;
 }
--- a/j2se/src/share/classes/sun/jvmstat/monitor/MonitoredVm.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/monitor/MonitoredVm.java	Fri May 25 00:49:14 2007 +0000
@@ -36,7 +36,7 @@
  * maintenance of the connection to the target.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public interface MonitoredVm {
@@ -75,7 +75,7 @@
      *
      * @param patternString a string containing a pattern as described in
      *                      {@link java.util.regex.Pattern}.
-     * @return List - a List of {@link Monitor} objects that can be used to
+     * @return List<Monitor> - a List of {@link Monitor} objects that can be used to
      *                monitor the instrumentation objects whose names match
      *                the given pattern. If no instrumentation objects have`
      *                names matching the given pattern, then an empty List
@@ -84,7 +84,7 @@
      *                          with the target Java Virtual Machine.
      * @see java.util.regex.Pattern
      */
-    List findByPattern(String patternString) throws MonitorException;
+    List<Monitor> findByPattern(String patternString) throws MonitorException;
 
     /**
      * Detach from target Java Virtual Machine.
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AbstractMonitoredVm.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AbstractMonitoredVm.java	Fri May 25 00:49:14 2007 +0000
@@ -39,7 +39,7 @@
  * mechanism to the target Java Virtual Machine.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public abstract class AbstractMonitoredVm implements BufferedMonitoredVm {
@@ -89,7 +89,7 @@
     /**
      * {@inheritDoc}
      */
-    public List findByPattern(String patternString) throws MonitorException {
+    public List<Monitor> findByPattern(String patternString) throws MonitorException {
         return pdb.findByPattern(patternString);
     }
 
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AbstractPerfDataBuffer.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AbstractPerfDataBuffer.java	Fri May 25 00:49:14 2007 +0000
@@ -39,7 +39,7 @@
  * to its contents.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public abstract class AbstractPerfDataBuffer {
@@ -109,7 +109,7 @@
      *
      * @param patternString  a string containing a pattern as described in
      *                       {@link java.util.regex.Pattern}.
-     * @return List - a List of {@link Monitor} objects that can be used to
+     * @return List<Monitor> - a List of {@link Monitor} objects that can be used to
      *                monitor the instrumentation objects whose names match
      *                the given pattern. If no instrumentation objects have`
      *                names matching the given pattern, then an empty List
@@ -118,7 +118,7 @@
      *                          with the target Java Virtual Machine.
      * @see java.util.regex.Pattern
      */
-    public List findByPattern(String patternString) throws MonitorException {
+    public List<Monitor> findByPattern(String patternString) throws MonitorException {
         return impl.findByPattern(patternString);
     }
 
@@ -169,7 +169,7 @@
                            + ".PerfDataBuffer";
 
         try {
-            Class implClass = Class.forName(classname);
+            Class<?> implClass = Class.forName(classname);
             Constructor cons = implClass.getConstructor(new Class[] {
                     Class.forName("java.nio.ByteBuffer"),
                     Integer.TYPE
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AliasFileParser.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/AliasFileParser.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
  *
  * Java style comments can occur anywhere within the file.
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class AliasFileParser {
@@ -120,7 +120,7 @@
     /**
      * method to parse the given input file.
      */
-    public void parse(Map map) throws SyntaxException, IOException {
+    public void parse(Map<String, ArrayList<String>> map) throws SyntaxException, IOException {
 
         if (inputfile == null) {
             return;
@@ -149,7 +149,7 @@
             String name = currentToken.sval;
             match(StreamTokenizer.TT_WORD);
 
-            ArrayList aliases = new ArrayList();
+            ArrayList<String> aliases = new ArrayList<String>();
 
             do {
                 aliases.add(currentToken.sval);
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/PerfDataBufferImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/PerfDataBufferImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -37,7 +37,7 @@
  * PerfData instrumentation buffer.
  *
  * @author Brian Doherty
- * @version 1.10, 05/05/07
+ * @version 1.11, 05/09/07
  * @since 1.5
  * @see AbstractPerfDataBuffer
  */
@@ -51,7 +51,7 @@
     /**
      * A Map of monitor objects found in the instrumentation buffer.
      */
-    protected Map monitors;
+    protected Map<String, Monitor> monitors;
 
     /**
      * The Local Java Virtual Machine Identifier for this buffer.
@@ -62,7 +62,7 @@
      * A Map of monitor object names to aliases as read in from the alias map
      * file.
      */
-    protected Map aliasMap;
+    protected Map<String, ArrayList<String>> aliasMap;
 
     /**
      * A cache of resolved monitor aliases.
@@ -80,8 +80,8 @@
     protected PerfDataBufferImpl(ByteBuffer buffer, int lvmid) {
         this.buffer = buffer;
         this.lvmid = lvmid;
-        this.monitors = new TreeMap();
-        this.aliasMap = new HashMap();
+        this.monitors = new TreeMap<String, Monitor>();
+        this.aliasMap = new HashMap<String, ArrayList<String>>();
         this.aliasCache = new HashMap();
     }
 
@@ -203,7 +203,7 @@
 
         Monitor  m = (Monitor)aliasCache.get(name);
         if (m == null) {
-            ArrayList al = (ArrayList) aliasMap.get(name);
+            ArrayList al = aliasMap.get(name);
             if (al != null) {
                 for (Iterator i = al.iterator(); i.hasNext() && m == null; ) {
                     String alias = (String)i.next();
@@ -244,11 +244,11 @@
             }
 
             // look for the requested monitor
-            m = (Monitor)monitors.get(name);
+            m = monitors.get(name);
             if (m == null) {
                 // not found - load any new monitors, and try again.
                 getNewMonitors(monitors);
-                m = (Monitor)monitors.get(name);
+                m = monitors.get(name);
             }
             if (m == null) {
                 // still not found, look for aliases
@@ -266,7 +266,7 @@
      *
      * @param patternString a string containing a pattern as described in
      *                      {@link java.util.regex.Pattern}.
-     * @return List - a List of {@link Monitor} objects that can be used to
+     * @return List<Monitor> - a List of {@link Monitor} objects that can be used to
      *                monitor the instrumentation objects whose names match
      *                the given pattern. If no instrumentation objects have`
      *                names matching the given pattern, then an empty List
@@ -275,7 +275,7 @@
      *                          with the target Java Virtual Machine.
      * @see java.util.regex.Pattern
      */
-    public List findByPattern(String patternString)
+    public List<Monitor> findByPattern(String patternString)
                 throws MonitorException, PatternSyntaxException {
 
         synchronized(this) {
@@ -288,7 +288,7 @@
 
         Pattern pattern = Pattern.compile(patternString);
         Matcher matcher = pattern.matcher("");
-        List matches = new ArrayList();
+        List<Monitor> matches = new ArrayList<Monitor>();
 
         Set monitorSet = monitors.entrySet();
 
@@ -334,7 +334,7 @@
      * @throws MonitorException Thrown if communications errors occur
      *                          while communicating with the target.
      */
-    protected abstract MonitorStatus getMonitorStatus(Map m)
+    protected abstract MonitorStatus getMonitorStatus(Map<String, Monitor> m)
                                      throws MonitorException;
 
     /**
@@ -344,7 +344,7 @@
      * @throws MonitorException Thrown if communications errors occur
      *                          while communicating with the target.
      */
-    protected abstract void buildMonitorMap(Map m) throws MonitorException;
+    protected abstract void buildMonitorMap(Map<String, Monitor> m) throws MonitorException;
 
     /**
      * get the new Monitor objects from the Map of Monitor objects.
@@ -353,5 +353,5 @@
      * @throws MonitorException Thrown if communications errors occur
      *                          while communicating with the target.
      */
-    protected abstract void getNewMonitors(Map m) throws MonitorException;
+    protected abstract void getNewMonitors(Map<String, Monitor> m) throws MonitorException;
 }
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/file/MonitoredHostProvider.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/file/MonitoredHostProvider.java	Fri May 25 00:49:14 2007 +0000
@@ -36,7 +36,7 @@
  * <em>file:</em> protocol of the HotSpot PerfData monitoring implementation.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class MonitoredHostProvider extends MonitoredHost {
@@ -111,7 +111,7 @@
      * notion of tracking active or inactive Java Virtual Machines. This
      * method currently returns an empty set.
      */
-    public Set activeVms() {
-        return new HashSet(0);
+    public Set<Integer> activeVms() {
+        return new HashSet<Integer>(0);
     }
 }
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/LocalMonitoredVm.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/LocalMonitoredVm.java	Fri May 25 00:49:14 2007 +0000
@@ -42,7 +42,7 @@
  * mechanism.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class LocalMonitoredVm extends AbstractMonitoredVm {
@@ -50,7 +50,7 @@
     /**
      * List of registered listeners.
      */
-    private ArrayList listeners;
+    private ArrayList<VmListener> listeners;
 
     /**
      * Task performing listener notification.
@@ -67,7 +67,7 @@
            throws MonitorException {
         super(vmid, interval);
         this.pdb = new PerfDataBuffer(vmid);
-        listeners = new ArrayList();
+        listeners = new ArrayList<VmListener>();
     }
 
     /**
@@ -167,14 +167,13 @@
      */
     void fireMonitorsUpdatedEvents() {
         VmEvent ev = null;
-        ArrayList registered = null;
+        ArrayList<VmListener> registered = null;
 
         synchronized (listeners) {
-            registered = (ArrayList)listeners.clone();
+            registered = cast(listeners.clone());
         }
 
-        for (Iterator i = registered.iterator(); i.hasNext(); /* empty */) {
-            VmListener l = (VmListener)i.next();
+        for (VmListener l :  registered) {
             // lazily create the event object;
             if (ev == null) {
                 ev = new VmEvent(this);
@@ -207,4 +206,9 @@
             }
         }
     }
+    // Suppress unchecked cast warning msg.
+    @SuppressWarnings("unchecked")
+    static <T> T cast(Object x) {
+	return (T) x;
+    }
 }
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/LocalVmManager.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/LocalVmManager.java	Fri May 25 00:49:14 2007 +0000
@@ -41,7 +41,7 @@
  * principal running this JVM.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class LocalVmManager {
@@ -126,14 +126,14 @@
      *
      * @return Set - the Set of monitorable Java Virtual Machines
      */
-    public synchronized Set activeVms() {
+    public synchronized Set<Integer> activeVms() {
         /*
          * This method is synchronized because the Matcher object used by
          * fileFilter is not safe for concurrent use, and this method is
          * called by multiple threads. Before this method was synchronized,
          * we'd see strange file names being matched by the matcher.
          */
-        Set jvmSet = new HashSet();
+        Set<Integer> jvmSet = new HashSet<Integer>();
 
         if (! tmpdir.isDirectory()) {
             return jvmSet;
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/MonitoredHostProvider.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/MonitoredHostProvider.java	Fri May 25 00:49:14 2007 +0000
@@ -36,15 +36,15 @@
  * <em>local</em> protocol of the HotSpot PerfData monitoring implementation.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class MonitoredHostProvider extends MonitoredHost {
     private static final int DEFAULT_POLLING_INTERVAL = 1000;
 
-    private ArrayList listeners;
+    private ArrayList<HostListener> listeners;
     private NotifierTask task;
-    private HashSet activeVms;
+    private HashSet<Integer> activeVms;
     private LocalVmManager vmManager;
 
     /**
@@ -54,9 +54,9 @@
      */
     public MonitoredHostProvider(HostIdentifier hostId) {
         this.hostId = hostId;
-        this.listeners = new ArrayList();
+        this.listeners = new ArrayList<HostListener>();
         this.interval = DEFAULT_POLLING_INTERVAL;
-        this.activeVms = new HashSet();
+        this.activeVms = new HashSet<Integer>();
         this.vmManager = new LocalVmManager();
     }
 
@@ -147,7 +147,7 @@
     /**
      * {@inheritDoc}
      */
-    public Set activeVms() {
+    public Set<Integer> activeVms() {
         return vmManager.activeVms();
     }
 
@@ -190,13 +190,13 @@
             Set lastActiveVms = activeVms;
 
             // get the current set of active JVMs
-            activeVms = (HashSet)vmManager.activeVms();
+            activeVms = (HashSet<Integer>)vmManager.activeVms();
 
             if (activeVms.isEmpty()) {
                 return;
             }
-            Set startedVms = new HashSet();
-            Set terminatedVms = new HashSet();
+            Set<Integer> startedVms = new HashSet<Integer>();
+            Set<Object> terminatedVms = new HashSet<Object>();
 
             for (Iterator i = activeVms.iterator(); i.hasNext(); /* empty */) {
                 Integer vmid = (Integer)i.next();
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/PerfDataBuffer.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/local/PerfDataBuffer.java	Fri May 25 00:49:14 2007 +0000
@@ -43,12 +43,13 @@
  * instrumentation buffer for the target HotSpot Java Virtual Machine.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
+// Suppress unchecked conversion warning at line 34.
+//@SuppressWarnings("unchecked")
 public class PerfDataBuffer extends AbstractPerfDataBuffer {
-    private static final Perf perf =
-            (Perf)AccessController.doPrivileged(new Perf.GetPerfAction());
+    private static final Perf perf = AccessController.doPrivileged(new Perf.GetPerfAction());
 
     /**
      * Create a PerfDataBuffer instance for accessing the specified
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/MonitoredHostProvider.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/MonitoredHostProvider.java	Fri May 25 00:49:14 2007 +0000
@@ -40,16 +40,16 @@
  * <em>rmi</em> protocol of the HotSpot PerfData monitoring implementation.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class MonitoredHostProvider extends MonitoredHost {
     private static final String serverName = "/JStatRemoteHost";
     private static final int DEFAULT_POLLING_INTERVAL = 1000;
 
-    private ArrayList listeners;
+    private ArrayList<HostListener> listeners;
     private NotifierTask task;
-    private HashSet activeVms;
+    private HashSet<Integer> activeVms;
     private RemoteVmManager vmManager;
     private RemoteHost remoteHost;
     private Timer timer;
@@ -64,9 +64,9 @@
     public MonitoredHostProvider(HostIdentifier hostId)
            throws MonitorException {
         this.hostId = hostId;
-        this.listeners = new ArrayList();
+        this.listeners = new ArrayList<HostListener>();
         this.interval = DEFAULT_POLLING_INTERVAL;
-        this.activeVms = new HashSet();
+        this.activeVms = new HashSet<Integer>();
 
         String rmiName;
         String sn = serverName;
@@ -231,7 +231,7 @@
     /**
      * {@inheritDoc}
      */
-    public Set activeVms() throws MonitorException {
+    public Set<Integer> activeVms() throws MonitorException {
         return vmManager.activeVms();
     }
 
@@ -297,7 +297,7 @@
 
             try {
                 // get the current set of active JVMs
-                activeVms = (HashSet)vmManager.activeVms();
+                activeVms = (HashSet<Integer>)vmManager.activeVms();
 
             } catch (MonitorException e) {
                 // XXX: use logging api
@@ -314,8 +314,8 @@
                 return;
             }
 
-            Set startedVms = new HashSet();
-            Set terminatedVms = new HashSet();
+            Set<Integer> startedVms = new HashSet<Integer>();
+            Set<Object> terminatedVms = new HashSet<Object>();
 
             for (Iterator i = activeVms.iterator(); i.hasNext(); /* empty */ ) {
                 Integer vmid = (Integer)i.next();
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/RemoteMonitoredVm.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/RemoteMonitoredVm.java	Fri May 25 00:49:14 2007 +0000
@@ -43,12 +43,12 @@
  * of a live, remote target Java Virtual Machine through an RMI server.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class RemoteMonitoredVm extends AbstractMonitoredVm {
 
-    private ArrayList listeners;
+    private ArrayList<VmListener> listeners;
     private NotifierTask notifierTask;
     private SamplerTask samplerTask;
     private Timer timer;
@@ -70,7 +70,7 @@
         super(vmid, interval);
         this.rvm = rvm;
         pdb = new PerfDataBuffer(rvm, vmid.getLocalVmId());
-        this.listeners = new ArrayList();
+        this.listeners = new ArrayList<VmListener>();
         this.timer = timer;
     }
 
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/RemoteVmManager.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/protocol/rmi/RemoteVmManager.java	Fri May 25 00:49:14 2007 +0000
@@ -43,7 +43,7 @@
  * principal running the RMI server application on the remote host.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class RemoteVmManager {
@@ -93,7 +93,7 @@
      *
      * @return Set - the Set of monitorable Java Virtual Machines
      */
-    public Set activeVms() throws MonitorException {
+    public Set<Integer> activeVms() throws MonitorException {
         int[] active = null;
 
         try {
@@ -104,7 +104,7 @@
                                        + e.getMessage(), e);
         }
 
-        Set activeSet = new HashSet(active.length);
+        Set<Integer> activeSet = new HashSet<Integer>(active.length);
 
         for (int i = 0; i < active.length; i++) {
             activeSet.add(new Integer(active[i]));
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/v1_0/PerfDataBuffer.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/v1_0/PerfDataBuffer.java	Fri May 25 00:49:14 2007 +0000
@@ -41,7 +41,7 @@
  * memory buffer.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  * @see AbstractPerfDataBuffer
  */
@@ -79,7 +79,7 @@
     long lastModificationTime;
     int lastUsed;
     IntegerMonitor overflow;
-    ArrayList insertedMonitors;
+    ArrayList<Monitor> insertedMonitors;
 
     /**
      * Construct a PerfDataBufferImpl instance.
@@ -102,7 +102,7 @@
     /**
      * {@inheritDoc}
      */
-    protected void buildMonitorMap(Map map) throws MonitorException {
+    protected void buildMonitorMap(Map<String, Monitor> map) throws MonitorException {
         assert Thread.holdsLock(this);
 
         // start at the beginning of the buffer
@@ -142,13 +142,13 @@
         // work around 1.4.2 counter inititization bugs
         kludge(map);
 
-        insertedMonitors = new ArrayList(map.values());
+        insertedMonitors = new ArrayList<Monitor>(map.values());
     }
 
     /**
      * {@inheritDoc}
      */
-    protected void getNewMonitors(Map map) throws MonitorException {
+    protected void getNewMonitors(Map<String, Monitor> map) throws MonitorException {
         assert Thread.holdsLock(this);
 
         int used = prologue.getUsed();
@@ -185,7 +185,7 @@
     /**
      * {@inheritDoc}
      */
-    protected MonitorStatus getMonitorStatus(Map map) throws MonitorException {
+    protected MonitorStatus getMonitorStatus(Map<String, Monitor> map) throws MonitorException {
         assert Thread.holdsLock(this);
         assert insertedMonitors != null;
 
@@ -196,14 +196,14 @@
         ArrayList removed = EMPTY_LIST;
         ArrayList inserted = insertedMonitors;
 
-        insertedMonitors = new ArrayList();
+        insertedMonitors = new ArrayList<Monitor>();
         return new MonitorStatus(inserted, removed);
     }
 
     /**
      * Build the pseudo monitors used to map the prolog data into counters.
      */
-    protected void buildPseudoMonitors(Map map) {
+    protected void buildPseudoMonitors(Map<String, Monitor> map) {
         Monitor monitor = null;
         String name = null;
         IntBuffer ib = null;
@@ -256,7 +256,7 @@
      * starts scheduling tasks, which is the last thing done in vm
      * initialization.
      */
-    protected void synchWithTarget(Map map) throws MonitorException {
+    protected void synchWithTarget(Map<String, Monitor> map) throws MonitorException {
         /*
          * synch must happen with syncWaitMs from now. Default is 5 seconds,
          * which is reasonabally generous and should provide for extreme
@@ -290,14 +290,14 @@
      * the given name. The polling period is bounded by the timeLimit
      * argument.
      */
-    protected Monitor pollFor(Map map, String name, long timeLimit)
+    protected Monitor pollFor(Map<String, Monitor> map, String name, long timeLimit)
                       throws MonitorException {
         Monitor monitor = null;
 
         log("polling for: " + lvmid + "," + name + " ");
 
         pollForEntry = nextEntry;
-        while ((monitor = (Monitor)map.get(name)) == null) {
+        while ((monitor = map.get(name)) == null) {
             log(".");
 
             try { Thread.sleep(20); } catch (InterruptedException e) { }
@@ -320,7 +320,7 @@
      * method depends on the availability of certain counters, which
      * is generally guaranteed by the synchWithTarget() method.
      */
-    protected void kludge(Map map) {
+    protected void kludge(Map<String, Monitor> map) {
         if (Boolean.getBoolean("sun.jvmstat.perfdata.disableKludge")) {
             // bypass all kludges
             return;
@@ -359,7 +359,7 @@
      * is set. This bug couldn't be fixed for 1.4.2 FCS due to putback
      * restrictions.
      */
-    private void kludgeMantis(Map map, StringMonitor args) {
+    private void kludgeMantis(Map<String, Monitor> map, StringMonitor args) {
         /*
          * the HotSpot 1.4.2 JVM with the +UseParallelGC option along
          * with its default +UseAdaptiveSizePolicy option has a bug with
--- a/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/v2_0/PerfDataBuffer.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/jvmstat/perfdata/monitor/v2_0/PerfDataBuffer.java	Fri May 25 00:49:14 2007 +0000
@@ -59,7 +59,7 @@
  * </pre>
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  * @see AbstractPerfDataBuffer
  */
@@ -97,7 +97,7 @@
     int nextEntry;
     long lastNumEntries;
     IntegerMonitor overflow;
-    ArrayList insertedMonitors;
+    ArrayList<Monitor> insertedMonitors;
 
     /**
      * Construct a PerfDataBuffer instance.
@@ -120,7 +120,7 @@
     /**
      * {@inheritDoc}
      */
-    protected void buildMonitorMap(Map map) throws MonitorException {
+    protected void buildMonitorMap(Map<String, Monitor>  map) throws MonitorException {
         assert Thread.holdsLock(this);
 
         // start at the beginning of the buffer
@@ -160,13 +160,13 @@
         lastNumEntries = numEntries;
 
         // keep track of the monitors just added.
-        insertedMonitors = new ArrayList(map.values());
+        insertedMonitors = new ArrayList<Monitor>(map.values());
     }
 
     /**
      * {@inheritDoc}
      */
-    protected void getNewMonitors(Map map) throws MonitorException {
+    protected void getNewMonitors(Map<String, Monitor> map) throws MonitorException {
         assert Thread.holdsLock(this);
 
         int numEntries = prologue.getNumEntries();
@@ -193,7 +193,7 @@
     /**
      * {@inheritDoc}
      */
-    protected MonitorStatus getMonitorStatus(Map map) throws MonitorException {
+    protected MonitorStatus getMonitorStatus(Map<String, Monitor> map) throws MonitorException {
         assert Thread.holdsLock(this);
         assert insertedMonitors != null;
 
@@ -204,14 +204,14 @@
         ArrayList removed = EMPTY_LIST;
         ArrayList inserted = insertedMonitors;
 
-        insertedMonitors = new ArrayList();
+        insertedMonitors = new ArrayList<Monitor>();
         return new MonitorStatus(inserted, removed);
     }
 
     /**
      * Build the pseudo monitors used to map the prolog data into counters.
      */
-    protected void buildPseudoMonitors(Map map) {
+    protected void buildPseudoMonitors(Map<String, Monitor> map) {
         Monitor monitor = null;
         String name = null;
         IntBuffer ib = null;
@@ -521,14 +521,14 @@
     /**
      * Method to dump debugging information
      */
-    private void dumpAll(Map map, int lvmid) {
+    private void dumpAll(Map<String, Monitor> map, int lvmid) {
         if (DEBUG) {
-            Set keys = map.keySet();
+            Set<String> keys = map.keySet();
 
             System.err.println("Dump for " + lvmid);
             int j = 0;
             for (Iterator i = keys.iterator(); i.hasNext(); j++) {
-                Monitor monitor = (Monitor)map.get(i.next());
+                Monitor monitor = map.get(i.next());
                 System.err.println(j + "\t" + monitor.getName()
                                    + "=" + monitor.getValue());
             }
--- a/j2se/src/share/classes/sun/management/Agent.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/Agent.java	Fri May 25 00:49:14 2007 +0000
@@ -275,8 +275,8 @@
             try {
                 // Instantiate the named class. 
                 // invoke the premain(String args) method
-                Class clz = ClassLoader.getSystemClassLoader().loadClass(cname);
-                Method premain = clz.getMethod("premain",
+                Class<?> clz = ClassLoader.getSystemClassLoader().loadClass(cname);
+                Method premain = clz.getMethod("premain", 
                                                new Class[] { String.class });
                 premain.invoke(null, /* static */
                                new Object[] { args });
--- a/j2se/src/share/classes/sun/management/GcInfoCompositeData.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/GcInfoCompositeData.java	Fri May 25 00:49:14 2007 +0000
@@ -183,8 +183,7 @@
             getMemoryUsageBeforeGc(CompositeData cd) {
         try {
             TabularData td = (TabularData) cd.get(MEMORY_USAGE_BEFORE_GC);
-            return (Map<String,MemoryUsage>)
-                memoryUsageMapType.toJavaTypeData(td);
+            return cast(memoryUsageMapType.toJavaTypeData(td));
         } catch (InvalidObjectException e) {
             // Should never reach here
             throw Util.newAssertionError(e);
@@ -193,12 +192,17 @@
             throw Util.newAssertionError(e);
         }
     }
+
+    @SuppressWarnings("unchecked") 
+    public static Map<String, MemoryUsage> cast(Object x) {
+	return (Map<String, MemoryUsage>) x;
+    }
     public static Map<String, MemoryUsage> 
             getMemoryUsageAfterGc(CompositeData cd) {
         try {
             TabularData td = (TabularData) cd.get(MEMORY_USAGE_AFTER_GC);
-            return (Map<String,MemoryUsage>)
-                memoryUsageMapType.toJavaTypeData(td);
+            //return (Map<String,MemoryUsage>)
+            return cast(memoryUsageMapType.toJavaTypeData(td));
         } catch (InvalidObjectException e) {
             // Should never reach here
             throw Util.newAssertionError(e);
--- a/j2se/src/share/classes/sun/management/HotspotClassLoading.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/HotspotClassLoading.java	Fri May 25 00:49:14 2007 +0000
@@ -25,6 +25,8 @@
 
 package sun.management;
 
+import sun.management.counter.*;
+
 /**
  * Implementation class of HotspotClassLoadingMBean interface.
  *
@@ -78,7 +80,7 @@
     private static final String CLS_COUNTER_NAME_PATTERN =
         JAVA_CLS + "|" + COM_SUN_CLS + "|" + SUN_CLS;
 
-    public java.util.List getInternalClassLoadingCounters() {
+    public java.util.List<Counter> getInternalClassLoadingCounters() {
         return jvm.getInternalCounters(CLS_COUNTER_NAME_PATTERN);
     }
 }
--- a/j2se/src/share/classes/sun/management/HotspotCompilation.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/HotspotCompilation.java	Fri May 25 00:49:14 2007 +0000
@@ -112,7 +112,7 @@
     private CompilerThreadInfo[] threads;
     private int numActiveThreads; // number of active compiler threads
 
-    private Map counters; 
+    private Map<String, Counter> counters; 
     private Counter lookup(String name) {
         Counter c = null;
 
@@ -137,7 +137,7 @@
     private void initCompilerCounters() {
         // Build a tree map of the current list of performance counters 
         ListIterator iter = getInternalCompilerCounters().listIterator(); 
-        counters = new TreeMap();
+        counters = new TreeMap<String, Counter>();
         while (iter.hasNext()) {
             Counter c = (Counter) iter.next();
             counters.put(c.getName(), c);
@@ -199,8 +199,8 @@
         return nmethodSize.longValue();
     }
 
-    public java.util.List getCompilerThreadStats() {
-        List list = new ArrayList(threads.length);
+    public java.util.List<CompilerThreadStat> getCompilerThreadStats() {
+        List<CompilerThreadStat> list = new ArrayList<CompilerThreadStat>(threads.length);
         int i = 0;
         if (threads[0] == null) {
             // no adaptor thread
@@ -230,7 +230,7 @@
                               -1);
     }
 
-    public java.util.List getInternalCompilerCounters() {
+    public java.util.List<Counter> getInternalCompilerCounters() {
         return jvm.getInternalCounters(CI_COUNTER_NAME_PATTERN); 
     } 
 }
--- a/j2se/src/share/classes/sun/management/HotspotMemory.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/HotspotMemory.java	Fri May 25 00:49:14 2007 +0000
@@ -54,7 +54,7 @@
     private static final String GC_COUNTER_NAME_PATTERN =
         JAVA_GC + "|" + COM_SUN_GC + "|" + SUN_GC;
 
-    public java.util.List getInternalMemoryCounters() {
+    public java.util.List<Counter> getInternalMemoryCounters() {
         return jvm.getInternalCounters(GC_COUNTER_NAME_PATTERN); 
     } 
 }
--- a/j2se/src/share/classes/sun/management/HotspotRuntime.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/HotspotRuntime.java	Fri May 25 00:49:14 2007 +0000
@@ -27,6 +27,8 @@
 
 import java.util.List;
 import java.util.ArrayList;
+import sun.management.counter.Counter;
+
 
 /**
  * Implementation class of HotspotRuntimeMBean interface.
@@ -69,7 +71,7 @@
         JAVA_RT + "|" + COM_SUN_RT + "|" + SUN_RT + "|" +
         JAVA_PROPERTY + "|" + COM_SUN_PROPERTY + "|" + SUN_PROPERTY;
 
-    public java.util.List getInternalRuntimeCounters() {
+    public java.util.List<Counter> getInternalRuntimeCounters() {
         return jvm.getInternalCounters(RT_COUNTER_NAME_PATTERN); 
     } 
 }
--- a/j2se/src/share/classes/sun/management/HotspotThread.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/HotspotThread.java	Fri May 25 00:49:14 2007 +0000
@@ -27,6 +27,8 @@
 
 import java.util.Map;
 import java.util.HashMap;
+import sun.management.counter.Counter;
+
 
 /**
  * Implementation class of HotspotThreadMBean interface.
@@ -48,15 +50,15 @@
 
     public native int getInternalThreadCount();
 
-    public Map getInternalThreadCpuTimes() {
+    public Map<String, Long> getInternalThreadCpuTimes() {
         int count = getInternalThreadCount();
         if (count == 0) {
-            return java.util.Collections.EMPTY_MAP;
+            return java.util.Collections.emptyMap(); 
         }
         String[] names = new String[count];
         long[] times = new long[count];
         int numThreads = getInternalThreadTimes0(names, times);
-        Map result = new HashMap(numThreads);
+        Map<String, Long> result = new HashMap<String, Long>(numThreads);
         for (int i = 0; i < numThreads; i++) {
             result.put(names[i], new Long(times[i]));
         }
@@ -71,7 +73,7 @@
     private static final String THREADS_COUNTER_NAME_PATTERN =
         JAVA_THREADS + "|" + COM_SUN_THREADS + "|" + SUN_THREADS;
 
-    public java.util.List getInternalThreadingCounters() {
+    public java.util.List<Counter> getInternalThreadingCounters() {
         return jvm.getInternalCounters(THREADS_COUNTER_NAME_PATTERN);
     }
 }
--- a/j2se/src/share/classes/sun/management/ManagementFactory.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/ManagementFactory.java	Fri May 25 00:49:14 2007 +0000
@@ -123,7 +123,7 @@
 
     public static List<MemoryPoolMXBean> getMemoryPoolMXBeans() {
         MemoryPoolMXBean[] pools = MemoryImpl.getMemoryPools();
-        List list = new ArrayList(pools.length);
+        List<MemoryPoolMXBean> list = new ArrayList<MemoryPoolMXBean>(pools.length);
         for (int i = 0; i < pools.length; i++) {
             MemoryPoolMXBean p = pools[i];
             list.add(p);
@@ -133,7 +133,7 @@
 
     public static List<MemoryManagerMXBean> getMemoryManagerMXBeans() {
         MemoryManagerMXBean[]  mgrs = MemoryImpl.getMemoryManagers();
-        List result = new ArrayList(mgrs.length);
+        List<MemoryManagerMXBean> result = new ArrayList<MemoryManagerMXBean>(mgrs.length);
         for (int i = 0; i < mgrs.length; i++) {
             MemoryManagerMXBean m = mgrs[i];
             result.add(m);
@@ -143,7 +143,7 @@
 
     public static List<GarbageCollectorMXBean> getGarbageCollectorMXBeans() {
         MemoryManagerMXBean[]  mgrs = MemoryImpl.getMemoryManagers();
-        List result = new ArrayList(mgrs.length);
+        List<GarbageCollectorMXBean> result = new ArrayList<GarbageCollectorMXBean>(mgrs.length);
         for (int i = 0; i < mgrs.length; i++) {
             if (mgrs[i] instanceof GarbageCollectorMXBean) {
                 GarbageCollectorMXBean gc = (GarbageCollectorMXBean) mgrs[i];
@@ -288,7 +288,7 @@
             final MBeanServer mbs0 = mbs;
             final Object mbean0 = mbean;
             final boolean ignore = ignoreConflicts;
-            AccessController.doPrivileged(new PrivilegedExceptionAction() {
+            AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
                 public Object run() throws InstanceAlreadyExistsException,
                                            MBeanRegistrationException,
                                            NotCompliantMBeanException {
@@ -409,7 +409,7 @@
 
             // inner class requires these fields to be final
             final MBeanServer mbs0 = mbs;
-            AccessController.doPrivileged(new PrivilegedExceptionAction() {
+            AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
                 public Object run() throws MBeanRegistrationException,
                                            RuntimeOperationsException  {
                     try {
--- a/j2se/src/share/classes/sun/management/MappedMXBeanType.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/MappedMXBeanType.java	Fri May 25 00:49:14 2007 +0000
@@ -57,6 +57,9 @@
  *
  * OpenDataException will be thrown if a Java type is not supported.
  */
+// Suppress unchecked cast warnings at line 442, 523 and 546 
+// Suppress unchecked calls at line 235, 284, 380 and 430. 
+@SuppressWarnings("unchecked")
 public abstract class MappedMXBeanType {
     private static final WeakHashMap<Type,MappedMXBeanType> convertedTypes =
         new WeakHashMap<Type,MappedMXBeanType>();
@@ -557,7 +560,7 @@
 
             final TabularData td = (TabularData) data;
  
-            Map result = new HashMap();
+            Map<Object, Object> result = new HashMap<Object, Object>();
             for (CompositeData row : (Collection<CompositeData>) td.values()) {
                 Object key = keyType.toJavaTypeData(row.get(KEY));
                 Object value = valueType.toJavaTypeData(row.get(VALUE));
@@ -567,7 +570,7 @@
         }
     }
 
-    private static final Class COMPOSITE_DATA_CLASS = 
+    private static final Class<?> COMPOSITE_DATA_CLASS = 
         javax.management.openmbean.CompositeData.class;
 
     // Classes that have a static from method
@@ -600,7 +603,7 @@
     //   its element type is determined as described above. 
     //
     static class CompositeDataMXBeanType extends MappedMXBeanType {
-        final Class javaClass;
+        final Class<?> javaClass;
         final boolean isCompositeData;
         Method fromMethod = null;
 
@@ -610,9 +613,8 @@
 
             // check if a static from method exists
             try {
-                fromMethod = (Method)
-                    AccessController.doPrivileged(new PrivilegedExceptionAction() {
-                        public Object run() throws NoSuchMethodException {
+                fromMethod = AccessController.doPrivileged(new PrivilegedExceptionAction<Method>() {
+                        public Method run() throws NoSuchMethodException {
                             return javaClass.getMethod("from", COMPOSITE_DATA_CLASS);
                         }
                     });
@@ -631,9 +633,9 @@
                 this.isCompositeData = false;
 
                 // Make a CompositeData containing all the getters
-                final Method[] methods = (Method[])
-                    AccessController.doPrivileged(new PrivilegedAction() {
-                        public Object run() {
+                final Method[] methods = 
+                    AccessController.doPrivileged(new PrivilegedAction<Method[]>() {
+                        public Method[] run() {
                             return javaClass.getMethods();
                         }
                     });
--- a/j2se/src/share/classes/sun/management/MemoryPoolImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/MemoryPoolImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -173,6 +173,11 @@
                 "Usage threshold is not supported");
         }
 
+        // return false if usage threshold crossing checking is disabled
+        if (usageThreshold == 0) {
+            return false;
+        }
+
         MemoryUsage u = getUsage0();
         return (u.getUsed() >= usageThreshold || 
                 usageSensor.isOn());
@@ -237,6 +242,11 @@
                 "CollectionUsage threshold is not supported");
         }
 
+        // return false if collection usage threshold crossing checking is disabled
+        if (collectionThreshold == 0) {
+            return false;
+        }
+
         MemoryUsage u = getCollectionUsage0();
         return (gcSensor.isOn() ||
                 (u != null && u.getUsed() >= collectionThreshold)); 
--- a/j2se/src/share/classes/sun/management/MonitorInfoCompositeData.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/MonitorInfoCompositeData.java	Fri May 25 00:49:14 2007 +0000
@@ -91,7 +91,7 @@
 	try {
 	    monitorInfoCompositeType = (CompositeType)
 		MappedMXBeanType.toOpenType(MonitorInfo.class);
-            Set s = monitorInfoCompositeType.keySet();
+            Set<String> s = monitorInfoCompositeType.keySet();
             monitorInfoItemNames = (String[]) s.toArray(new String[0]);
 	} catch (OpenDataException e) {
             // Should never reach here
--- a/j2se/src/share/classes/sun/management/NotificationEmitterSupport.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/NotificationEmitterSupport.java	Fri May 25 00:49:14 2007 +0000
@@ -71,7 +71,7 @@
 	   efficient solution would be to clone the listener list
 	   every time a notification is sent.  */
 	synchronized (listenerLock) {
-	    List newList = new ArrayList(listenerList.size() + 1);
+	    List<ListenerInfo> newList = new ArrayList<ListenerInfo>(listenerList.size() + 1);
 	    newList.addAll(listenerList);
 	    newList.add(new ListenerInfo(listener, filter, handback));
 	    listenerList = newList;
@@ -82,7 +82,7 @@
         throws ListenerNotFoundException {
 
 	synchronized (listenerLock) {
-	    List newList = new ArrayList(listenerList);
+	    List<ListenerInfo> newList = new ArrayList<ListenerInfo>(listenerList);
 	    /* We scan the list of listeners in reverse order because
 	       in forward order we would have to repeat the loop with
 	       the same index after a remove.  */
@@ -106,7 +106,7 @@
 	boolean found = false;
 
 	synchronized (listenerLock) {
-	    List newList = new ArrayList(listenerList);
+	    List<ListenerInfo> newList = new ArrayList<ListenerInfo>(listenerList);
 	    final int size = newList.size();
 	    for (int i = 0; i < size; i++) {
 		ListenerInfo li = (ListenerInfo) newList.get(i);
@@ -141,7 +141,7 @@
 	    return;
 	}
         
-	List currentList;
+	List<ListenerInfo> currentList;
 	synchronized (listenerLock) {
 	    currentList = listenerList;
 	}
@@ -192,7 +192,7 @@
      * listeners end up depending on other threads that are themselves
      * accessing this NotificationBroadcasterSupport.
      */
-    private List listenerList = Collections.EMPTY_LIST;
+    private List<ListenerInfo> listenerList = Collections.emptyList();
 
     abstract public MBeanNotificationInfo[] getNotificationInfo();
 }
--- a/j2se/src/share/classes/sun/management/RuntimeImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/RuntimeImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -132,7 +132,7 @@
 
     public Map<String,String> getSystemProperties() {
         Properties sysProps = System.getProperties();
-        Map<String,String> map = new HashMap();
+        Map<String,String> map = new HashMap<String, String>();
 
         // Properties.entrySet() does not include the entries in
         // the default properties.  So use Properties.stringPropertyNames()
--- a/j2se/src/share/classes/sun/management/VMManagement.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/VMManagement.java	Fri May 25 00:49:14 2007 +0000
@@ -26,7 +26,7 @@
 package sun.management;
 
 import java.util.List;
-
+import sun.management.counter.Counter;
 /**
  * An interface for the monitoring and management of the 
  * Java virtual machine.
@@ -99,5 +99,5 @@
     public long    getClassVerificationTime();
 
     // Performance counter support
-    public List    getInternalCounters(String pattern);
+    public List<Counter>   getInternalCounters(String pattern);
 }
--- a/j2se/src/share/classes/sun/management/VMManagementImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/VMManagementImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -155,10 +155,11 @@
     public String   getLibraryPath()  {
         return System.getProperty("java.library.path");
     }
+
     public String   getBootClassPath( ) {
-        PrivilegedAction pa
+        PrivilegedAction<String> pa
             = new GetPropertyAction("sun.boot.class.path");
-        String result = (String) AccessController.doPrivileged(pa);
+        String result =  AccessController.doPrivileged(pa);
         return result;
     }
 
@@ -166,7 +167,8 @@
     public synchronized List<String> getVmArguments() {
         if (vmArgs == null) {
             String[] args = getVmArguments0();
-            List<String> l = (args != null ? Arrays.asList(args) : Collections.EMPTY_LIST);
+            List<String> l = ((args != null && args.length != 0) ? Arrays.asList(args) : 
+					Collections.<String>emptyList());
             vmArgs = Collections.unmodifiableList(l); 
         }
         return vmArgs;
@@ -178,9 +180,9 @@
 
     // Compilation Subsystem
     public String   getCompilerName() {
-        String name = (String) AccessController.doPrivileged(
-            new PrivilegedAction() {
-                public Object run() {
+        String name =  AccessController.doPrivileged(
+            new PrivilegedAction<String>() {
+                public String run() {
                     return System.getProperty("sun.management.compiler");
                 }
             });
@@ -229,7 +231,7 @@
         }
 
         // construct PerfInstrumentation object 
-        Perf perf = (Perf) AccessController.doPrivileged(new Perf.GetPerfAction());
+        Perf perf =  AccessController.doPrivileged(new Perf.GetPerfAction());
         try {
             ByteBuffer bb = perf.attach(0, "r");
             if (bb.capacity() == 0) {
@@ -247,12 +249,12 @@
         return perfInstr;
     }
 
-    public List    getInternalCounters(String pattern) {
+    public List<Counter> getInternalCounters(String pattern) {
         PerfInstrumentation perf = getPerfInstrumentation();
         if (perf != null) {
             return perf.findByPattern(pattern);
         } else {
-            return Collections.EMPTY_LIST;
+            return Collections.emptyList();
         }
     }
 }
--- a/j2se/src/share/classes/sun/management/counter/perf/PerfInstrumentation.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/counter/perf/PerfInstrumentation.java	Fri May 25 00:49:14 2007 +0000
@@ -36,7 +36,7 @@
     private long lastModificationTime;
     private long lastUsed;
     private int  nextEntry;
-    private SortedMap  map;
+    private SortedMap<String, Counter>  map;
 
     public PerfInstrumentation(ByteBuffer b) {
         prologue = new Prologue(b);
@@ -73,7 +73,7 @@
         buffer.position(prologue.getEntryOffset());
         nextEntry = buffer.position(); 
         // rebuild all the counters
-        map = new TreeMap();
+        map = new TreeMap<String, Counter>();
     }
 
     boolean hasNext() {
@@ -147,17 +147,17 @@
         return counter;
     }
 
-    public synchronized List getAllCounters() {
+    public synchronized List<Counter> getAllCounters() {
         while (hasNext()) {        
             Counter c = getNextCounter();
             if (c != null) {
                 map.put(c.getName(), c);
             }
         }
-        return new ArrayList(map.values());
+        return new ArrayList<Counter>(map.values());
     }
 
-    public synchronized List findByPattern(String patternString) {
+    public synchronized List<Counter> findByPattern(String patternString) {
         while (hasNext()) {        
             Counter c = getNextCounter();
             if (c != null) {
@@ -167,7 +167,7 @@
     
         Pattern pattern = Pattern.compile(patternString);
         Matcher matcher = pattern.matcher("");
-        List matches = new ArrayList();
+        List<Counter> matches = new ArrayList<Counter>();
 
         Iterator iter = map.entrySet().iterator();
         while (iter.hasNext()) {
--- a/j2se/src/share/classes/sun/management/jmxremote/ConnectorBootstrap.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/jmxremote/ConnectorBootstrap.java	Fri May 25 00:49:14 2007 +0000
@@ -180,7 +180,7 @@
     private static class AccessFileCheckerAuthenticator
         implements JMXAuthenticator {
 
-        public AccessFileCheckerAuthenticator(Map env) throws IOException {
+        public AccessFileCheckerAuthenticator(Map<String, Object> env) throws IOException {
             environment = env;
             accessFile = (String) env.get("jmx.remote.x.access.file");
             properties = propertiesFromFile(accessFile);
@@ -206,7 +206,7 @@
 		if (properties.containsKey(p.getName()))
 		    return;
             }
-            final Set principalsStr = new HashSet();
+            final Set<String> principalsStr = new HashSet<String>();
             for (Iterator i = principals.iterator(); i.hasNext(); ) {
                 final Principal p = (Principal) i.next();
                 principalsStr.add(p.getName());
@@ -228,7 +228,7 @@
             return p;
         }
 
-        private final Map environment;
+        private final Map<String, Object> environment;
         private final Properties properties;
         private final String accessFile;
     }
@@ -408,7 +408,7 @@
         System.setProperty("java.rmi.server.randomIDs", "true");
 
         // This RMI server should not keep the VM alive
-        Map env = new HashMap();
+        Map<String, Object> env = new HashMap<String, Object>();
         env.put(RMIExporter.EXPORTER_ATTRIBUTE, new PermanentExporter());
 
         // The local connector server need only be available via the
@@ -516,7 +516,7 @@
 
         JMXServiceURL url = new JMXServiceURL("rmi", null, 0);
 
-        Map env = new HashMap();
+        Map<String, Object> env = new HashMap<String, Object>();
 
         PermanentExporter exporter = new PermanentExporter();
         
--- a/j2se/src/share/classes/sun/management/snmp/jvminstr/README	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/management/snmp/jvminstr/README	Fri May 25 00:49:14 2007 +0000
@@ -1,6 +1,6 @@
 #==============================================================================
 #          JVM Management MIB - Instrumentation Classes
-#          Date: 07/05/06, Version: 1.5
+#          Date: 07/05/24, Version: 1.5
 #==============================================================================
 
 In this directory:
--- a/j2se/src/share/classes/sun/misc/Perf.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/misc/Perf.java	Fri May 25 00:49:14 2007 +0000
@@ -44,7 +44,7 @@
  * stamp and interval measurement purposes.
  *
  * @author   Brian Doherty
- * @version  1.14, 05/05/07
+ * @version  1.15, 05/09/07
  * @since    1.4.2
  * @see      #getPerf
  * @see      sun.misc.Perf$GetPerfAction
@@ -71,7 +71,7 @@
      * <blockquote><pre>
      * class MyTrustedClass {
      *   private static final Perf perf =
-     *       (Perf)AccessController.doPrivileged(new Perf.GetPerfAction());
+     *       AccessController.doPrivileged(new Perf.GetPerfAction<Perf>());
      *   ...
      * }
      * </pre></blockquote>
@@ -87,14 +87,14 @@
      * @see  java.security.AccessController#doPrivileged(PrivilegedAction)
      * @see  java.lang.RuntimePermission
      */
-    public static class GetPerfAction implements PrivilegedAction
+    public static class GetPerfAction implements PrivilegedAction<Perf>
     {
         /**
          * Run the <code>Perf.getPerf()</code> method in a privileged context.
          *
          * @see #getPerf
          */
-        public Object run() {
+        public Perf run() {
             return getPerf();
         }
     }
--- a/j2se/src/share/classes/sun/nio/ch/ChannelInputStream.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/nio/ch/ChannelInputStream.java	Fri May 25 00:49:14 2007 +0000
@@ -37,7 +37,7 @@
  *
  * @author Mike McCloskey
  * @author Mark Reinhold
- * @version 1.13, 07/05/05
+ * @version 1.14, 07/05/06
  * @since 1.4
  */
 
@@ -97,8 +97,8 @@
 	ByteBuffer bb = ((this.bs == bs)
 			 ? this.bb
 			 : ByteBuffer.wrap(bs));
+	bb.limit(Math.min(off + len, bb.capacity()));
 	bb.position(off);
-	bb.limit(Math.min(off + len, bb.capacity()));
 	this.bb = bb;
 	this.bs = bs;
 	return read(bb);
--- a/j2se/src/share/classes/sun/nio/ch/NativeThreadSet.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/nio/ch/NativeThreadSet.java	Fri May 25 00:49:14 2007 +0000
@@ -52,6 +52,7 @@
 		int nn = on * 2;
 		long[] nelts = new long[nn];
 		System.arraycopy(elts, 0, nelts, 0, on);
+		elts = nelts;
 		start = on;
 	    }
 	    for (int i = start; i < elts.length; i++) {
--- a/j2se/src/share/classes/sun/security/provider/certpath/CrlRevocationChecker.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/security/provider/certpath/CrlRevocationChecker.java	Fri May 25 00:49:14 2007 +0000
@@ -77,7 +77,7 @@
  * from one or more <code>CertStores</code>. This is based on section 6.3
  * of RFC 3280 (http://www.ietf.org/rfc/rfc3280.txt).
  *
- * @version 	1.31, 05/05/07
+ * @version 	1.32, 05/06/07
  * @since	1.4
  * @author	Seth Proctor
  * @author	Steve Hanna
@@ -318,21 +318,16 @@
 	    throw new CertPathValidatorException(e);
         }
 	    
-        if (mPossibleCRLs.isEmpty() && mApprovedCRLs.isEmpty()) {
-	    // we are assuming the directory is not secure,
- 	    // so someone may have removed all the CRLs.
- 	    throw new CertPathValidatorException
-		("Could not determine revocation status");
-	}
-		
 	if (debug != null) {
 	    debug.println("CrlRevocationChecker.verifyRevocationStatus() " +
 	        "crls.size() = " + mPossibleCRLs.size());
 	}
-	// Now that we have a list of possible CRLs, see which ones can
-	// be approved
-	mApprovedCRLs.addAll(verifyPossibleCRLs(mPossibleCRLs, currCert, 
+	if (!mPossibleCRLs.isEmpty()) {
+	    // Now that we have a list of possible CRLs, see which ones can
+	    // be approved
+	    mApprovedCRLs.addAll(verifyPossibleCRLs(mPossibleCRLs, currCert, 
 		signFlag, prevKey, reasonsMask));
+	}
 	if (debug != null) {
 	    debug.println("CrlRevocationChecker.verifyRevocationStatus() " +
 	        "approved crls.size() = " + mApprovedCRLs.size());
--- a/j2se/src/share/classes/sun/tools/jinfo/JInfo.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jinfo/JInfo.java	Fri May 25 00:49:14 2007 +0000
@@ -84,7 +84,7 @@
     private static void runTool(String args[]) throws Exception {        
         String tool = "sun.jvm.hotspot.tools.JInfo";
         // Tool not available on this  platform.
-	Class c = loadClass(tool);
+	Class<?> c = loadClass(tool);
 	if (c == null) {          
 	    usage();
 	}
--- a/j2se/src/share/classes/sun/tools/jmap/JMap.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jmap/JMap.java	Fri May 25 00:49:14 2007 +0000
@@ -183,7 +183,7 @@
         }
         
         // Tool not available on this  platform.
-	Class c = loadClass(tool);
+	Class<?> c = loadClass(tool);
 	if (c == null) {          
 	    usage();
 	}
--- a/j2se/src/share/classes/sun/tools/jstack/JStack.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstack/JStack.java	Fri May 25 00:49:14 2007 +0000
@@ -116,7 +116,7 @@
 
     // SA JStack tool
     private static void runJStackTool(boolean mixed, boolean locks, String args[]) throws Exception {
-	Class cl = loadSAClass();
+	Class<?> cl = loadSAClass();
 	if (cl == null) {
 	    usage();		// SA not available
 	}
--- a/j2se/src/share/classes/sun/tools/jstat/Alignment.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/Alignment.java	Fri May 25 00:49:14 2007 +0000
@@ -31,13 +31,13 @@
  * A typesafe enumeration for describing data alignment semantics
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public abstract class Alignment {
 
     private static int nextOrdinal = 0;
-    private static HashMap map = new HashMap();
+    private static HashMap<String, Alignment> map = new HashMap<String, Alignment>();
     private static final String blanks = "                                                                                                                                                               ";
     private final String name;
     private final int value = nextOrdinal++;
@@ -103,7 +103,7 @@
      * @return     The Alignment object matching the given string.
      */
     public static Alignment toAlignment(String s) {
-        return (Alignment)map.get(s);
+        return map.get(s);
     }
 
     /**
--- a/j2se/src/share/classes/sun/tools/jstat/Arguments.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/Arguments.java	Fri May 25 00:49:14 2007 +0000
@@ -29,6 +29,7 @@
 import java.net.*;
 import java.util.*;
 import java.util.regex.*;
+import sun.jvmstat.monitor.Monitor;
 import sun.jvmstat.monitor.VmIdentifier;
 
 /**
@@ -36,7 +37,7 @@
  * level access to arguments.
  *
  * @author Brian Doherty
- * @version 1.10, 05/05/07
+ * @version 1.11, 05/09/07
  * @since 1.5
  */
 public class Arguments {
@@ -49,7 +50,7 @@
     private static final String OPTIONS_FILENAME = "jstat_options";
     private static final String ALL_NAMES = "\\w*";
 
-    private Comparator comparator;
+    private Comparator<Monitor> comparator;
     private int headerRate;
     private boolean help;
     private boolean list;
@@ -166,7 +167,7 @@
             if (arg.compareTo("-a") == 0) {
                 comparator = new AscendingMonitorComparator();
             } else if (arg.compareTo("-d") == 0) {
-                comparator = new DescendingMonitorComparator();
+                comparator =  new DescendingMonitorComparator();
             } else if (arg.compareTo("-t") == 0) {
                 timestamp = true;
             } else if (arg.compareTo("-v") == 0) {
@@ -331,7 +332,7 @@
         }
     }
 
-    public Comparator comparator() {
+    public Comparator<Monitor> comparator() {
         return comparator;
     }
 
--- a/j2se/src/share/classes/sun/tools/jstat/AscendingMonitorComparator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/AscendingMonitorComparator.java	Fri May 25 00:49:14 2007 +0000
@@ -32,13 +32,13 @@
  * Class to compare two Monitor objects by name in ascending order.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
-class AscendingMonitorComparator implements Comparator {
-    public int compare(Object o1, Object o2) {
-        String name1 = ((Monitor)o1).getName();
-        String name2 = ((Monitor)o2).getName();
+class AscendingMonitorComparator implements Comparator<Monitor> {
+    public int compare(Monitor o1, Monitor o2) {
+        String name1 = o1.getName();
+        String name2 = o2.getName();
         return name1.compareTo(name2);
     }
 }
--- a/j2se/src/share/classes/sun/tools/jstat/DescendingMonitorComparator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/DescendingMonitorComparator.java	Fri May 25 00:49:14 2007 +0000
@@ -32,13 +32,13 @@
  * Class to compare two Monitor objects by name in descending order.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
-class DescendingMonitorComparator implements Comparator {
-    public int compare(Object o1, Object o2) {
-        String name1 = ((Monitor)o1).getName();
-        String name2 = ((Monitor)o2).getName();
+class DescendingMonitorComparator implements Comparator<Monitor> {
+    public int compare(Monitor o1, Monitor o2) {
+        String name1 = o1.getName();
+        String name2 = o2.getName();
         return name2.compareTo(name1);
     }
 }
--- a/j2se/src/share/classes/sun/tools/jstat/ExpressionExecuter.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/ExpressionExecuter.java	Fri May 25 00:49:14 2007 +0000
@@ -33,14 +33,14 @@
  * in the context of the available monitoring data.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class ExpressionExecuter implements ExpressionEvaluator {
     private static final boolean debug =
             Boolean.getBoolean("ExpressionEvaluator.debug");
     private MonitoredVm vm;
-    private HashMap map = new HashMap();
+    private HashMap<String, Object> map = new HashMap<String, Object>();
 
     ExpressionExecuter(MonitoredVm vm) {
         this.vm = vm;
--- a/j2se/src/share/classes/sun/tools/jstat/JStatLogger.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/JStatLogger.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
  * a target Java Virtual Machine.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class JStatLogger {
@@ -53,16 +53,15 @@
     /**
      * print the monitors that match the given monitor name pattern string.
      */
-    public void printNames(String names, Comparator comparator,
+    public void printNames(String names, Comparator<Monitor> comparator,
                            boolean showUnsupported, PrintStream out)
                 throws MonitorException, PatternSyntaxException {
 
         // get the set of all monitors
-        List items = monitoredVm.findByPattern(names);
+        List<Monitor> items = monitoredVm.findByPattern(names);
         Collections.sort(items, comparator);
 
-        for (Iterator i = items.iterator(); i.hasNext(); /* empty */) {
-            Monitor m = (Monitor)i.next();
+        for (Monitor m: items) {
             if (!(m.isSupported() || showUnsupported)) {
                 continue;
             }
@@ -73,13 +72,13 @@
     /**
      * print name=value pairs for the given list of monitors.
      */
-    public void printSnapShot(String names, Comparator comparator,
+    public void printSnapShot(String names, Comparator<Monitor> comparator,
                               boolean verbose, boolean showUnsupported,
                               PrintStream out)
                 throws MonitorException, PatternSyntaxException {
 
         // get the set of all monitors
-        List items = monitoredVm.findByPattern(names);
+        List<Monitor> items = monitoredVm.findByPattern(names);
         Collections.sort(items, comparator);
 
         printList(items, verbose, showUnsupported, out);
@@ -88,13 +87,12 @@
     /**
      * print name=value pairs for the given list of monitors.
      */
-    public void printList(List list, boolean verbose, boolean showUnsupported,
+    public void printList(List<Monitor> list, boolean verbose, boolean showUnsupported,
                           PrintStream out)
                 throws MonitorException {
 
         // print out the name of each available counter
-        for (Iterator i = list.iterator(); i.hasNext(); /* empty */) {
-            Monitor m = (Monitor)i.next();
+        for (Monitor m: list ) {
 
             if (!(m.isSupported() || showUnsupported)) {
                 continue;
--- a/j2se/src/share/classes/sun/tools/jstat/Jstat.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/Jstat.java	Fri May 25 00:49:14 2007 +0000
@@ -38,7 +38,7 @@
  * various UNIX platforms.
  *
  * @author Brian Doherty
- * @version 1.9, 05/05/07
+ * @version 1.10, 05/09/07
  * @since 1.5
  */
 public class Jstat {
@@ -124,9 +124,9 @@
             OptionFormat format = arguments.optionFormat();
             formatter = new OptionOutputFormatter(monitoredVm, format);
         } else {
-            List logged = monitoredVm.findByPattern(arguments.counterNames());
+            List<Monitor> logged = monitoredVm.findByPattern(arguments.counterNames());
             Collections.sort(logged, arguments.comparator());
-            List constants = new ArrayList();
+            List<Monitor> constants = new ArrayList<Monitor>();
 
             for (Iterator i = logged.iterator(); i.hasNext(); /* empty */) {
                 Monitor m = (Monitor)i.next();
--- a/j2se/src/share/classes/sun/tools/jstat/Operator.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/Operator.java	Fri May 25 00:49:14 2007 +0000
@@ -32,13 +32,13 @@
  * A typesafe enumeration for describing mathematical operators.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public abstract class Operator {
 
     private static int nextOrdinal = 0;
-    private static HashMap map = new HashMap();
+    private static HashMap<String, Operator> map = new HashMap<String, Operator>();
 
     private final String name;
     private final int ordinal = nextOrdinal++;
@@ -97,7 +97,7 @@
      * @return     The Operator object matching the given string.
      */
     public static Operator toOperator(String s) {
-        return (Operator)map.get(s);
+        return map.get(s);
     }
 
     /**
--- a/j2se/src/share/classes/sun/tools/jstat/OptionFormat.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/OptionFormat.java	Fri May 25 00:49:14 2007 +0000
@@ -33,16 +33,16 @@
  * line option that was parsed from an option description file.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class OptionFormat {
     protected String name;
-    protected List children;
+    protected List<OptionFormat> children;
 
     public OptionFormat(String name) {
         this.name = name;
-        this.children = new ArrayList();
+        this.children = new ArrayList<OptionFormat>();
     }
 
     public boolean equals(Object o) {
@@ -65,7 +65,7 @@
     }
 
     public OptionFormat getSubFormat(int index) {
-        return (OptionFormat)children.get(index);
+        return children.get(index);
     }
 
     public void insertSubFormat(int index, OptionFormat f) {
@@ -103,8 +103,7 @@
         System.out.println(indent + name + " {");
 
         // iterate over all children and call their printFormat() methods
-        for (Iterator i = children.iterator(); i.hasNext(); /* empty */) {
-            OptionFormat of = (OptionFormat)i.next();
+        for (OptionFormat of : children) { 
             of.printFormat(indentLevel+1);
         }
         System.out.println(indent + "}");
--- a/j2se/src/share/classes/sun/tools/jstat/OptionLister.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/OptionLister.java	Fri May 25 00:49:14 2007 +0000
@@ -33,7 +33,7 @@
  * A class for listing the available options in the jstat_options file.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class OptionLister {
@@ -45,22 +45,22 @@
     }
 
     public void print(PrintStream ps) {
-        Comparator c = new Comparator() {
-               public int compare(Object o1, Object o2) {
-                   OptionFormat of1 = (OptionFormat)o1;
-                   OptionFormat of2 = (OptionFormat)o2;
+        Comparator<OptionFormat> c = new Comparator<OptionFormat>() {
+               public int compare(OptionFormat o1, OptionFormat o2) {
+                   OptionFormat of1 = o1;
+                   OptionFormat of2 = o2;
                    return (of1.getName().compareTo(of2.getName()));
                }
         };
 
-        Set options = new TreeSet(c);
+        Set<OptionFormat> options = new TreeSet<OptionFormat>(c);
 
         for (int i = 0; i < sources.length; i++) {
             try {
                 URL u = sources[i];
                 Reader r = new BufferedReader(
                         new InputStreamReader(u.openStream()));
-                Set s = new Parser(r).parseOptions();
+                Set<OptionFormat> s = new Parser(r).parseOptions();
                 options.addAll(s);
             } catch (IOException e) {
                 if (debug) {
@@ -74,8 +74,7 @@
             }
         }
 
-        for (Iterator i = options.iterator(); i.hasNext(); /* empty */) {
-            OptionFormat of = (OptionFormat)i.next();
+        for ( OptionFormat of : options) {
             if (of.getName().compareTo("timestamp") == 0) {
               // ignore the special timestamp OptionFormat.
               continue;
--- a/j2se/src/share/classes/sun/tools/jstat/Parser.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/Parser.java	Fri May 25 00:49:14 2007 +0000
@@ -33,7 +33,7 @@
  * specification language for the jstat command.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class Parser {
@@ -79,7 +79,7 @@
     };
 
 
-    private static Set reservedWords;
+    private static Set<String> reservedWords;
 
     private StreamTokenizer st;
     private String filename;
@@ -102,7 +102,7 @@
         st.slashSlashComments(true);
         st.slashStarComments(true);
 
-        reservedWords = new HashSet();
+        reservedWords = new HashSet<String>();
         for (int i = 0; i < otherKeyWords.length; i++) {
             reservedWords.add(otherKeyWords[i]);
         }
@@ -552,8 +552,8 @@
         return null;
     }
 
-    public Set parseOptions() throws ParserException, IOException {
-        Set options = new HashSet();
+    public Set<OptionFormat> parseOptions() throws ParserException, IOException {
+        Set<OptionFormat> options = new HashSet<OptionFormat>();
 
         nextToken();
 
--- a/j2se/src/share/classes/sun/tools/jstat/Scale.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstat/Scale.java	Fri May 25 00:49:14 2007 +0000
@@ -31,12 +31,12 @@
  * A typesafe enumeration for describing data scaling semantics
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class Scale {
     private static int nextOrdinal = 0;
-    private static HashMap map = new HashMap();
+    private static HashMap<String, Scale> map = new HashMap<String, Scale>();
 
     private final String name;
     private final int ordinal = nextOrdinal++;
@@ -167,7 +167,7 @@
      * @return     The Scale object matching the given string.
      */
     public static Scale toScale(String s) {
-        return (Scale)map.get(s);
+        return map.get(s);
     }
 
     /**
--- a/j2se/src/share/classes/sun/tools/jstatd/RemoteHostImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/classes/sun/tools/jstatd/RemoteHostImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -44,13 +44,13 @@
  * interface.
  *
  * @author Brian Doherty
- * @version 1.8, 05/05/07
+ * @version 1.9, 05/09/07
  * @since 1.5
  */
 public class RemoteHostImpl implements RemoteHost, HostListener {
 
     private MonitoredHost monitoredHost;
-    private Set activeVms;
+    private Set<Integer> activeVms;
 
     public RemoteHostImpl() throws MonitorException {
         try {
--- a/j2se/src/share/demo/jvmti/java_crw_demo/java_crw_demo.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/demo/jvmti/java_crw_demo/java_crw_demo.c	Fri May 25 00:49:14 2007 +0000
@@ -1743,10 +1743,10 @@
     last_new_pc = 0;
     delta_adj   = 0;
     for ( i = 0 ; i < count ; i++ ) {
-        ByteOffset new_pc;    /* new pc in instrumented code */
+        ByteOffset new_pc=0;    /* new pc in instrumented code */
         unsigned   ft;        /* frame_type */
-        int        delta;     /* pc delta */
-        int        new_delta; /* new pc delta */
+        int        delta=0;     /* pc delta */
+        int        new_delta=0; /* new pc delta */
 
         ft = readU1(ci);
         if ( ft <= 63 ) {
--- a/j2se/src/share/instrument/InstrumentationImplNativeMethods.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/instrument/InstrumentationImplNativeMethods.c	Fri May 25 00:49:14 2007 +0000
@@ -31,6 +31,7 @@
 #include    "Utilities.h"
 #include    "JavaExceptions.h"
 #include    "sun_instrument_InstrumentationImpl.h"
+#include    "typedefs.h"
 
 /*
  * Copyright 2003 Wily Technology, Inc.
@@ -57,7 +58,7 @@
 JNIEXPORT jboolean JNICALL 
 Java_sun_instrument_InstrumentationImpl_isModifiableClass0
   (JNIEnv * jnienv, jobject implThis, jlong agent, jclass clazz) {
-    return isModifiableClass(jnienv, (JPLISAgent*)agent, clazz);
+    return isModifiableClass(jnienv, (JPLISAgent*)(intptr_t)agent, clazz);
 }
 
 /*
@@ -68,7 +69,7 @@
 JNIEXPORT jboolean JNICALL 
 Java_sun_instrument_InstrumentationImpl_isRetransformClassesSupported0
   (JNIEnv * jnienv, jobject implThis, jlong agent) {
-    return isRetransformClassesSupported(jnienv, (JPLISAgent*)agent);
+    return isRetransformClassesSupported(jnienv, (JPLISAgent*)(intptr_t)agent);
 }
 
 /*
@@ -79,7 +80,7 @@
 JNIEXPORT void JNICALL 
 Java_sun_instrument_InstrumentationImpl_setHasRetransformableTransformers
   (JNIEnv * jnienv, jobject implThis, jlong agent, jboolean has) {
-    setHasRetransformableTransformers(jnienv, (JPLISAgent*)agent, has);
+    setHasRetransformableTransformers(jnienv, (JPLISAgent*)(intptr_t)agent, has);
 }
 
 /*
@@ -90,7 +91,7 @@
 JNIEXPORT void JNICALL 
 Java_sun_instrument_InstrumentationImpl_retransformClasses0
   (JNIEnv * jnienv, jobject implThis, jlong agent, jobjectArray classes) {
-    retransformClasses(jnienv, (JPLISAgent*)agent, classes);
+    retransformClasses(jnienv, (JPLISAgent*)(intptr_t)agent, classes);
 }
 
 /*
@@ -100,7 +101,7 @@
  */
 JNIEXPORT void JNICALL Java_sun_instrument_InstrumentationImpl_redefineClasses0
   (JNIEnv * jnienv, jobject implThis, jlong agent, jobjectArray classDefinitions) {
-    redefineClasses(jnienv, (JPLISAgent*)agent, classDefinitions);
+    redefineClasses(jnienv, (JPLISAgent*)(intptr_t)agent, classDefinitions);
 }
 
 /*
@@ -110,7 +111,7 @@
  */
 JNIEXPORT jobjectArray JNICALL Java_sun_instrument_InstrumentationImpl_getAllLoadedClasses0
   (JNIEnv * jnienv, jobject implThis, jlong agent) {
-    return getAllLoadedClasses(jnienv, (JPLISAgent*)agent);
+    return getAllLoadedClasses(jnienv, (JPLISAgent*)(intptr_t)agent);
 }
 
 /*
@@ -120,7 +121,7 @@
  */
 JNIEXPORT jobjectArray JNICALL Java_sun_instrument_InstrumentationImpl_getInitiatedClasses0
   (JNIEnv * jnienv, jobject implThis, jlong agent, jobject classLoader) {
-    return getInitiatedClasses(jnienv, (JPLISAgent*)agent, classLoader);
+    return getInitiatedClasses(jnienv, (JPLISAgent*)(intptr_t)agent, classLoader);
 }
 
 /*
@@ -130,7 +131,7 @@
  */
 JNIEXPORT jlong JNICALL Java_sun_instrument_InstrumentationImpl_getObjectSize0
   (JNIEnv * jnienv, jobject implThis, jlong agent, jobject objectToSize) {
-    return getObjectSize(jnienv, (JPLISAgent*)agent, objectToSize);
+    return getObjectSize(jnienv, (JPLISAgent*)(intptr_t)agent, objectToSize);
 }
 
 
@@ -141,7 +142,7 @@
  */
 JNIEXPORT void JNICALL Java_sun_instrument_InstrumentationImpl_appendToClassLoaderSearch0
   (JNIEnv * jnienv, jobject implThis, jlong agent, jstring jarFile, jboolean isBootLoader) {
-    appendToClassLoaderSearch(jnienv, (JPLISAgent*)agent, jarFile, isBootLoader);
+    appendToClassLoaderSearch(jnienv, (JPLISAgent*)(intptr_t)agent, jarFile, isBootLoader);
 }
 
 
@@ -152,6 +153,6 @@
  */
 JNIEXPORT void JNICALL Java_sun_instrument_InstrumentationImpl_setNativeMethodPrefixes
   (JNIEnv * jnienv, jobject implThis, jlong agent, jobjectArray prefixArray, jboolean isRetransformable) {
-    setNativeMethodPrefixes(jnienv, (JPLISAgent*)agent, prefixArray, isRetransformable);
+    setNativeMethodPrefixes(jnienv, (JPLISAgent*)(intptr_t)agent, prefixArray, isRetransformable);
 }
 
--- a/j2se/src/share/instrument/InvocationAdapter.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/instrument/InvocationAdapter.c	Fri May 25 00:49:14 2007 +0000
@@ -651,7 +651,7 @@
 	}
 	jplis_assert(0);
     }
-
+    return -2; 
 }
 
 
--- a/j2se/src/share/instrument/JPLISAgent.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/instrument/JPLISAgent.c	Fri May 25 00:49:14 2007 +0000
@@ -31,7 +31,6 @@
 #include    <jvmti.h>
 #include    <stdlib.h>
 #include    <string.h>
-
 #include    "JPLISAgent.h"
 #include    "JPLISAssert.h"
 #include    "Utilities.h"
@@ -42,6 +41,7 @@
 #include    "FileSystemSupport.h"		/* MAXPATHLEN */
 
 #include    "sun_instrument_InstrumentationImpl.h"
+#include    "typedefs.h"
 
 /*
  *  The JPLISAgent manages the initialization all of the Java programming language Agents.
@@ -491,7 +491,7 @@
         }
         
     if ( !errorOutstanding ) {
-        jlong   peerReferenceAsScalar = (jlong) agent;
+        jlong   peerReferenceAsScalar = (jlong)(intptr_t) agent;
         localReference = (*jnienv)->NewObject(  jnienv,
                                                 implClass,
                                                 constructorID,
--- a/j2se/src/share/native/common/check_code.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/native/common/check_code.c	Fri May 25 00:49:14 2007 +0000
@@ -1548,12 +1548,14 @@
     struct handler_info_type *handler_info = context->handler_info;
     int *code_data = context->code_data;
     int code_length = context->code_length;
-    int i;
-    for (i = JVM_GetMethodIxExceptionTableLength(env, 
-						 context->class, 
-						 mi); 
-	 --i >= 0; 
-	 handler_info++) {
+    int max_stack_size = JVM_GetMethodIxMaxStack(env, context->class, mi);
+    int i = JVM_GetMethodIxExceptionTableLength(env, context->class, mi);
+    if (max_stack_size < 1 && i > 0) {
+        // If the method contains exception handlers, it must have room
+        // on the expression stack for the exception that the VM could push
+        CCerror(context, "Stack size too large");
+    }
+    for (; --i >= 0; handler_info++) {
         JVM_ExceptionTableEntryType einfo;
 	stack_item_type *stack_item = NEW(stack_item_type, 1);
 
--- a/j2se/src/share/native/java/net/Inet6Address.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/native/java/net/Inet6Address.c	Fri May 25 00:49:14 2007 +0000
@@ -35,6 +35,7 @@
 jclass ia6_class;
 jfieldID ia6_ipaddressID;
 jfieldID ia6_scopeidID;
+jfieldID ia6_cachedscopeidID;
 jfieldID ia6_scopeidsetID;
 jfieldID ia6_scopeifnameID;
 jfieldID ia6_scopeifnamesetID;
@@ -55,6 +56,8 @@
     CHECK_NULL(ia6_ipaddressID);
     ia6_scopeidID = (*env)->GetFieldID(env, ia6_class, "scope_id", "I");
     CHECK_NULL(ia6_scopeidID);
+    ia6_cachedscopeidID = (*env)->GetFieldID(env, ia6_class, "cached_scope_id", "I");
+    CHECK_NULL(ia6_cachedscopeidID);
     ia6_scopeidsetID = (*env)->GetFieldID(env, ia6_class, "scope_id_set", "Z");
     CHECK_NULL(ia6_scopeidID);
     ia6_scopeifnameID = (*env)->GetFieldID(env, ia6_class, "scope_ifname", "Ljava/net/NetworkInterface;");
--- a/j2se/src/share/native/java/net/net_util.h	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/share/native/java/net/net_util.h	Fri May 25 00:49:14 2007 +0000
@@ -60,12 +60,6 @@
 extern jclass ia4_class;
 extern jmethodID ia4_ctrID;
 
-extern jclass ia6_class;
-extern jfieldID ia6_ipaddressID;
-extern jfieldID ia6_scopeidID;
-extern jfieldID ia6_scopeidsetID;
-extern jmethodID ia6_ctrID;
-
 /* NetworkInterface fields */
 extern jclass ni_class;
 extern jfieldID ni_nameID;
@@ -93,6 +87,7 @@
 extern jclass ia6_class;
 extern jfieldID ia6_ipaddressID;
 extern jfieldID ia6_scopeidID;
+extern jfieldID ia6_cachedscopeidID;
 extern jfieldID ia6_scopeidsetID;
 extern jfieldID ia6_scopeifnameID;
 extern jfieldID ia6_scopeifnamesetID;
@@ -119,7 +114,7 @@
 NET_AllocSockaddr(struct sockaddr **him, int *len);
 
 JNIEXPORT int JNICALL
-NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port, struct sockaddr *him, int *len, jboolean isLocalAddr);
+NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port, struct sockaddr *him, int *len);
 
 jobject
 NET_SockaddrToInetAddress(JNIEnv *env, struct sockaddr *him, int *port);
--- a/j2se/src/solaris/bin/java_md.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/bin/java_md.c	Fri May 25 00:49:14 2007 +0000
@@ -650,7 +650,7 @@
 
     libjvm = dlopen(jvmpath, RTLD_NOW + RTLD_GLOBAL);
     if (libjvm == NULL) {
-#if defined(__sparc) && !defined(_LP64) /* i.e. 32-bit sparc */
+#if defined(__solaris__) && defined(__sparc) && !defined(_LP64) /* i.e. 32-bit sparc */
       FILE * fp;
       Elf32_Ehdr elf_head;
       int count;
@@ -991,9 +991,9 @@
   return result;
 }
 
-#if defined(__sun) && defined(__sparc)
+#if defined(__sparc)
 
-/* Methods for solaris-sparc: these are easy. */
+/* Methods for solaris-sparc and linux-sparc: these are easy. */
 
 /* Ask the OS how many processors there are. */
 unsigned long 
@@ -1006,9 +1006,9 @@
   return sys_processors;
 }
 
-/* The solaris-sparc version of the "server-class" predicate. */
+/* The sparc version of the "server-class" predicate. */
 jboolean
-solaris_sparc_ServerClassMachine(void) {
+unix_sparc_ServerClassMachine(void) {
   jboolean            result            = JNI_FALSE;
   /* How big is a server class machine? */
   const unsigned long server_processors = 2UL;
@@ -1023,13 +1023,13 @@
     }
   }
   if (_launcher_debug) {
-    printf("solaris_" LIBARCHNAME "_ServerClassMachine: %s\n",
+    printf("unix_" LIBARCHNAME "_ServerClassMachine: %s\n",
            (result == JNI_TRUE ? "JNI_TRUE" : "JNI_FALSE"));
   }
   return result;
 }
 
-#endif /* __sun && __sparc */
+#endif /* __sparc */
 
 #if defined(__sun) && defined(i586)
 
@@ -1369,7 +1369,7 @@
 #elif defined(ALWAYS_ACT_AS_SERVER_CLASS_MACHINE)
   result = JNI_TRUE;
 #elif defined(__sun) && defined(__sparc)
-  result = solaris_sparc_ServerClassMachine();
+  result = unix_sparc_ServerClassMachine();
 #elif defined(__sun) && defined(i586)
   result = solaris_i586_ServerClassMachine();
 #elif defined(__linux__) && defined(i586)
--- a/j2se/src/solaris/hpi/native_threads/src/sys_api_td.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/hpi/native_threads/src/sys_api_td.c	Fri May 25 00:49:14 2007 +0000
@@ -542,12 +542,12 @@
 
 int
 sysAccept(int fd, struct sockaddr *him, int *len) {
-    INTERRUPT_IO(accept(fd, him, len))
+    INTERRUPT_IO(accept(fd, him, (uint *)len))
 }
 
 int
 sysGetSockName(int fd, struct sockaddr *him, int *len) {
-    return getsockname(fd, him, len);
+    return getsockname(fd, him, (uint *)len);
 }
 
 int
@@ -602,5 +602,5 @@
 ssize_t
 sysRecvFrom(int fd, char *buf, int nBytes, 
 	    int flags, struct sockaddr *from, int *fromlen) {
-    INTERRUPT_IO(recvfrom(fd, buf, nBytes, flags, from, fromlen))
+    INTERRUPT_IO(recvfrom(fd, buf, nBytes, flags, from, (uint *)fromlen))
 }
--- a/j2se/src/solaris/native/java/net/PlainDatagramSocketImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/native/java/net/PlainDatagramSocketImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -245,7 +245,7 @@
     }
  
     /* bind */
-    if (NET_InetAddressToSockaddr(env, iaObj, localport, (struct sockaddr *)&him, &len, JNI_TRUE) != 0) {
+    if (NET_InetAddressToSockaddr(env, iaObj, localport, (struct sockaddr *)&him, &len) != 0) {
       return;
     }
 
@@ -310,7 +310,7 @@
 	return;
     }
 
-    if (NET_InetAddressToSockaddr(env, address, port, (struct sockaddr *)&rmtaddr, &len, JNI_FALSE) != 0) {
+    if (NET_InetAddressToSockaddr(env, address, port, (struct sockaddr *)&rmtaddr, &len) != 0) {
       return;
     }
 
@@ -460,7 +460,7 @@
 	rmtaddrP = 0;
     } else {
 	packetPort = (*env)->GetIntField(env, packet, dp_portID);
-	if (NET_InetAddressToSockaddr(env, packetAddress, packetPort, (struct sockaddr *)&rmtaddr, &len, JNI_FALSE) != 0) {
+	if (NET_InetAddressToSockaddr(env, packetAddress, packetPort, (struct sockaddr *)&rmtaddr, &len) != 0) {
 	  return;
 	}
     }
--- a/j2se/src/solaris/native/java/net/PlainSocketImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/native/java/net/PlainSocketImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -345,7 +345,7 @@
     }
 
     /* connect */
-    if (NET_InetAddressToSockaddr(env, iaObj, port, (struct sockaddr *)&him, &len, JNI_FALSE) != 0) {
+    if (NET_InetAddressToSockaddr(env, iaObj, port, (struct sockaddr *)&him, &len) != 0) {
       return;
     }
 
@@ -638,7 +638,7 @@
     }
 
     /* bind */
-    if (NET_InetAddressToSockaddr(env, iaObj, localport, (struct sockaddr *)&him, &len, JNI_TRUE) != 0) {
+    if (NET_InetAddressToSockaddr(env, iaObj, localport, (struct sockaddr *)&him, &len) != 0) {
       return;
     }
 
--- a/j2se/src/solaris/native/java/net/net_util_md.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/native/java/net/net_util_md.c	Fri May 25 00:49:14 2007 +0000
@@ -128,6 +128,23 @@
     return kernelV22;
 }
 
+static int kernelV24 = 0;
+static int vinit24 = 0;
+
+int kernelIsV24 () {
+    if (!vinit24) {
+	struct utsname sysinfo;
+	if (uname(&sysinfo) == 0) {
+	    sysinfo.release[3] = '\0';
+	    if (strcmp(sysinfo.release, "2.4") == 0) {
+		kernelV24 = JNI_TRUE;
+	    }
+	}
+	vinit24 = 1;
+    }
+    return kernelV24;
+}
+
 int getScopeID (struct sockaddr *him) {
     struct sockaddr_in6_ext *hext = (struct sockaddr_in6_ext *)him;
     if (kernelIsV22()) {
@@ -630,7 +647,7 @@
 #endif
 
 JNIEXPORT int JNICALL
-NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port, struct sockaddr *him, int *len, jboolean isLocalAddr) {
+NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port, struct sockaddr *him, int *len) {
     jint family;
     family = (*env)->GetIntField(env, iaObj, ia_familyID);
 #ifdef AF_INET6
@@ -681,54 +698,40 @@
 	 */
 #ifdef __linux__
 	if (IN6_IS_ADDR_LINKLOCAL(&(him6->sin6_addr))) {
-	    static jclass cls;
-	    static jfieldID cached_scopeID, scopeID;
 	    int cached_scope_id = 0, scope_id = 0;
 	    int old_kernel = kernelIsV22();
 
-	    /*
-	     * On first call get the JNI references
-	     */	    
-	    if (cls == NULL) {
-	        jclass c = (*env)->FindClass(env, "java/net/Inet6Address");
-		if (c != NULL) {
-		    cls = (*env)->NewGlobalRef(env, c);
-		    if (cls != NULL) {
-			cached_scopeID = (*env)->GetFieldID(env, cls, "cached_scope_id", "I");
-			scopeID = (*env)->GetFieldID(env, cls, "scope_id", "I");
-		    }
-		}
-	    }
-
-	    if (cached_scopeID && !old_kernel) {
-	        cached_scope_id = (int)(*env)->GetIntField(env, iaObj, cached_scopeID);
+	    if (ia6_cachedscopeidID && !old_kernel) {
+	        cached_scope_id = (int)(*env)->GetIntField(env, iaObj, ia6_cachedscopeidID);
 		/* if cached value exists then use it. Otherwise, check
  		 * if scope is set in the address.
 		 */
 		if (!cached_scope_id) {
-	            if (scopeID) {
-	                scope_id = (int)(*env)->GetIntField(env,iaObj,scopeID);
+	            if (ia6_scopeidID) {
+	                scope_id = (int)(*env)->GetIntField(env,iaObj,ia6_scopeidID);
 	            }
 		    if (scope_id != 0) {
 		    	/* check user-specified value for loopback case
-		     	 * that needs to be overridden, but only for
-			 * destination addresses, not for binding a local addr
+		     	 * that needs to be overridden
 		     	 */
-			if (!isLocalAddr && needsLoopbackRoute (&him6->sin6_addr)) {
+			if (kernelIsV24() && needsLoopbackRoute (&him6->sin6_addr)) {
 			    cached_scope_id = lo_scope_id;
-		            (*env)->SetIntField(env, iaObj, cached_scopeID, cached_scope_id);
+		            (*env)->SetIntField(env, iaObj, ia6_cachedscopeidID, cached_scope_id);
 			}
 		    } else {
 	    		/*
 	     		 * Otherwise consult the IPv6 routing tables to
 	     		 * try determine the appropriate interface.
 	     		 */
-			if (isLocalAddr) {
-		            cached_scope_id = getLocalScopeID( (char *)&(him6->sin6_addr) );
+                        if (kernelIsV24()) {
+		            cached_scope_id = getDefaultIPv6Interface( &(him6->sin6_addr) );
 			} else {
-		            cached_scope_id = getDefaultIPv6Interface( &(him6->sin6_addr) );
-			}
-		        (*env)->SetIntField(env, iaObj, cached_scopeID, cached_scope_id);
+                            cached_scope_id = getLocalScopeID( (char *)&(him6->sin6_addr) );
+                            if (cached_scope_id == 0) {
+                                cached_scope_id = getDefaultIPv6Interface( &(him6->sin6_addr) );
+                            }
+                        }
+		        (*env)->SetIntField(env, iaObj, ia6_cachedscopeidID, cached_scope_id);
 		    }
 		}
 	    }
@@ -750,24 +753,8 @@
         /* handle scope_id for solaris */
 
 	if (family != IPv4) {
-            static jclass cls;
-            static jfieldID scopeID;
-    
-            /*
-             * On first call get the JNI references
-             */	    
-            if (cls == NULL) {
-	        jclass c = (*env)->FindClass(env, "java/net/Inet6Address");
-	        if (c != NULL) {
-	            cls = (*env)->NewGlobalRef(env, c);
-	            if (cls != NULL) {
-		        scopeID = (*env)->GetFieldID(env, cls, "scope_id", "I");
-	            }
-	        }
-            }
-    
-            if (scopeID) {
-	        him6->sin6_scope_id = (int)(*env)->GetIntField(env, iaObj, scopeID);
+            if (ia6_scopeidID) {
+	        him6->sin6_scope_id = (int)(*env)->GetIntField(env, iaObj, ia6_scopeidID);
             }
         }
 #endif
--- a/j2se/src/solaris/native/java/net/net_util_md.h	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/native/java/net/net_util_md.h	Fri May 25 00:49:14 2007 +0000
@@ -161,6 +161,7 @@
  */
 #ifdef __linux__
 extern int kernelIsV22();
+extern int kernelIsV24();
 #endif
 
 void NET_ThrowByNameWithLastError(JNIEnv *env, const char *name,
--- a/j2se/src/solaris/native/sun/nio/ch/DatagramChannelImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/native/sun/nio/ch/DatagramChannelImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -215,7 +215,7 @@
     
     if (NET_InetAddressToSockaddr(env, destAddress, destPort,
 				  (struct sockaddr *)&sa, 
-				  &sa_len, JNI_FALSE) != 0) {
+				  &sa_len) != 0) {
       return IOS_THROWN;
     }
       
--- a/j2se/src/solaris/native/sun/nio/ch/Net.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/solaris/native/sun/nio/ch/Net.c	Fri May 25 00:49:14 2007 +0000
@@ -91,7 +91,7 @@
     int sa_len = SOCKADDR_LEN;
     int rv = 0;
 
-    if (NET_InetAddressToSockaddr(env, ia, port, (struct sockaddr *)&sa, &sa_len, JNI_TRUE) != 0) {
+    if (NET_InetAddressToSockaddr(env, ia, port, (struct sockaddr *)&sa, &sa_len) != 0) {
       return;
     }
 
@@ -110,7 +110,7 @@
     int sa_len = SOCKADDR_LEN;
     int rv;
 
-    if (NET_InetAddressToSockaddr(env, iao, port, (struct sockaddr *) &sa, &sa_len, JNI_FALSE) != 0) {
+    if (NET_InetAddressToSockaddr(env, iao, port, (struct sockaddr *) &sa, &sa_len) != 0) {
       return IOS_THROWN;
     }
 
--- a/j2se/src/windows/native/java/net/PlainDatagramSocketImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/windows/native/java/net/PlainDatagramSocketImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -455,7 +455,7 @@
 	address = (*env)->GetIntField(env, addressObj, ia_addressID);
     }
 
-    if (NET_InetAddressToSockaddr(env, addressObj, port, (struct sockaddr *)&lcladdr, &lcladdrlen, JNI_TRUE) != 0) {
+    if (NET_InetAddressToSockaddr(env, addressObj, port, (struct sockaddr *)&lcladdr, &lcladdrlen) != 0) {
       return;
     }
 
@@ -574,7 +574,7 @@
     	res = WSAIoctl(fdc,SIO_UDP_CONNRESET,&t,sizeof(t),&x1,sizeof(x1),&x2,0,0);
     }
 
-    if (NET_InetAddressToSockaddr(env, address, port,(struct sockaddr *)&rmtaddr, &rmtaddrlen, JNI_FALSE) != 0) {
+    if (NET_InetAddressToSockaddr(env, address, port,(struct sockaddr *)&rmtaddr, &rmtaddrlen) != 0) {
       return;
     }
 
@@ -696,7 +696,7 @@
 	addrp = 0; /* arg to JVM_Sendto () null in this case */
 	addrlen = 0;
     } else {
-      if (NET_InetAddressToSockaddr(env, iaObj, packetPort, (struct sockaddr *)&rmtaddr, &addrlen, JNI_FALSE) != 0) {
+      if (NET_InetAddressToSockaddr(env, iaObj, packetPort, (struct sockaddr *)&rmtaddr, &addrlen) != 0) {
 	return;
       }
     }
@@ -2406,7 +2406,7 @@
 	return;
     } 
 
-    if (NET_InetAddressToSockaddr(env, iaObj, 0, (struct sockaddr *)&name, &len, JNI_TRUE) != 0) {
+    if (NET_InetAddressToSockaddr(env, iaObj, 0, (struct sockaddr *)&name, &len) != 0) {
       return;
     }
 
--- a/j2se/src/windows/native/java/net/PlainSocketImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/windows/native/java/net/PlainSocketImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -57,8 +57,6 @@
 jfieldID psi_trafficClassID;
 jfieldID psi_serverSocketID;
 jfieldID psi_lastfdID;
-jfieldID ia6_ipaddressID;
-jfieldID ia6_scopeidID;
 
 /*
  * the level of the TCP protocol for setsockopt and getsockopt
@@ -223,7 +221,7 @@
 	return;
     }
 
-    if (NET_InetAddressToSockaddr(env, iaObj, port, (struct sockaddr *)&him, &len, JNI_FALSE) != 0) {
+    if (NET_InetAddressToSockaddr(env, iaObj, port, (struct sockaddr *)&him, &len) != 0) {
       return;
     }
 
@@ -436,7 +434,7 @@
     }
 
     if (NET_InetAddressToSockaddr(env, iaObj, localport, 
-			  (struct sockaddr *)&him, &len, JNI_TRUE) != 0) {
+			  (struct sockaddr *)&him, &len) != 0) {
       return;
     }
 
@@ -538,7 +536,7 @@
 	return;
     }
     if (NET_InetAddressToSockaddr(env, address, 0, (struct sockaddr *)&addr, 
-				  &addrlen, JNI_TRUE) != 0) {
+				  &addrlen) != 0) {
       return;
     }
 
--- a/j2se/src/windows/native/java/net/net_util_md.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/windows/native/java/net/net_util_md.c	Fri May 25 00:49:14 2007 +0000
@@ -790,12 +790,43 @@
     return 0;
 }
     
+/*
+ * Determine the default interface for an IPv6 address.
+ *
+ * Returns :-
+ *      0 if error
+ *      > 0 interface index to use
+ */
+jint getDefaultIPv6Interface(JNIEnv *env, struct SOCKADDR_IN6 *target_addr)
+{
+    int ret;
+    DWORD b;
+    struct sockaddr_in6 route;
+    SOCKET fd = socket(AF_INET6, SOCK_STREAM, 0);
+    if (fd == INVALID_SOCKET) {
+	return 0;
+    }
+    
+    ret = WSAIoctl(fd, SIO_ROUTING_INTERFACE_QUERY,
+                    (void *)target_addr, sizeof(struct sockaddr_in6),
+                    (void *)&route, sizeof(struct sockaddr_in6),
+                    &b, 0, 0);
+    if (ret < 0) {
+        // error
+        closesocket(fd);
+        return 0;
+    } else {
+        closesocket(fd);
+        return route.sin6_scope_id;
+    }
+}
+
 /* If address types is IPv6, then IPv6 must be available. Otherwise
  * no address can be generated. Note if an IPv4 mapped address is passed
  * an IPv4 sockaddr_in will be returned.
  */
 JNIEXPORT int JNICALL
-NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port, struct sockaddr *him, int *len, jboolean isLocalAddr) {
+NET_InetAddressToSockaddr(JNIEnv *env, jobject iaObj, int port, struct sockaddr *him, int *len) {
     jint family, iafam;
     iafam = (*env)->GetIntField(env, iaObj, ia_familyID);
     family = (iafam == IPv4)? AF_INET : AF_INET6;
@@ -804,16 +835,22 @@
 	jbyteArray ipaddress;
 	jbyte caddr[16];
 	jint address, scopeid;
+        jint cached_scope_id;
 
     	ipaddress = (*env)->GetObjectField(env, iaObj, ia6_ipaddressID);
     	scopeid = (jint)(*env)->GetIntField(env, iaObj, ia6_scopeidID);
+        cached_scope_id = (jint)(*env)->GetIntField(env, iaObj, ia6_cachedscopeidID);
     	(*env)->GetByteArrayRegion(env, ipaddress, 0, 16, caddr);
 
 	memset((char *)him6, 0, sizeof(struct SOCKADDR_IN6));
 	him6->sin6_port = (u_short) htons((u_short)port);
 	memcpy((void *)&(him6->sin6_addr), caddr, sizeof(struct in6_addr) );
 	him6->sin6_family = AF_INET6; 
-	him6->sin6_scope_id = scopeid;
+        if (!scopeid && !cached_scope_id) {
+            cached_scope_id = getDefaultIPv6Interface(env, him6);
+            (*env)->SetIntField(env, iaObj, ia6_cachedscopeidID, cached_scope_id);
+        }
+        him6->sin6_scope_id = scopeid != 0 ? scopeid : cached_scope_id;
 	*len = sizeof(struct SOCKADDR_IN6) ;
     } else {
     	struct sockaddr_in *him4 = (struct sockaddr_in*)him;
--- a/j2se/src/windows/native/sun/nio/ch/DatagramChannelImpl.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/windows/native/sun/nio/ch/DatagramChannelImpl.c	Fri May 25 00:49:14 2007 +0000
@@ -282,7 +282,7 @@
 
     if (NET_InetAddressToSockaddr(env, destAddress, destPort,
 				  (struct sockaddr *)&psa, 
-				   &sa_len, JNI_FALSE) != 0) {
+				   &sa_len) != 0) {
       return IOS_THROWN;
     }
       
--- a/j2se/src/windows/native/sun/nio/ch/Net.c	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/src/windows/native/sun/nio/ch/Net.c	Fri May 25 00:49:14 2007 +0000
@@ -80,7 +80,7 @@
     int rv;
     int sa_len = sizeof(sa);
 
-   if (NET_InetAddressToSockaddr(env, iao, port, (struct sockaddr *)&sa, &sa_len, JNI_TRUE) != 0) {
+   if (NET_InetAddressToSockaddr(env, iao, port, (struct sockaddr *)&sa, &sa_len) != 0) {
       return;
     }
 
@@ -97,7 +97,7 @@
     int rv;
     int sa_len = sizeof(sa);
 
-   if (NET_InetAddressToSockaddr(env, iao, port, (struct sockaddr *)&sa, &sa_len, JNI_FALSE) != 0) {
+   if (NET_InetAddressToSockaddr(env, iao, port, (struct sockaddr *)&sa, &sa_len) != 0) {
       return IOS_THROWN;
     }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/com/sun/jdi/JdbReadTwiceTest.sh	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,262 @@
+#!/bin/sh
+
+#
+# Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+#
+
+#  
+#  @test @(#)JdbReadTwiceTest.sh	1.2 07/04/10
+#  @bug 4981536
+#  @summary TTY: .jdbrc is read twice if jdb is run in the user's home dir
+#  @author jjh
+#  @run shell JdbReadTwiceTest.sh
+
+#Set appropriate jdk 
+if [ ! -z "$TESTJAVA" ] ; then
+     jdk="$TESTJAVA"
+else
+     echo "--Error: TESTJAVA must be defined as the pathname of a jdk to test."
+     exit 1
+fi
+
+if [ -z "$TESTCLASSES" ] ; then
+     echo "--Error: TESTCLASSES must be defined."
+     exit 1
+fi
+
+case `uname -s` in
+    Linux)
+      # Need this to convert to the /.automount/... form which
+      # is what jdb will report when it reads an init file.
+      echo TESTCLASSES=$TESTCLASSES
+      TESTCLASSES=`(cd $TESTCLASSES; /bin/pwd)`
+      echo TESTCLASSES=$TESTCLASSES
+      ;;
+esac
+
+# All output will go under this dir.  We define HOME to
+# be under here too, and pass it into jdb, to avoid problems
+# with java choosing a value of HOME.
+baseDir="$TESTCLASSES/jdbRead$$"
+HOME="$baseDir/home"
+mkdir -p "$HOME"
+
+tmpResult="$baseDir/result"
+fred="$baseDir/fred"
+here="$baseDir"
+jdbFiles="$HOME/jdb.ini $HOME/.jdbrc $here/jdb.ini $here/.jdbrc $tmpResult $fred"
+
+cd $here
+failed=
+
+
+mkFiles()
+{
+    touch "$@"
+}
+
+doit()
+{
+    echo quit | $TESTJAVA/bin/jdb -J-Duser.home=$HOME > $tmpResult 2>&1
+}
+
+failIfNot()
+{
+    # $1 is the expected number of occurrences of $2 in the jdb output.
+    count=$1
+    shift
+    if [ -r c:/ ] ; then
+       sed -e 's@\\@/@g' $tmpResult > $tmpResult.1
+       mv $tmpResult.1 $tmpResult
+    fi
+    xx=`fgrep -i "$*" $tmpResult | wc -l`
+    if [ $xx != $count ] ; then
+        echo "Failed: Expected $count, got $xx: $*"
+        echo "-----"
+        cat $tmpResult
+        echo "-----"
+        failed=1
+    else
+        echo "Passed: Expected $count, got $xx: $*"
+    fi        
+}
+
+clean()
+{
+    rm -f $jdbFiles
+}
+
+# Note:  If jdb reads a file, it outputs a message containing
+#         from: filename
+# If jdb can't read a file, it outputs a message containing
+#         open: filename
+
+
+echo
+echo "+++++++++++++++++++++++++++++++++++"
+echo "Verify each individual file is read"
+mkFiles $HOME/jdb.ini
+    doit
+    failIfNot 1 "from $HOME/jdb.ini"
+    clean
+
+mkFiles $HOME/.jdbrc
+    doit
+    failIfNot 1 "from $HOME/.jdbrc"
+    clean
+
+mkFiles $here/jdb.ini
+    doit
+    failIfNot 1 "from $here/jdb.ini"
+    clean
+
+mkFiles $here/.jdbrc
+    doit
+    failIfNot 1 "from $here/.jdbrc"
+    clean
+
+
+cd $HOME
+echo
+echo "+++++++++++++++++++++++++++++++++++"
+echo "Verify files are not read twice if cwd is ~"
+mkFiles $HOME/jdb.ini
+    doit
+    failIfNot 1 "from $HOME/jdb.ini"
+    clean
+
+mkFiles $HOME/.jdbrc
+    doit
+    failIfNot 1 "from $HOME/.jdbrc"
+    clean
+cd $here
+
+
+echo
+echo "+++++++++++++++++++++++++++++++++++"
+echo "If jdb.ini and both .jdbrc exist, don't read .jdbrc"
+mkFiles $HOME/jdb.ini $HOME/.jdbrc
+    doit
+    failIfNot 1  "from $HOME/jdb.ini" 
+    failIfNot 0  "from $HOME/.jdbrc"
+    clean
+
+
+echo
+echo "+++++++++++++++++++++++++++++++++++"
+echo "If files exist in both ~ and ., read both"
+mkFiles $HOME/jdb.ini $here/.jdbrc
+    doit
+    failIfNot 1  "from $HOME/jdb.ini" 
+    failIfNot 1  "from $here/.jdbrc"
+    clean
+
+mkFiles $HOME/.jdbrc $here/jdb.ini
+    doit
+    failIfNot 1  "from $HOME/.jdbrc"
+    failIfNot 1  "from $here/jdb.ini"
+    clean
+
+
+if [ ! -r c:/ ] ; then
+    # No symlinks on windows.
+    echo
+    echo "+++++++++++++++++++++++++++++++++++"
+    echo "Don't read a . file that is a symlink to a ~ file"
+    mkFiles $HOME/jdb.ini
+    ln -s $HOME/jdb.ini $here/.jdbrc
+    doit
+    failIfNot 1  "from $HOME/jdb.ini"
+    failIfNot 0  "from $here/.jdbrc"
+    clean
+fi
+
+
+if [ ! -r c:/ ] ; then
+    # No symlinks on windows.
+    echo
+    echo "+++++++++++++++++++++++++++++++++++"
+    echo "Don't read a . file that is a target symlink of a ~ file"
+    mkFiles $here/jdb.ini
+    ln -s $here/jdb.ini $HOME/.jdbrc
+    doit
+    failIfNot 1  "from $here/jdb.ini"
+    failIfNot 0  "from $HOME/.jdbrc"
+    clean
+fi
+
+
+if [ ! -r c:/ ] ; then
+    # Can't make a file unreadable under MKS.
+    echo
+    echo "+++++++++++++++++++++++++++++++++++"
+    echo "Read an unreadable file - verify the read fails."
+    # If the file exists, we try to read it.  The
+    # read will fail.
+    mkFiles $HOME/jdb.ini
+        chmod a-r $HOME/jdb.ini
+        doit
+        failIfNot 1 "open: $HOME/jdb.ini"
+        clean
+fi
+
+
+echo
+echo "+++++++++++++++++++++++++++++++++++"
+echo "Read a directory - verify the read fails"
+# If the file (IE. directory) exists, we try to read it.  The
+# read will fail.
+mkdir $HOME/jdb.ini
+    doit
+    failIfNot 1 "open: $HOME/jdb.ini"
+    rmdir $HOME/jdb.ini
+    
+
+echo "read $fred" > $here/jdb.ini
+    echo
+    echo "+++++++++++++++++++++++++++++++++++"
+    echo "Verify the jdb read command still works"
+    touch $fred
+    doit
+    failIfNot 1 "from $fred"
+
+    if [ ! -r c:/ ] ; then
+        # Can't make a file unreadable under MKS
+        chmod a-r $fred
+        doit
+        failIfNot 1 "open: $fred"
+    fi
+    rm -f $fred
+    mkdir $fred
+    doit
+    failIfNot 1 "open: $fred"
+    rmdir $fred
+
+clean
+
+
+if [ "$failed" = 1 ] ; then
+    echo "One or more tests failed"
+    exit 1
+fi
+
+echo "All tests passed"
--- a/j2se/test/com/sun/jdi/ShellScaffold.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/com/sun/jdi/ShellScaffold.sh	Fri May 25 00:49:14 2007 +0000
@@ -23,7 +23,7 @@
 # have any questions.
 #
 
-# @(#)ShellScaffold.sh	1.22 07/05/05
+# @(#)ShellScaffold.sh	1.23 07/05/09
 #
 # jtreg runs this in a scratch dir.
 # It (and runregress -no) sets these env vars:
@@ -132,7 +132,7 @@
 # This can be increased if timing seems to be an issue.
 sleep_seconds=1
 
-echo "ShellScaffold.sh: Version 07/05/05" >& 2
+echo "ShellScaffold.sh: Version 07/05/09" >& 2
 
 cleanup()
 {
@@ -238,7 +238,8 @@
              psCmd="/usr/ucb/ps -axwww"
          else
              ulimit -c 0
-             psCmd="ps -axwww"
+             # See bug 6238593.
+             psCmd="ps axwww"
          fi
          ;;
        *)
@@ -751,7 +752,7 @@
 	     #     and will fail  with a "ps: no controlling terminal" error.
 	     #     Running under 'rsh' will cause this ps error.
              # cygwin ps puts an I in column 1 for some reason.
-            ps -e | $grep '^I* *'"$jdbpid" > $devnull 2>&1
+             ps -e | $grep '^I* *'"$jdbpid " > $devnull 2>&1
         else
             # mks 6.2a on win98 has $! getting a negative
             # number and in ps, it shows up as 0x...
@@ -781,7 +782,7 @@
         myCount=`expr $myCount + 1`
         if [ $myCount = 30 ] ; then
             echo "WaitForFinish waited for 30; jdb still running" >&2
-            ps -e | $grep '^I* *'"$jdbpid"  >&2
+            ps -e | $grep '^I* *'"$jdbpid "  >&2
             myCount=0
         fi
     done
@@ -872,8 +873,21 @@
     startJdb 
     startDebuggee
     waitForFinish
+
+    # in hs_err file from 1.3.1
     debuggeeFailIfPresent "Virtual Machine Error"
+
+    # in hs_err file from 1.4.2, 1.5:  An unexpected error
+    debuggeeFailIfPresent "An unexpected error"
+
+    # in hs_err file from 1.4.2, 1.5:  Internal error
+    debuggeeFailIfPresent "Internal error"
+
+
+    # Don't know how this arises
     debuggeeFailIfPresent "An unexpected exception"
+
+    # Don't know how this arises
     debuggeeFailIfPresent "Internal exception"
 }
 
--- a/j2se/test/com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh	Fri May 25 00:49:14 2007 +0000
@@ -22,7 +22,7 @@
 #
 
 #
-# @test    @(#)GetMaxFileDescriptorCount.sh	1.6 02:19:53
+# @test    @(#)GetMaxFileDescriptorCount.sh	1.6 00:55:05
 # @bug     4858522
 # @summary 
 # @author  Steve Bohne
--- a/j2se/test/com/sun/management/UnixOperatingSystemMXBean/GetOpenFileDescriptorCount.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/com/sun/management/UnixOperatingSystemMXBean/GetOpenFileDescriptorCount.sh	Fri May 25 00:49:14 2007 +0000
@@ -22,7 +22,7 @@
 #
 
 #
-# @test    @(#)GetOpenFileDescriptorCount.sh	1.6 02:19:53
+# @test    @(#)GetOpenFileDescriptorCount.sh	1.6 00:55:05
 # @bug     4858522
 # @summary 
 # @author  Steve Bohne
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/lang/management/MemoryPoolMXBean/ThresholdTest.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * 
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * 
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * 
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * @test    @(#)ThresholdTest.java	1.1 07/04/20
+ * @bug     6546089
+ * @summary Basic unit test of MemoryPoolMXBean.isUsageThresholdExceeded() and
+ *          MemoryPoolMXBean.isCollectionThresholdExceeded().
+ * @author  Mandy Chung
+ *
+ * @run main ThresholdTest
+ */
+
+import java.lang.management.*;
+import java.util.*;
+
+public class ThresholdTest {
+    public static void main(String args[]) throws Exception {
+        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+        for (MemoryPoolMXBean p : pools) {
+            // verify if isUsageThresholdExceeded() returns correct value
+            checkUsageThreshold(p);
+            // verify if isCollectionUsageThresholdExceeded() returns correct value
+            checkCollectionUsageThreshold(p);
+        }
+
+        System.out.println("Test passed.");
+    }
+
+    private static void checkUsageThreshold(MemoryPoolMXBean p) throws Exception {
+
+        if (!p.isUsageThresholdSupported()) {
+            return;
+        }
+
+        long threshold = p.getUsageThreshold();
+        if (threshold != 0) {
+            // Expect the default threshold is zero (disabled)
+            throw new RuntimeException("TEST FAILED: " +
+                "Pool " + p.getName() + 
+                " has non-zero threshold (" + threshold); 
+        }
+  
+        // isUsageThresholdExceeded() should return false if threshold == 0
+        if (p.isUsageThresholdExceeded()) {
+            throw new RuntimeException("TEST FAILED: " +
+                "Pool " + p.getName() + 
+                " isUsageThresholdExceeded() returned true" +
+                " but threshold = 0"); 
+        }
+ 
+        p.setUsageThreshold(1);
+        MemoryUsage u = p.getUsage();
+        if (u.getUsed() >= 1) { 
+            if (!p.isUsageThresholdExceeded()) {
+                throw new RuntimeException("TEST FAILED: " +
+                    "Pool " + p.getName() + 
+                    " isUsageThresholdExceeded() returned false but " +
+                    " threshold(" + p.getUsageThreshold() +
+                    ") <= used(" + u.getUsed() + ")"); 
+            }
+        } else {
+            if (p.isUsageThresholdExceeded()) {
+                throw new RuntimeException("TEST FAILED: " +
+                    "Pool " + p.getName() + 
+                    " isUsageThresholdExceeded() returned true but" +
+                    " threshold(" + p.getUsageThreshold() +
+                    ") > used(" + u.getUsed() + ")"); 
+            }
+        }
+
+        // disable low memory detection and isUsageThresholdExceeded() 
+        // should return false
+        p.setUsageThreshold(0);
+        if (p.isUsageThresholdExceeded()) {
+            throw new RuntimeException("TEST FAILED: " +
+                "Pool " + p.getName() + 
+                " isUsageThresholdExceeded() returned true but threshold = 0"); 
+        }
+    }
+
+    private static void checkCollectionUsageThreshold(MemoryPoolMXBean p) throws Exception {
+
+        if (!p.isCollectionUsageThresholdSupported()) {
+            return;
+        }
+
+        long threshold = p.getCollectionUsageThreshold();
+        if (threshold != 0) {
+            // Expect the default threshold is zero (disabled)
+            throw new RuntimeException("TEST FAILED: " +
+                "Pool " + p.getName() + 
+                " has non-zero threshold (" + threshold); 
+        }
+  
+        // isCollectionUsageThresholdExceeded() should return false if threshold == 0
+        if (p.isCollectionUsageThresholdExceeded()) {
+            throw new RuntimeException("TEST FAILED: " +
+                "Pool " + p.getName() + 
+                " isCollectionUsageThresholdExceeded() returned true" +
+                " but threshold = 0"); 
+        }
+ 
+        p.setCollectionUsageThreshold(1);
+        MemoryUsage u = p.getCollectionUsage();
+        if (u == null) {
+            if (p.isCollectionUsageThresholdExceeded()) {
+                throw new RuntimeException("TEST FAILED: " +
+                    "Pool " + p.getName() + 
+                    " isCollectionUsageThresholdExceeded() returned true but" +
+                    " getCollectionUsage() return null");
+            }
+        } else if (u.getUsed() >= 1) { 
+            if (!p.isCollectionUsageThresholdExceeded()) {
+                throw new RuntimeException("TEST FAILED: " +
+                    "Pool " + p.getName() + 
+                    " isCollectionUsageThresholdExceeded() returned false but " +
+                    " threshold(" + p.getCollectionUsageThreshold() +
+                    ") < used(" + u.getUsed() + ")"); 
+            }
+        } else {
+            if (p.isCollectionUsageThresholdExceeded()) {
+                throw new RuntimeException("TEST FAILED: " +
+                    "Pool " + p.getName() + 
+                    " isCollectionUsageThresholdExceeded() returned true but" +
+                    " threshold(" + p.getCollectionUsageThreshold() +
+                    ") > used(" + u.getUsed() + ")"); 
+            }
+        }
+
+        // disable low memory detection and isCollectionUsageThresholdExceeded() 
+        // should return false
+        p.setCollectionUsageThreshold(0);
+        if (p.isCollectionUsageThresholdExceeded()) {
+            throw new RuntimeException("TEST FAILED: " +
+                "Pool " + p.getName() + 
+                " isCollectionUsageThresholdExceeded() returned true but threshold = 0"); 
+        }
+    }
+}
--- a/j2se/test/java/lang/management/OperatingSystemMXBean/GetSystemLoadAverage.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/java/lang/management/OperatingSystemMXBean/GetSystemLoadAverage.java	Fri May 25 00:49:14 2007 +0000
@@ -22,9 +22,9 @@
  */
 
 /*
- * @(#)GetSystemLoadAverage.java	1.4 07/05/05
- * 
- * @bug     6336608
+ * @(#)GetSystemLoadAverage.java	1.5 07/05/09
+ *
+ * @bug     6336608 6511738
  * @summary Basic unit test of OperatingSystemMXBean.getSystemLoadAverage()
  * @author  Mandy Chung 
  */
@@ -33,48 +33,98 @@
  * This test tests the load average on linux and solaris. On Windows,
  * getSystemLoadAverage() returns -1. 
  *
- * Usage: GetSystemLoadAverage <expected load avg>
+ * Usage: GetSystemLoadAverage ["-1.0"]
+ * Arguments: 
+ *   o If no argument is specified, the test will verify the system load
+ *     average with the /usr/bin/uptime command.
+ *   o Otherwise, the input argument must be "-1.0" indicating the
+ *     expected system load average.  This would only be the case when
+ *     running on Windows.
  */
 
 import java.lang.management.*;
+import java.io.*;
 
 public class GetSystemLoadAverage {
 
     private static OperatingSystemMXBean mbean =
 	ManagementFactory.getOperatingSystemMXBean();
 
-    private static boolean trace = false;
     // The system load average may be changing due to other jobs running.
     // Allow some delta.
     private static double DELTA = 0.05;
 
     public static void main(String args[]) throws Exception {
-	if (args.length != 1)  {
-	   throw new IllegalArgumentException("Unexpected number of args " + args.length);
+	if (args.length > 1)  {
+	    throw new IllegalArgumentException("Unexpected number of args " + args.length);
 	}
-	
-	double loadavg = mbean.getSystemLoadAverage();
-	System.out.println("System load average: " + loadavg);
 
-	double expectedLoadavg = Double.parseDouble(args[0]);
-        if (expectedLoadavg == -1.0) {
-            if (loadavg != expectedLoadavg) {
-                throw new RuntimeException("Expected load average : " +   
-    		    expectedLoadavg +
-    	            " but getSystemLoadAverage returned: " +
-     		    loadavg);
-            }
+        if (args.length == 0) {
+            // On Linux or Solaris
+            checkLoadAvg();
         } else {
-            double lowRange = expectedLoadavg * (1 - DELTA);
-            double highRange = expectedLoadavg * (1 + DELTA);
-            if (loadavg < lowRange || loadavg >  highRange) {
-                throw new RuntimeException("Expected load average : " +   
-    		    expectedLoadavg +
-    	            " but getSystemLoadAverage returned: " +
-     		    loadavg);
-	    }
+            // On Windows, the system load average is expected to be -1.0
+            if (!args[0].equals("-1.0")) {
+	        throw new IllegalArgumentException("Invalid argument: " + args[0]);
+            } else {
+                double loadavg = mbean.getSystemLoadAverage();
+                if (loadavg != -1.0) {
+                    throw new RuntimeException("Expected load average : -1.0" + 
+        	        " but getSystemLoadAverage returned: " +
+         		loadavg);
+                }
+            }
 	}
 
 	System.out.println("Test passed.");
     }
+
+    private static String LOAD_AVERAGE_TEXT = "load average:";
+    private static void checkLoadAvg() throws Exception {
+        // Obtain load average from OS command
+        ProcessBuilder pb = new ProcessBuilder("/usr/bin/uptime");
+        Process p = pb.start();
+        String output = commandOutput(p);
+
+        // obtain load average from OperatingSystemMXBean
+	double loadavg = mbean.getSystemLoadAverage();
+
+        // verify if two values are close
+        output = output.substring(output.lastIndexOf(LOAD_AVERAGE_TEXT) + 
+                                  LOAD_AVERAGE_TEXT.length());
+        System.out.println("Load average returned from uptime = " + output);
+    	System.out.println("getSystemLoadAverage() returned " + loadavg);
+
+        String[] lavg = output.split(",");
+        double expected = Double.parseDouble(lavg[0]);
+        double lowRange = expected * (1 - DELTA);
+        double highRange = expected * (1 + DELTA);
+
+        if (loadavg < lowRange || loadavg >  highRange) {
+            throw new RuntimeException("Expected load average : " +   
+    		    expected +
+    	            " but getSystemLoadAverage returned: " +
+     		    loadavg);
+	}
+    }
+
+    private static String commandOutput(Reader r) throws Exception {
+        StringBuilder sb = new StringBuilder();
+        int c;
+        while ((c = r.read()) > 0) {
+            if (c != '\r') {
+                sb.append((char) c);
+            }
+        }
+        return sb.toString();
+    }
+                                                                                
+    private static String commandOutput(Process p) throws Exception {
+        Reader r = new InputStreamReader(p.getInputStream(),"UTF-8");
+        String output = commandOutput(r);
+        p.waitFor();
+        p.exitValue();
+        return output;
+    }
+ 
 }
--- a/j2se/test/java/lang/management/OperatingSystemMXBean/TestSystemLoadAvg.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/java/lang/management/OperatingSystemMXBean/TestSystemLoadAvg.sh	Fri May 25 00:49:14 2007 +0000
@@ -22,22 +22,22 @@
 #
 
 # 
-# @test     @(#)TestSystemLoadAvg.sh	1.5 07/05/05
+# @test     @(#)TestSystemLoadAvg.sh	1.6 07/05/09
 # @summary  Tests OperatingSystemMXBean.getSystemLoadAverage() api.
 # @author   Mandy Chung 
-# @bug      6336608 6367473
+# @bug      6336608 6367473 6511738
 #
 # @run build GetSystemLoadAverage
-# @run shell TestSystemLoadAvg.sh
+# @run shell/timeout=300 TestSystemLoadAvg.sh
 #
 
 #
 # This test tests the system load average on linux and solaris.
-# On windows tests if it returns -1.0.
-#
-
-
-#set -x
+# On Windows tests if it returns -1.0. The verification is done
+# by the GetSystemLoadAverage class.  By default it takes no
+# input argument which verifies the system load average with
+# /usr/bin/uptime command. Or specify "-1.0" as the input argument
+# indicating that the platform doesn't support the system load average.
 
 #Set appropriate jdk
 #
@@ -55,25 +55,17 @@
    $TESTJAVA/bin/java -classpath $TESTCLASSES $@
 }
 
-getloadavg()
-{
-   load_avg=`/usr/bin/uptime |  sed -e 's/.*average: //' -e 's/,.*//'`
-}
-
 # Retry 5 times to be more resilent to system load fluctation.
 MAX=5
 i=1
 while true; do
-  load_avg=0
   echo "Run $i: TestSystemLoadAvg"
   case `uname -s` in
        SunOS )
-         getloadavg 
-         runOne GetSystemLoadAverage $load_avg
+         runOne GetSystemLoadAverage 
          ;;
        Linux )
-         getloadavg 
-         runOne GetSystemLoadAverage $load_avg
+         runOne GetSystemLoadAverage
          ;;
       * )
          # On Windows -1.0 should be returned
@@ -89,4 +81,6 @@
       exit 1
   fi
   i=`expr $i + 1`
+  # sleep for 5 seconds 
+  sleep 5
 done
--- a/j2se/test/java/lang/ref/SoftReference/Pin.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/java/lang/ref/SoftReference/Pin.java	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
  * have any questions.
  */
 
-/* @test 1.6 07/05/06
+/* @test 1.6 07/05/24
  * @bug 4076287
  * @summary Invoking get on a SoftReference shouldn't pin the referent
  * @run main/othervm -ms16m -mx16m Pin
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/net/ipv6tests/B6521014.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * 
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * 
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * 
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * @test 1.2 07/04/18
+ * @bug 6521014 6543428
+ * @summary IOException thrown when Socket tries to bind to a local IPv6 address on SuSE Linux
+ */
+
+
+import java.net.*;
+import java.io.*;
+import java.util.*;
+
+
+/*
+ *
+ * What this testcase is to test is a (weird) coupling through the
+ * cached_scope_id field of java.net.Inet6Address. Native method
+ * NET_InetAddressToSockaddr as in Linux platform will try to write
+ * and read this field, therefore Inet6Address becomes 'stateful'.
+ * So the coupling. Certain executive order, e.g. two methods use
+ * the same Inet6Address instance as illustrated in this test case,
+ * will show side effect of such coupling.
+ *
+ * And on Windows, NET_InetAddressToSockaddr() did not assign appropriate
+ * sin6_scope_id value to sockaddr_in6 structure if there's no one coming
+ * with Inet6Address instance, which caused bind exception. This test use
+ * link-local address without %scope suffix, so it is also going to test
+ * that.
+ *
+ */
+public class B6521014 {
+
+    static InetAddress sin;
+    
+    static Inet6Address getLocalAddr () throws Exception {
+        Enumeration e = NetworkInterface.getNetworkInterfaces();
+        while (e.hasMoreElements()) {
+            NetworkInterface ifc = (NetworkInterface) e.nextElement();
+            Enumeration addrs = ifc.getInetAddresses();
+            while (addrs.hasMoreElements()) {
+                InetAddress a = (InetAddress)addrs.nextElement();
+                if (a instanceof Inet6Address) {
+                    Inet6Address ia6 = (Inet6Address) a;
+                    if (ia6.isLinkLocalAddress()) {
+                        // remove %scope suffix
+                        return (Inet6Address)InetAddress.getByAddress(ia6.getAddress());
+                    }
+                }
+            }
+        }
+        return null;
+    }
+
+    static void test1() throws Exception {
+        ServerSocket ssock;
+        Socket sock;
+        int port;
+
+        ssock = new ServerSocket(0);
+        port = ssock.getLocalPort();
+        sock = new Socket();
+        try {
+            sock.connect(new InetSocketAddress(sin, port), 100);
+        } catch (SocketTimeoutException e) {
+            // time out exception is okay
+            System.out.println("timed out when connecting.");
+        }
+    }
+    
+    static void test2() throws Exception {
+        Socket sock;
+        ServerSocket ssock;
+        int port;
+        int localport;
+
+        ssock = new ServerSocket(0);
+        ssock.setSoTimeout(100);
+        port = ssock.getLocalPort();
+        localport = port + 1;
+        sock = new Socket();
+        sock.bind(new InetSocketAddress(sin, localport));
+        try {
+            sock.connect(new InetSocketAddress(sin, port), 100);
+        } catch (SocketTimeoutException e) {
+            // time out exception is okay
+            System.out.println("timed out when connecting.");
+        }
+    }            
+    
+    public static void main(String[] args) throws Exception {        
+        sin = getLocalAddr();
+        if (sin == null) {
+            System.out.println("Cannot find a link-local address.");
+            return;
+        }
+
+        try {
+            test1();
+            test2();
+        } catch (IOException e) {
+            throw new RuntimeException("Test failed: cannot create socket.", e);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/nio/channels/Channels/ReadOffset.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * 
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * 
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * 
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/* @test 1.1 07/04/11
+ * @bug 6545054
+ * @summary Channels.newInputStream.read throws IAE when invoked with
+ *          different offsets.
+ */
+
+import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.Channels;
+import java.io.InputStream;
+import java.io.IOException;
+
+public class ReadOffset {
+    public static void main(String[] args) throws IOException {
+        ReadableByteChannel rbc = new ReadableByteChannel() {
+            public int read(ByteBuffer dst) {
+                dst.put((byte)0);
+                return 1;
+            }
+            public boolean isOpen() {
+                return true;
+            }
+            public void close() {
+            }
+        };
+
+        InputStream in = Channels.newInputStream(rbc);
+
+	byte[] b = new byte[3];
+        in.read(b, 0, 1);
+        in.read(b, 2, 1);	// throws IAE
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/nio/channels/SocketChannel/OpenLeak.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * 
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * 
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * 
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/* @test @(#)OpenLeak.java	1.1 07/04/25
+ * @bug 6548464
+ * @summary SocketChannel.open(SocketAddress) leaks file descriptor if 
+ *     connection cannot be established
+ */
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.nio.channels.SocketChannel;
+
+public class OpenLeak {
+
+    public static void main(String[] args) throws Exception {
+	InetAddress lh = InetAddress.getLocalHost();
+	InetSocketAddress isa = new InetSocketAddress(lh, 12345);
+
+	System.setSecurityManager( new SecurityManager() );
+	for (int i=0; i<100000; i++) {
+	    try {
+		SocketChannel.open(isa);
+		throw new RuntimeException("This should not happen");
+	    } catch (SecurityException x) { }
+	}
+
+    }
+
+}
--- a/j2se/test/java/security/Security/signedfirst/Dyn.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/java/security/Security/signedfirst/Dyn.sh	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
 # have any questions.
 #
 
-# @test 1.7 07/05/06
+# @test 1.7 07/05/24
 # @bug 4504355
 # @summary problems if signed crypto provider is the most preferred provider
 #
--- a/j2se/test/java/security/Security/signedfirst/Static.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/java/security/Security/signedfirst/Static.sh	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
 # have any questions.
 #
 
-# @test 1.8 07/05/06
+# @test 1.8 07/05/24
 # @bug 4504355 4744260
 # @summary problems if signed crypto provider is the most preferred provider
 #
--- a/j2se/test/java/util/Arrays/FloatDoubleOrder.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/java/util/Arrays/FloatDoubleOrder.java	Fri May 25 00:49:14 2007 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-1999 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,8 @@
  */
 
 /**
- * @test 1.5 07/05/05
- * @bug 4143272
+ * @test 1.6 07/05/15
+ * @bug 4143272 6548425
  * @summary The natural ordering on Float and Double was not even a partial
  *	    order (i.e., it violated the contract of Comparable.compareTo).
  *	    Now it's a total ordering.  Arrays.sort(double[])
@@ -82,5 +82,15 @@
         if (!Arrays.equals(unsortedFlt, sortedFlt))
             throw new RuntimeException("Float Array failed");
 
+
+	double[] da = {-0.0d, -0.0d, 0.0d, -0.0d};
+	Arrays.sort(da, 1, 4);
+        if (!Arrays.equals(da, new double[] {-0.0d, -0.0d, -0.0d, 0.0d}))
+            throw new RuntimeException("6548425");
+
+	float[] fa = {-0.0f, -0.0f, 0.0f, -0.0f};
+	Arrays.sort(fa, 1, 4);
+        if (!Arrays.equals(fa, new float[] {-0.0f, -0.0f, -0.0f, 0.0f}))
+            throw new RuntimeException("6548425");
   }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/PriorityQueue/PriorityQueueSort.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,98 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.3 07/05/17
+ * @bug 4486658
+ * @summary Checks that a priority queue returns elements in sorted order across various operations
+ */
+
+import java.util.*;
+
+public class PriorityQueueSort {
+
+    static class MyComparator implements Comparator<Integer> {
+        public int compare(Integer x, Integer y) {
+            int i = x.intValue();
+            int j = y.intValue();
+            if (i < j) return -1;
+            if (i > j) return 1;
+            return 0;
+        }
+    }
+
+    public static void main(String[] args) {
+        int n = 10000;
+        if (args.length > 0)
+            n = Integer.parseInt(args[0]);
+
+        List<Integer> sorted = new ArrayList<Integer>(n);
+        for (int i = 0; i < n; i++)
+            sorted.add(new Integer(i));
+        List<Integer> shuffled = new ArrayList<Integer>(sorted);
+        Collections.shuffle(shuffled);
+
+        Queue<Integer> pq = new PriorityQueue<Integer>(n, new MyComparator());
+        for (Iterator<Integer> i = shuffled.iterator(); i.hasNext(); )
+            pq.add(i.next());
+
+        List<Integer> recons = new ArrayList<Integer>();
+        while (!pq.isEmpty())
+            recons.add(pq.remove());
+        if (!recons.equals(sorted))
+            throw new RuntimeException("Sort test failed");
+
+        recons.clear();
+        pq = new PriorityQueue<Integer>(shuffled);
+        while (!pq.isEmpty())
+            recons.add(pq.remove());
+        if (!recons.equals(sorted))
+            throw new RuntimeException("Sort test failed");
+
+        // Remove all odd elements from queue
+        pq = new PriorityQueue<Integer>(shuffled);
+        for (Iterator<Integer> i = pq.iterator(); i.hasNext(); )
+            if ((i.next().intValue() & 1) == 1)
+                i.remove();
+        recons.clear();
+        while (!pq.isEmpty())
+            recons.add(pq.remove());
+
+        for (Iterator<Integer> i = sorted.iterator(); i.hasNext(); )
+            if ((i.next().intValue() & 1) == 1)
+                i.remove();
+
+        if (!recons.equals(sorted))
+            throw new RuntimeException("Iterator remove test failed.");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/Random/DistinctSeeds.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,50 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.2 07/05/17
+ * @bug 4949279
+ * @summary Independent instantiations of Random() have distinct seeds.
+ */
+
+import java.util.Random;
+
+public class DistinctSeeds {
+    public static void main(String[] args) throws Exception {
+	// Strictly speaking, it is possible for these to randomly fail,
+	// but the probability should be *extremely* small (< 2**-63).
+	if (new Random().nextLong() == new Random().nextLong() ||
+	    new Random().nextLong() == new Random().nextLong())
+            throw new RuntimeException("Random() seeds not unique.");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/BlockingQueue/CancelledProducerConsumerLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,190 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.6 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 CancelledProducerConsumerLoops.java
+ * @run main/timeout=7000 CancelledProducerConsumerLoops
+ * @summary Checks for responsiveness of blocking queues to cancellation.
+ * Runs under the assumption that ITERS computations require more than
+ * TIMEOUT msecs to complete.
+ */
+
+import java.util.concurrent.*;
+
+// Stress test: npairs producer/consumer pairs run against a shared blocking
+// queue; after TIMEOUT ms every pair except pair 0 is cancelled, and the test
+// checks that the cancelled tasks actually finish in the cancelled state.
+public class CancelledProducerConsumerLoops {
+    // Bound used for the bounded queue implementations under test.
+    static final int CAPACITY =      100;
+    // Milliseconds the pairs are allowed to run before cancellation begins.
+    static final long TIMEOUT = 100;
+
+    // Shared pool for all producer/consumer tasks across every run.
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    // When true, per-queue timing lines are printed.
+    static boolean print = false;
+
+    // Runs oneTest for a growing number of pairs (cap optionally given by
+    // args[0]), then shuts the pool down and insists that it terminates.
+    public static void main(String[] args) throws Exception {
+        int maxPairs = 8;
+        int iters = 1000000;
+
+        if (args.length > 0)
+            maxPairs = Integer.parseInt(args[0]);
+
+        print = true;
+
+        for (int i = 1; i <= maxPairs; i += (i+1) >>> 1) {
+            System.out.println("Pairs:" + i);
+            try {
+                oneTest(i, iters);
+            }
+            catch (BrokenBarrierException bb) {
+                // OK, ignore
+            }
+            Thread.sleep(100);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+   }
+
+    // One timed run against queue q: starts npairs producers and consumers,
+    // releases them via the barrier, sleeps TIMEOUT ms, then cancels every
+    // pair except pair 0.  If all cancellations were delivered before the
+    // tasks completed, verifies the cancelled futures report
+    // isDone() && isCancelled().
+    static void oneRun(BlockingQueue<Integer> q, int npairs, int iters) throws Exception {
+        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        CyclicBarrier barrier = new CyclicBarrier(npairs * 2 + 1, timer);
+        Future[] prods = new Future[npairs];
+        Future[] cons = new Future[npairs];
+
+        for (int i = 0; i < npairs; ++i) {
+            prods[i] = pool.submit(new Producer(q, barrier, iters));
+            cons[i] = pool.submit(new Consumer(q, barrier, iters));
+        }
+        barrier.await();
+        Thread.sleep(TIMEOUT);
+        boolean tooLate = false;
+
+        // Cancel from index 1 onward so pair 0 is left to complete normally.
+        for (int i = 1; i < npairs; ++i) {
+            if (!prods[i].cancel(true))
+                tooLate = true;
+            if (!cons[i].cancel(true))
+                tooLate = true;
+        }
+
+        // Results are unused; get() is called to block until the one
+        // uncancelled pair has finished.
+        Object p0 = prods[0].get();
+        Object c0 = cons[0].get();
+
+        if (!tooLate) {
+            for (int i = 1; i < npairs; ++i) {
+                if (!prods[i].isDone() || !prods[i].isCancelled())
+                    throw new Error("Only one producer thread should complete");
+                if (!cons[i].isDone() || !cons[i].isCancelled())
+                    throw new Error("Only one consumer thread should complete");
+            }
+        }
+        else
+            System.out.print("(cancelled too late) ");
+
+        long endTime = System.nanoTime();
+        long time = endTime - timer.startTime;
+        if (print) {
+            double secs = (double)(time) / 1000000000.0;
+            System.out.println("\t " + secs + "s run time");
+        }
+    }
+
+    // Exercises each queue implementation.  SynchronousQueue gets iters / 8
+    // (every transfer needs a rendezvous, so it is much slower).
+    static void oneTest(int pairs, int iters) throws Exception {
+
+        if (print)
+            System.out.print("ArrayBlockingQueue      ");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY), pairs, iters);
+
+        if (print)
+            System.out.print("LinkedBlockingQueue     ");
+        oneRun(new LinkedBlockingQueue<Integer>(CAPACITY), pairs, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue        ");
+        oneRun(new SynchronousQueue<Integer>(), pairs, iters / 8);
+
+        /* PriorityBlockingQueue is unbounded
+        if (print)
+            System.out.print("PriorityBlockingQueue   ");
+        oneRun(new PriorityBlockingQueue<Integer>(iters / 2 * pairs), pairs, iters / 4);
+        */
+    }
+
+    // Common state for producers and consumers: the queue under test, the
+    // start barrier, and the per-task iteration budget.
+    static abstract class Stage implements Callable<Integer> {
+        final BlockingQueue<Integer> queue;
+        final CyclicBarrier barrier;
+        final int iters;
+        Stage (BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            queue = q;
+            barrier = b;
+            this.iters = iters;
+        }
+    }
+
+    // Offers computed values with a 1-second timeout; stops early when an
+    // offer times out.  Returns a checksum of the work performed.
+    static class Producer extends Stage {
+        Producer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public Integer call() throws Exception {
+            barrier.await();
+            int s = 0;
+            int l = 4321;
+            for (int i = 0; i < iters; ++i) {
+                l = LoopHelpers.compute1(l);
+                s += LoopHelpers.compute2(l);
+                if (!queue.offer(new Integer(l), 1, TimeUnit.SECONDS))
+                    break;
+            }
+            return new Integer(s);
+        }
+    }
+
+    // Polls with a 1-second timeout; stops early on a null (timed-out) poll.
+    // Returns a checksum of the values consumed.
+    static class Consumer extends Stage {
+        Consumer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public Integer call() throws Exception {
+            barrier.await();
+            int l = 0;
+            int s = 0;
+            for (int i = 0; i < iters; ++i) {
+                Integer x = queue.poll(1, TimeUnit.SECONDS);
+                if (x == null)
+                    break;
+                l = LoopHelpers.compute1(x.intValue());
+                s += l;
+            }
+            return new Integer(s);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/BlockingQueue/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,129 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+// Shared helpers for the JSR166 loop tests: cheap deterministic "work"
+// functions, a simple unsynchronized LCG random, a barrier-driven timer,
+// and a right-justifying number formatter.
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        // Fold any overflow past bit 31 back into the low bits.
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        // Result is kept strictly positive and nonzero.
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        // Loop count comes from bits 4-6 of x, i.e. 0..7 iterations.
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        // Global sequence so concurrently created instances get distinct seeds.
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        // Advances the 48-bit LCG state and returns a non-negative int.
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    // Barrier action that records nanoTime: the first trip sets startTime,
+    // any later trip sets endTime; getTime() yields the elapsed interval.
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    // Right-justifies n in a 9-character field (returns n unpadded if wider).
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/BlockingQueue/MultipleProducersSingleConsumerLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,191 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.4 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 MultipleProducersSingleConsumerLoops.java
+ * @run main/timeout=3600 MultipleProducersSingleConsumerLoops
+ * @summary  multiple producers and single consumer using blocking queues
+ */
+
+import java.util.concurrent.*;
+
+// Throughput/correctness test: N producers feed one consumer through each
+// blocking queue implementation; producer and consumer checksums must match.
+public class MultipleProducersSingleConsumerLoops {
+    // Bound used for the bounded queue implementations under test.
+    static final int CAPACITY =      100;
+    // Shared pool for all producer/consumer tasks across every run.
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    // When true, per-queue timing lines are printed.
+    static boolean print = false;
+    // Running checksums; guarded by the class lock via the synchronized
+    // static accessors below.
+    static int producerSum;
+    static int consumerSum;
+
+    static synchronized void addProducerSum(int x) {
+        producerSum += x;
+    }
+
+    static synchronized void addConsumerSum(int x) {
+        consumerSum += x;
+    }
+
+    // Fails the run if the values produced and consumed do not agree.
+    static synchronized void checkSum() {
+        if (producerSum != consumerSum)
+            throw new Error("CheckSum mismatch");
+    }
+
+    // Two unprinted warmup runs, then oneTest for a growing producer count
+    // (cap optionally given by args[0]); finally shuts the pool down.
+    public static void main(String[] args) throws Exception {
+        int maxProducers = 5;
+        int iters = 100000;
+
+        if (args.length > 0)
+            maxProducers = Integer.parseInt(args[0]);
+
+        print = false;
+        System.out.println("Warmup...");
+        oneTest(1, 10000);
+        Thread.sleep(100);
+        oneTest(2, 10000);
+        Thread.sleep(100);
+        print = true;
+
+        for (int i = 1; i <= maxProducers; i += (i+1) >>> 1) {
+            System.out.println("Producers:" + i);
+            oneTest(i, iters);
+            Thread.sleep(100);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+   }
+
+    // Runs each queue implementation in turn with the given producer count.
+    static void oneTest(int producers, int iters) throws Exception {
+        if (print)
+            System.out.print("ArrayBlockingQueue      ");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY), producers, iters);
+
+        if (print)
+            System.out.print("LinkedBlockingQueue     ");
+        oneRun(new LinkedBlockingQueue<Integer>(CAPACITY), producers, iters);
+
+        // Don't run PBQ since can legitimately run out of memory
+        //        if (print)
+        //            System.out.print("PriorityBlockingQueue   ");
+        //        oneRun(new PriorityBlockingQueue<Integer>(), producers, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue        ");
+        oneRun(new SynchronousQueue<Integer>(), producers, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue(fair)  ");
+        oneRun(new SynchronousQueue<Integer>(true), producers, iters);
+
+        if (print)
+            System.out.print("ArrayBlockingQueue(fair)");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY, true), producers, iters);
+    }
+
+    // Common state for producers and the consumer: iteration budget, the
+    // queue under test, and the start/finish barrier.
+    static abstract class Stage implements Runnable {
+        final int iters;
+        final BlockingQueue<Integer> queue;
+        final CyclicBarrier barrier;
+        Stage (BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            queue = q;
+            barrier = b;
+            this.iters = iters;
+        }
+    }
+
+    // Puts iters computed values on the queue and records their checksum.
+    static class Producer extends Stage {
+        Producer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int s = 0;
+                int l = hashCode();
+                for (int i = 0; i < iters; ++i) {
+                    l = LoopHelpers.compute1(l);
+                    l = LoopHelpers.compute2(l);
+                    queue.put(new Integer(l));
+                    s += l;
+                }
+                addProducerSum(s);
+                barrier.await();
+            }
+            catch (Exception ie) {
+                ie.printStackTrace();
+                return;
+            }
+        }
+    }
+
+    // Takes iters values off the queue and records their checksum.
+    static class Consumer extends Stage {
+        Consumer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int s = 0;
+                for (int i = 0; i < iters; ++i) {
+                    s += queue.take().intValue();
+                }
+                addConsumerSum(s);
+                barrier.await();
+            }
+            catch (Exception ie) {
+                ie.printStackTrace();
+                return;
+            }
+        }
+
+    }
+
+    // One timed run: nproducers producers plus one consumer that takes
+    // iters * nproducers items.  The barrier has nproducers + 2 parties
+    // (workers plus this thread); the two await() calls bracket the run.
+    static void oneRun(BlockingQueue<Integer> q, int nproducers, int iters) throws Exception {
+        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        CyclicBarrier barrier = new CyclicBarrier(nproducers + 2, timer);
+        for (int i = 0; i < nproducers; ++i) {
+            pool.execute(new Producer(q, barrier, iters));
+        }
+        pool.execute(new Consumer(q, barrier, iters * nproducers));
+        barrier.await();
+        barrier.await();
+        long time = timer.getTime();
+        checkSum();
+        if (print)
+            System.out.println("\t: " + LoopHelpers.rightJustify(time / (iters * nproducers)) + " ns per transfer");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/BlockingQueue/PollMemoryLeak.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,59 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.2 07/05/17
+ * @bug 6236036 6264015
+ * @compile PollMemoryLeak.java
+ * @run main/othervm -Xmx8m PollMemoryLeak
+ * @summary  Checks for OutOfMemoryError when an unbounded
+ * number of aborted timed waits occur without a signal.
+ */
+
+import java.util.concurrent.*;
+
+// Regression test for 6236036/6264015: repeatedly performing timed polls
+// that expire without a matching offer must not accumulate garbage; run
+// under -Xmx8m, an OutOfMemoryError here indicates the leak.
+public class PollMemoryLeak {
+    public static void main(String[] args) throws InterruptedException {
+	final BlockingQueue[] qs = {
+	    new LinkedBlockingQueue(10),
+	    new ArrayBlockingQueue(10),
+	    new SynchronousQueue(),
+	    new SynchronousQueue(true),
+	};
+        // Hammer 1ns timed polls on every queue for ten wall-clock seconds;
+        // each poll times out (queues stay empty) and should leave no trace.
+        final long start = System.currentTimeMillis();
+        final long end = start + 10 * 1000;
+        while (System.currentTimeMillis() < end)
+	    for (BlockingQueue q : qs)
+		q.poll(1, TimeUnit.NANOSECONDS);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/BlockingQueue/ProducerConsumerLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,191 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.4 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 ProducerConsumerLoops.java
+ * @run main/timeout=3600 ProducerConsumerLoops
+ * @summary  multiple producers and consumers using blocking queues
+ */
+
+import java.util.concurrent.*;
+
+// Throughput/correctness test: equal numbers of producers and consumers run
+// against each blocking queue implementation; the checksum of everything
+// produced must equal the checksum of everything consumed.
+public class ProducerConsumerLoops {
+    // Bound used for the bounded queue implementations under test.
+    static final int CAPACITY =      100;
+
+    // Shared pool for all producer/consumer tasks across every run.
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    // When true, per-queue timing lines are printed.
+    static boolean print = false;
+    // Running checksums; guarded by the class lock via the synchronized
+    // static accessors below.
+    static int producerSum;
+    static int consumerSum;
+    static synchronized void addProducerSum(int x) {
+        producerSum += x;
+    }
+
+    static synchronized void addConsumerSum(int x) {
+        consumerSum += x;
+    }
+
+    // Fails the run if the values produced and consumed do not agree.
+    static synchronized void checkSum() {
+        if (producerSum != consumerSum)
+            throw new Error("CheckSum mismatch");
+    }
+
+    // Two unprinted warmup runs, then oneTest for a growing pair count
+    // (cap optionally given by args[0]); finally shuts the pool down.
+    public static void main(String[] args) throws Exception {
+        int maxPairs = 8;
+        int iters = 10000;
+
+        if (args.length > 0)
+            maxPairs = Integer.parseInt(args[0]);
+
+        print = false;
+        System.out.println("Warmup...");
+        oneTest(1, 10000);
+        Thread.sleep(100);
+        oneTest(2, 10000);
+        Thread.sleep(100);
+        print = true;
+
+        for (int i = 1; i <= maxPairs; i += (i+1) >>> 1) {
+            System.out.println("Pairs:" + i);
+            oneTest(i, iters);
+            Thread.sleep(100);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+   }
+
+    // Runs each queue implementation in turn with the given pair count.
+    static void oneTest(int pairs, int iters) throws Exception {
+        if (print)
+            System.out.print("ArrayBlockingQueue      ");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY), pairs, iters);
+
+        if (print)
+            System.out.print("LinkedBlockingQueue     ");
+        oneRun(new LinkedBlockingQueue<Integer>(CAPACITY), pairs, iters);
+
+        if (print)
+            System.out.print("PriorityBlockingQueue   ");
+        oneRun(new PriorityBlockingQueue<Integer>(), pairs, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue        ");
+        oneRun(new SynchronousQueue<Integer>(), pairs, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue(fair)  ");
+        oneRun(new SynchronousQueue<Integer>(true), pairs, iters);
+
+        if (print)
+            System.out.print("ArrayBlockingQueue(fair)");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY, true), pairs, iters);
+    }
+
+    // Common state for producers and consumers: iteration budget, the
+    // queue under test, and the start/finish barrier.
+    static abstract class Stage implements Runnable {
+        final int iters;
+        final BlockingQueue<Integer> queue;
+        final CyclicBarrier barrier;
+        Stage (BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            queue = q;
+            barrier = b;
+            this.iters = iters;
+        }
+    }
+
+    // Puts iters computed values on the queue; the checksum recorded is of
+    // compute1(l) for each value l that was put (matched by Consumer below).
+    static class Producer extends Stage {
+        Producer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int s = 0;
+                int l = hashCode();
+                for (int i = 0; i < iters; ++i) {
+                    l = LoopHelpers.compute2(l);
+                    queue.put(new Integer(l));
+                    s += LoopHelpers.compute1(l);
+                }
+                addProducerSum(s);
+                barrier.await();
+            }
+            catch (Exception ie) {
+                ie.printStackTrace();
+                return;
+            }
+        }
+    }
+
+    // Takes iters values and records the checksum of compute1(value),
+    // mirroring the producer-side checksum.
+    static class Consumer extends Stage {
+        Consumer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int l = 0;
+                int s = 0;
+                for (int i = 0; i < iters; ++i) {
+                    l = LoopHelpers.compute1(queue.take().intValue());
+                    s += l;
+                }
+                addConsumerSum(s);
+                barrier.await();
+            }
+            catch (Exception ie) {
+                ie.printStackTrace();
+                return;
+            }
+        }
+
+    }
+
+    // One timed run: npairs producers and npairs consumers.  The barrier has
+    // npairs * 2 + 1 parties (workers plus this thread); the two await()
+    // calls bracket the run, then the checksums are compared.
+    static void oneRun(BlockingQueue<Integer> q, int npairs, int iters) throws Exception {
+        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        CyclicBarrier barrier = new CyclicBarrier(npairs * 2 + 1, timer);
+        for (int i = 0; i < npairs; ++i) {
+            pool.execute(new Producer(q, barrier, iters));
+            pool.execute(new Consumer(q, barrier, iters));
+        }
+        barrier.await();
+        barrier.await();
+        long time = timer.getTime();
+        checkSum();
+        if (print)
+            System.out.println("\t: " + LoopHelpers.rightJustify(time / (iters * npairs)) + " ns per transfer");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/BlockingQueue/SingleProducerMultipleConsumerLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,179 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.4 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 SingleProducerMultipleConsumerLoops.java
+ * @run main/timeout=600 SingleProducerMultipleConsumerLoops
+ * @summary  check ordering for blocking queues with 1 producer and multiple consumers
+ */
+
+import java.util.concurrent.*;
+
+// Ordering test: one producer feeds monotonically increasing integers to
+// multiple consumers; each consumer checks that the values it receives are
+// non-decreasing, i.e. the queue never reorders transfers it hands out.
+public class SingleProducerMultipleConsumerLoops {
+    // Bound used for the bounded queue implementations under test.
+    static final int CAPACITY =      100;
+
+    // Shared pool for all producer/consumer tasks across every run.
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    // When true, per-queue timing lines are printed.
+    static boolean print = false;
+
+    // Two unprinted warmup runs, then oneTest for a growing consumer count
+    // (cap optionally given by args[0]); finally shuts the pool down.
+    public static void main(String[] args) throws Exception {
+        int maxConsumers = 5;
+        int iters = 10000;
+
+        if (args.length > 0)
+            maxConsumers = Integer.parseInt(args[0]);
+
+        print = false;
+        System.out.println("Warmup...");
+        oneTest(1, 10000);
+        Thread.sleep(100);
+        oneTest(2, 10000);
+        Thread.sleep(100);
+        print = true;
+
+        for (int i = 1; i <= maxConsumers; i += (i+1) >>> 1) {
+            System.out.println("Consumers:" + i);
+            oneTest(i, iters);
+            Thread.sleep(100);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+   }
+
+    // Runs each queue implementation in turn with the given consumer count.
+    static void oneTest(int consumers, int iters) throws Exception {
+        if (print)
+            System.out.print("ArrayBlockingQueue      ");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY), consumers, iters);
+
+        if (print)
+            System.out.print("LinkedBlockingQueue     ");
+        oneRun(new LinkedBlockingQueue<Integer>(CAPACITY), consumers, iters);
+
+        if (print)
+            System.out.print("PriorityBlockingQueue   ");
+        oneRun(new PriorityBlockingQueue<Integer>(), consumers, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue        ");
+        oneRun(new SynchronousQueue<Integer>(), consumers, iters);
+
+        if (print)
+            System.out.print("SynchronousQueue(fair)  ");
+        oneRun(new SynchronousQueue<Integer>(true), consumers, iters);
+
+        if (print)
+            System.out.print("ArrayBlockingQueue(fair)");
+        oneRun(new ArrayBlockingQueue<Integer>(CAPACITY, true), consumers, iters);
+    }
+
+    // Common state for the producer and consumers: iteration budget, the
+    // queue under test, the start/finish barrier, and a per-task result
+    // slot written on successful completion.
+    static abstract class Stage implements Runnable {
+        final int iters;
+        final BlockingQueue<Integer> queue;
+        final CyclicBarrier barrier;
+        volatile int result;
+        Stage (BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            queue = q;
+            barrier = b;
+            this.iters = iters;
+        }
+    }
+
+    // Puts the integers 0..iters-1 on the queue in increasing order.
+    static class Producer extends Stage {
+        Producer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                for (int i = 0; i < iters; ++i) {
+                    queue.put(new Integer(i));
+                }
+                barrier.await();
+                // Sentinel marking normal completion.
+                result = 432;
+            }
+            catch (Exception ie) {
+                ie.printStackTrace();
+                return;
+            }
+        }
+    }
+
+    // Takes iters values and fails if any value is smaller than the one this
+    // consumer saw before it (per-consumer non-decreasing order).
+    static class Consumer extends Stage {
+        Consumer(BlockingQueue<Integer> q, CyclicBarrier b, int iters) {
+            super(q, b, iters);
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int l = 0;
+                int s = 0;
+                int last = -1;
+                for (int i = 0; i < iters; ++i) {
+                    Integer item = queue.take();
+                    int v = item.intValue();
+                    if (v < last)
+                        throw new Error("Out-of-Order transfer");
+                    last = v;
+                    l = LoopHelpers.compute1(v);
+                    s += l;
+                }
+                barrier.await();
+                result = s;
+            }
+            catch (Exception ie) {
+                ie.printStackTrace();
+                return;
+            }
+        }
+
+    }
+
+    // One timed run: a single producer of iters * nconsumers items and
+    // nconsumers consumers of iters items each.  The barrier has
+    // nconsumers + 2 parties (workers plus this thread); the two await()
+    // calls bracket the run.
+    static void oneRun(BlockingQueue<Integer> q, int nconsumers, int iters) throws Exception {
+        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        CyclicBarrier barrier = new CyclicBarrier(nconsumers + 2, timer);
+        pool.execute(new Producer(q, barrier, iters * nconsumers));
+        for (int i = 0; i < nconsumers; ++i) {
+            pool.execute(new Consumer(q, barrier, iters));
+        }
+        barrier.await();
+        barrier.await();
+        long time = timer.getTime();
+        if (print)
+            System.out.println("\t: " + LoopHelpers.rightJustify(time / (iters * nconsumers)) + " ns per transfer");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ConcurrentHashMap/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,129 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ConcurrentHashMap/MapCheck.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,618 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.2 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 MapCheck.java
+ * @run main/timeout=240 MapCheck
+ * @summary Times and checks basic map operations
+ */
+
+import java.util.*;
+import java.io.*;
+
+public class MapCheck {
+
+    static final int absentSize = 1 << 17;
+    static final int absentMask = absentSize - 1;
+    static Object[] absent = new Object[absentSize];
+
+    static final Object MISSING = new Object();
+
+    static TestTimer timer = new TestTimer();
+
+    static void reallyAssert(boolean b) {
+        if (!b) throw new Error("Failed Assertion");
+    }
+
+    public static void main(String[] args) throws Exception {
+        Class mapClass = java.util.concurrent.ConcurrentHashMap.class;
+        int numTests = 8;
+        int size = 50000;
+
+        if (args.length > 0) {
+            try {
+                mapClass = Class.forName(args[0]);
+            } catch(ClassNotFoundException e) {
+                throw new RuntimeException("Class " + args[0] + " not found.");
+            }
+        }
+
+
+        if (args.length > 1)
+            numTests = Integer.parseInt(args[1]);
+
+        if (args.length > 2)
+            size = Integer.parseInt(args[2]);
+
+        boolean doSerializeTest = args.length > 3;
+
+        System.out.println("Testing " + mapClass.getName() + " trials: " + numTests + " size: " + size);
+
+        for (int i = 0; i < absentSize; ++i) absent[i] = new Object();
+
+        Object[] key = new Object[size];
+        for (int i = 0; i < size; ++i) key[i] = new Object();
+
+        forceMem(size * 8);
+
+        for (int rep = 0; rep < numTests; ++rep) {
+            runTest(newMap(mapClass), key);
+        }
+
+        TestTimer.printStats();
+
+
+        if (doSerializeTest)
+            stest(newMap(mapClass), size);
+    }
+
+    static Map newMap(Class cl) {
+        try {
+            Map m = (Map)cl.newInstance();
+            return m;
+        } catch(Exception e) {
+            throw new RuntimeException("Can't instantiate " + cl + ": " + e);
+        }
+    }
+
+
+    static void runTest(Map s, Object[] key) {
+        shuffle(key);
+        int size = key.length;
+        long startTime = System.currentTimeMillis();
+        test(s, key);
+        long time = System.currentTimeMillis() - startTime;
+    }
+
+    static void forceMem(int n) {
+        // force enough memory
+        Long[] junk = new Long[n];
+        for (int i = 0; i < junk.length; ++i) junk[i] = new Long(i);
+        int sum = 0;
+        for (int i = 0; i < junk.length; ++i)
+            sum += (int)(junk[i].longValue() + i);
+        if (sum == 0) System.out.println("Useless number = " + sum);
+        junk = null;
+        //        System.gc();
+    }
+
+
+    static void t1(String nm, int n, Map s, Object[] key, int expect) {
+        int sum = 0;
+        int iters = 4;
+        timer.start(nm, n * iters);
+        for (int j = 0; j < iters; ++j) {
+            for (int i = 0; i < n; i++) {
+                if (s.get(key[i]) != null) ++sum;
+            }
+        }
+        timer.finish();
+        reallyAssert (sum == expect * iters);
+    }
+
+    static void t2(String nm, int n, Map s, Object[] key, int expect) {
+        int sum = 0;
+        timer.start(nm, n);
+        for (int i = 0; i < n; i++) {
+            if (s.remove(key[i]) != null) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == expect);
+    }
+
+    static void t3(String nm, int n, Map s, Object[] key, int expect) {
+        int sum = 0;
+        timer.start(nm, n);
+        for (int i = 0; i < n; i++) {
+            if (s.put(key[i], absent[i & absentMask]) == null) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == expect);
+    }
+
+    static void t4(String nm, int n, Map s, Object[] key, int expect) {
+        int sum = 0;
+        timer.start(nm, n);
+        for (int i = 0; i < n; i++) {
+            if (s.containsKey(key[i])) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == expect);
+    }
+
+    static void t5(String nm, int n, Map s, Object[] key, int expect) {
+        int sum = 0;
+        timer.start(nm, n/2);
+        for (int i = n-2; i >= 0; i-=2) {
+            if (s.remove(key[i]) != null) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == expect);
+    }
+
+    static void t6(String nm, int n, Map s, Object[] k1, Object[] k2) {
+        int sum = 0;
+        timer.start(nm, n * 2);
+        for (int i = 0; i < n; i++) {
+            if (s.get(k1[i]) != null) ++sum;
+            if (s.get(k2[i & absentMask]) != null) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == n);
+    }
+
+    static void t7(String nm, int n, Map s, Object[] k1, Object[] k2) {
+        int sum = 0;
+        timer.start(nm, n * 2);
+        for (int i = 0; i < n; i++) {
+            if (s.containsKey(k1[i])) ++sum;
+            if (s.containsKey(k2[i & absentMask])) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == n);
+    }
+
+    static void t8(String nm, int n, Map s, Object[] key, int expect) {
+        int sum = 0;
+        timer.start(nm, n);
+        for (int i = 0; i < n; i++) {
+            if (s.get(key[i]) != null) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == expect);
+    }
+
+
+    static void t9(Map s) {
+        int sum = 0;
+        int iters = 20;
+        timer.start("ContainsValue (/n)     ", iters * s.size());
+        int step = absentSize / iters;
+        for (int i = 0; i < absentSize; i += step)
+            if (s.containsValue(absent[i])) ++sum;
+        timer.finish();
+        reallyAssert (sum != 0);
+    }
+
+
+    static void ktest(Map s, int size, Object[] key) {
+        timer.start("ContainsKey            ", size);
+        Set ks = s.keySet();
+        int sum = 0;
+        for (int i = 0; i < size; i++) {
+            if (ks.contains(key[i])) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+
+    static void ittest1(Map s, int size) {
+        int sum = 0;
+        timer.start("Iter Key               ", size);
+        for (Iterator it = s.keySet().iterator(); it.hasNext(); ) {
+            if(it.next() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+    static void ittest2(Map s, int size) {
+        int sum = 0;
+        timer.start("Iter Value             ", size);
+        for (Iterator it = s.values().iterator(); it.hasNext(); ) {
+            if(it.next() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+    static void ittest3(Map s, int size) {
+        int sum = 0;
+        timer.start("Iter Entry             ", size);
+        for (Iterator it = s.entrySet().iterator(); it.hasNext(); ) {
+            if(it.next() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+    static void ittest4(Map s, int size, int pos) {
+        IdentityHashMap seen = new IdentityHashMap(size);
+        reallyAssert (s.size() == size);
+        int sum = 0;
+        timer.start("Iter XEntry            ", size);
+        Iterator it = s.entrySet().iterator();
+        Object k = null;
+        Object v = null;
+        for (int i = 0; i < size-pos; ++i) {
+            Map.Entry x = (Map.Entry)(it.next());
+            k = x.getKey();
+            v = x.getValue();
+            seen.put(k, k);
+            if (x != MISSING)
+                ++sum;
+        }
+        reallyAssert (s.containsKey(k));
+        it.remove();
+        reallyAssert (!s.containsKey(k));
+        while (it.hasNext()) {
+            Map.Entry x = (Map.Entry)(it.next());
+            Object k2 = x.getKey();
+            seen.put(k2, k2);
+            if (x != MISSING)
+                ++sum;
+        }
+
+        reallyAssert (s.size() == size-1);
+        s.put(k, v);
+        reallyAssert (seen.size() == size);
+        timer.finish();
+        reallyAssert (sum == size);
+        reallyAssert (s.size() == size);
+    }
+
+
+    static void ittest(Map s, int size) {
+        ittest1(s, size);
+        ittest2(s, size);
+        ittest3(s, size);
+        //        for (int i = 0; i < size-1; ++i)
+        //            ittest4(s, size, i);
+    }
+
+    static void entest1(Hashtable ht, int size) {
+        int sum = 0;
+
+        timer.start("Iter Enumeration Key   ", size);
+        for (Enumeration en = ht.keys(); en.hasMoreElements(); ) {
+            if (en.nextElement() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+    static void entest2(Hashtable ht, int size) {
+        int sum = 0;
+        timer.start("Iter Enumeration Value ", size);
+        for (Enumeration en = ht.elements(); en.hasMoreElements(); ) {
+            if (en.nextElement() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+
+    static void entest3(Hashtable ht, int size) {
+        int sum = 0;
+
+        timer.start("Iterf Enumeration Key  ", size);
+        Enumeration en = ht.keys();
+        for (int i = 0; i < size; ++i) {
+            if (en.nextElement() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+    static void entest4(Hashtable ht, int size) {
+        int sum = 0;
+        timer.start("Iterf Enumeration Value", size);
+        Enumeration en = ht.elements();
+        for (int i = 0; i < size; ++i) {
+            if (en.nextElement() != MISSING)
+                ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+    }
+
+    static void entest(Map s, int size) {
+        if (s instanceof Hashtable) {
+            Hashtable ht = (Hashtable)s;
+            //            entest3(ht, size);
+            //            entest4(ht, size);
+            entest1(ht, size);
+            entest2(ht, size);
+            entest1(ht, size);
+            entest2(ht, size);
+            entest1(ht, size);
+            entest2(ht, size);
+        }
+    }
+
+    static void rtest(Map s, int size) {
+        timer.start("Remove (iterator)      ", size);
+        for (Iterator it = s.keySet().iterator(); it.hasNext(); ) {
+            it.next();
+            it.remove();
+        }
+        timer.finish();
+    }
+
+    static void rvtest(Map s, int size) {
+        timer.start("Remove (iterator)      ", size);
+        for (Iterator it = s.values().iterator(); it.hasNext(); ) {
+            it.next();
+            it.remove();
+        }
+        timer.finish();
+    }
+
+
+    static void dtest(Map s, int size, Object[] key) {
+        timer.start("Put (putAll)           ", size * 2);
+        Map s2 = null;
+        try {
+            s2 = (Map) (s.getClass().newInstance());
+            s2.putAll(s);
+        }
+        catch (Exception e) { e.printStackTrace(); return; }
+        timer.finish();
+
+        timer.start("Iter Equals            ", size * 2);
+        boolean eqt = s2.equals(s) && s.equals(s2);
+        reallyAssert (eqt);
+        timer.finish();
+
+        timer.start("Iter HashCode          ", size * 2);
+        int shc = s.hashCode();
+        int s2hc = s2.hashCode();
+        reallyAssert (shc == s2hc);
+        timer.finish();
+
+        timer.start("Put (present)          ", size);
+        s2.putAll(s);
+        timer.finish();
+
+        timer.start("Iter EntrySet contains ", size * 2);
+        Set es2 = s2.entrySet();
+        int sum = 0;
+        for (Iterator i1 = s.entrySet().iterator(); i1.hasNext(); ) {
+            Object entry = i1.next();
+            if (es2.contains(entry)) ++sum;
+        }
+        timer.finish();
+        reallyAssert (sum == size);
+
+        t6("Get                    ", size, s2, key, absent);
+
+        Object hold = s2.get(key[size-1]);
+        s2.put(key[size-1], absent[0]);
+        timer.start("Iter Equals            ", size * 2);
+        eqt = s2.equals(s) && s.equals(s2);
+        reallyAssert (!eqt);
+        timer.finish();
+
+        timer.start("Iter HashCode          ", size * 2);
+        int s1h = s.hashCode();
+        int s2h = s2.hashCode();
+        reallyAssert (s1h != s2h);
+        timer.finish();
+
+        s2.put(key[size-1], hold);
+        timer.start("Remove (iterator)      ", size * 2);
+        Iterator s2i = s2.entrySet().iterator();
+        Set es = s.entrySet();
+        while (s2i.hasNext())
+            es.remove(s2i.next());
+        timer.finish();
+
+        reallyAssert (s.isEmpty());
+
+        timer.start("Clear                  ", size);
+        s2.clear();
+        timer.finish();
+        reallyAssert (s2.isEmpty() && s.isEmpty());
+    }
+
+    static void stest(Map s, int size) throws Exception {
+        if (!(s instanceof Serializable))
+            return;
+        System.out.print("Serialize              : ");
+
+        for (int i = 0; i < size; i++) {
+            s.put(new Integer(i), Boolean.TRUE);
+        }
+
+        long startTime = System.currentTimeMillis();
+
+        FileOutputStream fs = new FileOutputStream("MapCheck.dat");
+        ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(fs));
+        out.writeObject(s);
+        out.close();
+
+        FileInputStream is = new FileInputStream("MapCheck.dat");
+        ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(is));
+        Map m = (Map)in.readObject();
+
+        long endTime = System.currentTimeMillis();
+        long time = endTime - startTime;
+
+        System.out.print(time + "ms");
+
+        if (s instanceof IdentityHashMap) return;
+        reallyAssert (s.equals(m));
+    }
+
+
+    static void test(Map s, Object[] key) {
+        int size = key.length;
+
+        t3("Put (absent)           ", size, s, key, size);
+        t3("Put (present)          ", size, s, key, 0);
+        t7("ContainsKey            ", size, s, key, absent);
+        t4("ContainsKey            ", size, s, key, size);
+        ktest(s, size, key);
+        t4("ContainsKey            ", absentSize, s, absent, 0);
+        t6("Get                    ", size, s, key, absent);
+        t1("Get (present)          ", size, s, key, size);
+        t1("Get (absent)           ", absentSize, s, absent, 0);
+        t2("Remove (absent)        ", absentSize, s, absent, 0);
+        t5("Remove (present)       ", size, s, key, size / 2);
+        t3("Put (half present)     ", size, s, key, size / 2);
+
+        ittest(s, size);
+        entest(s, size);
+        t9(s);
+        rtest(s, size);
+
+        t4("ContainsKey            ", size, s, key, 0);
+        t2("Remove (absent)        ", size, s, key, 0);
+        t3("Put (presized)         ", size, s, key, size);
+        dtest(s, size, key);
+    }
+
+    static class TestTimer {
+        private String name;
+        private long numOps;
+        private long startTime;
+        private String cname;
+
+        static final java.util.TreeMap accum = new java.util.TreeMap();
+
+        static void printStats() {
+            for (Iterator it = accum.entrySet().iterator(); it.hasNext(); ) {
+                Map.Entry e = (Map.Entry)(it.next());
+                Stats stats = ((Stats)(e.getValue()));
+                int n = stats.number;
+                double t;
+                if (n > 0)
+                    t = stats.sum / n;
+                else
+                    t = stats.least;
+                long nano = Math.round(1000000.0 * t);
+                System.out.println(e.getKey() + ": " + nano);
+            }
+        }
+
+        void start(String name, long numOps) {
+            this.name = name;
+            this.cname = classify();
+            this.numOps = numOps;
+            startTime = System.currentTimeMillis();
+        }
+
+
+        String classify() {
+            if (name.startsWith("Get"))
+                return "Get                    ";
+            else if (name.startsWith("Put"))
+                return "Put                    ";
+            else if (name.startsWith("Remove"))
+                return "Remove                 ";
+            else if (name.startsWith("Iter"))
+                return "Iter                   ";
+            else
+                return null;
+        }
+
+        void finish() {
+            long endTime = System.currentTimeMillis();
+            long time = endTime - startTime;
+            double timePerOp = ((double)time)/numOps;
+
+            Object st = accum.get(name);
+            if (st == null)
+                accum.put(name, new Stats(timePerOp));
+            else {
+                Stats stats = (Stats) st;
+                stats.sum += timePerOp;
+                stats.number++;
+                if (timePerOp < stats.least) stats.least = timePerOp;
+            }
+
+            if (cname != null) {
+                st = accum.get(cname);
+                if (st == null)
+                    accum.put(cname, new Stats(timePerOp));
+                else {
+                    Stats stats = (Stats) st;
+                    stats.sum += timePerOp;
+                    stats.number++;
+                    if (timePerOp < stats.least) stats.least = timePerOp;
+                }
+            }
+
+        }
+
+    }
+
+    static class Stats {
+        double sum = 0;
+        double least;
+        int number = 0;
+        Stats(double t) { least = t; }
+    }
+
+    static Random rng = new Random();
+
+    static void shuffle(Object[] keys) {
+        int size = keys.length;
+        for (int i=size; i>1; i--) {
+            int r = rng.nextInt(i);
+            Object t = keys[i-1];
+            keys[i-1] = keys[r];
+            keys[r] = t;
+        }
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ConcurrentHashMap/MapLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,236 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.5 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 MapLoops.java
+ * @run main/timeout=1600 MapLoops
+ * @summary Exercise multithreaded maps, by default ConcurrentHashMap.
+ * Multithreaded hash table test.  Each thread does a random walk
+ * though elements of "key" array. On each iteration, it checks if
+ * table includes key.  If absent, with probability pinsert it
+ * inserts it, and if present, with probability premove it removes
+ * it.  (pinsert and premove are expressed as percentages to simplify
+ * parsing from command line.)
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+
+public class MapLoops {
+    static int nkeys       = 10000;
+    static int pinsert     = 60;
+    static int premove     = 2;
+    static int maxThreads  = 100;
+    static int nops        = 100000;
+    static int removesPerMaxRandom;
+    static int insertsPerMaxRandom;
+
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+
+    static final List<Throwable> throwables
+	= new CopyOnWriteArrayList<Throwable>();
+
+    public static void main(String[] args) throws Exception {
+
+        Class mapClass = null;
+        if (args.length > 0) {
+            try {
+                mapClass = Class.forName(args[0]);
+            } catch (ClassNotFoundException e) {
+                throw new RuntimeException("Class " + args[0] + " not found.");
+            }
+        }
+        else
+            mapClass = java.util.concurrent.ConcurrentHashMap.class;
+
+        if (args.length > 1)
+            maxThreads = Integer.parseInt(args[1]);
+
+        if (args.length > 2)
+            nkeys = Integer.parseInt(args[2]);
+
+        if (args.length > 3)
+            pinsert = Integer.parseInt(args[3]);
+
+        if (args.length > 4)
+            premove = Integer.parseInt(args[4]);
+
+        if (args.length > 5)
+            nops = Integer.parseInt(args[5]);
+
+        // normalize probabilities wrt random number generator
+        removesPerMaxRandom = (int)(((double)premove/100.0 * 0x7FFFFFFFL));
+        insertsPerMaxRandom = (int)(((double)pinsert/100.0 * 0x7FFFFFFFL));
+
+        System.out.print("Class: " + mapClass.getName());
+        System.out.print(" threads: " + maxThreads);
+        System.out.print(" size: " + nkeys);
+        System.out.print(" ins: " + pinsert);
+        System.out.print(" rem: " + premove);
+        System.out.print(" ops: " + nops);
+        System.out.println();
+
+        int k = 1;
+        int warmups = 2;
+        for (int i = 1; i <= maxThreads;) {
+            Thread.sleep(100);
+            test(i, nkeys, mapClass);
+            if (warmups > 0)
+                --warmups;
+            else if (i == k) {
+                k = i << 1;
+                i = i + (i >>> 1);
+            }
+            else if (i == 1 && k == 2) {
+                i = k;
+                warmups = 1;
+            }
+            else
+                i = k;
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+
+	if (! throwables.isEmpty())
+	    throw new Error
+		(throwables.size() + " thread(s) terminated abruptly.");
+    }
+
+    static Integer[] makeKeys(int n) {
+        LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
+        Integer[] key = new Integer[n];
+        for (int i = 0; i < key.length; ++i)
+            key[i] = new Integer(rng.next());
+        return key;
+    }
+
+    static void shuffleKeys(Integer[] key) {
+        Random rng = new Random();
+        for (int i = key.length; i > 1; --i) {
+            int j = rng.nextInt(i);
+            Integer tmp = key[j];
+            key[j] = key[i-1];
+            key[i-1] = tmp;
+        }
+    }
+
+    static void test(int i, int nkeys, Class mapClass) throws Exception {
+        System.out.print("Threads: " + i + "\t:");
+        Map<Integer, Integer> map = (Map<Integer,Integer>)mapClass.newInstance();
+        Integer[] key = makeKeys(nkeys);
+        // Uncomment to start with a non-empty table
+        //        for (int j = 0; j < nkeys; j += 4) // start 1/4 occupied
+        //            map.put(key[j], key[j]);
+        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        CyclicBarrier barrier = new CyclicBarrier(i+1, timer);
+        for (int t = 0; t < i; ++t)
+            pool.execute(new Runner(map, key, barrier));
+        barrier.await();
+        barrier.await();
+        long time = timer.getTime();
+        long tpo = time / (i * (long)nops);
+        System.out.print(LoopHelpers.rightJustify(tpo) + " ns per op");
+        double secs = (double)(time) / 1000000000.0;
+        System.out.println("\t " + secs + "s run time");
+        map.clear();
+    }
+
+    static class Runner implements Runnable {
+        final Map<Integer,Integer> map;
+        final Integer[] key;
+        final LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
+        final CyclicBarrier barrier;
+        int position;
+        int total;
+
+        Runner(Map<Integer,Integer> map, Integer[] key,  CyclicBarrier barrier) {
+            this.map = map;
+            this.key = key;
+            this.barrier = barrier;
+            position = key.length / 2;
+        }
+
+        int step() {
+            // random-walk around key positions,  bunching accesses
+            int r = rng.next();
+            position += (r & 7) - 3;
+            while (position >= key.length) position -= key.length;
+            while (position < 0) position += key.length;
+
+            Integer k = key[position];
+            Integer x = map.get(k);
+
+            if (x != null) {
+                if (x.intValue() != k.intValue())
+                    throw new Error("bad mapping: " + x + " to " + k);
+
+                if (r < removesPerMaxRandom) {
+                    if (map.remove(k) != null) {
+                        position = total % key.length; // move from position
+                        return 2;
+                    }
+                }
+            }
+            else if (r < insertsPerMaxRandom) {
+                ++position;
+                map.put(k, k);
+                return 2;
+            }
+
+            // Uncomment to add a little computation between accesses
+            //            total += LoopHelpers.compute1(k.intValue());
+            total += r;
+            return 1;
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int ops = nops;
+                while (ops > 0)
+                    ops -= step();
+                barrier.await();
+            }
+            catch (Throwable throwable) {
+		synchronized(System.err) {
+		    System.err.println("--------------------------------");
+		    throwable.printStackTrace();
+		}
+		throwables.add(throwable);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ConcurrentLinkedQueue/ConcurrentQueueLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,146 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.4 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 ConcurrentQueueLoops.java
+ * @run main/timeout=230 ConcurrentQueueLoops
+ * @summary Checks that a set of threads can repeatedly get and modify items
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
public class ConcurrentQueueLoops {
    // Shared pool reused across runs; shut down at the end of main.
    static final ExecutorService pool = Executors.newCachedThreadPool();
    // Global count of items still circulating among the stages; stages
    // terminate once it drops to zero or below.
    static AtomicInteger totalItems;
    static boolean print = false;

    public static void main(String[] args) throws Exception {
        int maxStages = 8;      // maximum number of worker stages (threads)
        int items = 100000;     // per-stage production quota

        if (args.length > 0)
            maxStages = Integer.parseInt(args[0]);

        // Two untimed warmup runs before the printed, timed ones.
        print = false;
        System.out.println("Warmup...");
        oneRun(1, items);
        Thread.sleep(100);
        oneRun(1, items);
        Thread.sleep(100);
        print = true;

        for (int i = 1; i <= maxStages; i += (i+1) >>> 1) {
            oneRun(i, items);
        }
        pool.shutdown();
	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
	    throw new Error();
   }

    // One worker: polls items, transforms them, and re-offers new ones
    // until its quota is spent and the global count reaches zero.
    static class Stage implements Callable<Integer> {
        final Queue<Integer> queue;
        final CyclicBarrier barrier;
        int items;   // remaining production quota for this stage
        Stage (Queue<Integer> q, CyclicBarrier b, int items) {
            queue = q;
            barrier = b;
            this.items = items;
        }

        public Integer call() {
            // Repeatedly take something from queue if possible,
            // transform it, and put back in.
            try {
                barrier.await();   // start in lockstep with the other stages
                int l = 4321;
                int takes = 0;     // polls since last flush to totalItems
                for (;;) {
                    Integer item = queue.poll();
                    if (item != null) {
                        ++takes;
                        l = LoopHelpers.compute2(item.intValue());
                    }
                    else if (takes != 0) {
                        // Queue looked empty: publish our consumed count so
                        // every stage can observe global progress.
                        totalItems.getAndAdd(-takes);
                        takes = 0;
                    }
                    else if (totalItems.get() <= 0)
                        break;     // nothing left anywhere; terminate
                    l = LoopHelpers.compute1(l);
                    if (items > 0) {
                        // Still under quota: feed a new item to the queue.
                        --items;
                        queue.offer(new Integer(l));
                    }
                    else if ( (l & (3 << 5)) == 0) // spinwait
                        Thread.sleep(1);
                }
                return new Integer(l);
            }
            catch (Exception ie) {
                ie.printStackTrace();
                throw new Error("Call loop failed");
            }
        }
    }

    // Runs n stages against one shared ConcurrentLinkedQueue and reports
    // the mean time per item.
    static void oneRun(int n, int items) throws Exception {
        Queue<Integer> q = new ConcurrentLinkedQueue<Integer>();
        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
        // n workers + the main thread; the barrier action stamps the start time.
        CyclicBarrier barrier = new CyclicBarrier(n + 1, timer);
        totalItems = new AtomicInteger(n * items);
        ArrayList<Future<Integer>> results = new ArrayList<Future<Integer>>(n);
        for (int i = 0; i < n; ++i)
            results.add(pool.submit(new Stage(q, barrier, items)));

        if (print)
            System.out.print("Threads: " + n + "\t:");
        barrier.await();   // release the stages; timer records start time
        int total = 0;
        for (int i = 0; i < n; ++i) {
            Future<Integer> f = results.get(i);
            Integer r = f.get();   // blocks until that stage finishes
            total += r.intValue();
        }
        long endTime = System.nanoTime();
        long time = endTime - timer.startTime;
        if (print)
            System.out.println(LoopHelpers.rightJustify(time / (items * n)) + " ns per item");
        if (total == 0) // avoid overoptimization
            System.out.println("useless result: " + total);

    }
}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ConcurrentLinkedQueue/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,129 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/Exchanger/ExchangeLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,138 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.3 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 ExchangeLoops.java
+ * @run main/timeout=720 ExchangeLoops
+ * @summary checks to make sure a pipeline of exchangers passes data.
+ */
+
+import java.util.concurrent.*;
+
public class ExchangeLoops {
    // Shared pool for all stages; shut down after the last run.
    static final ExecutorService pool = Executors.newCachedThreadPool();
    static boolean print = false;

    // Mutable int box passed between neighboring stages through exchangers.
    static class Int {
        public int value;
        Int(int i) { value = i; }
    }


    public static void main(String[] args) throws Exception {
        int maxStages = 5;     // maximum number of pipeline stages (threads)
        int iters = 10000;     // exchanges per stage in timed runs

        if (args.length > 0)
            maxStages = Integer.parseInt(args[0]);

        // One untimed warmup run, then timed runs at increasing widths.
        print = false;
        System.out.println("Warmup...");
        oneRun(2, 100000);
        print = true;

        for (int i = 2; i <= maxStages; i += (i+1) >>> 1) {
            System.out.print("Threads: " + i + "\t: ");
            oneRun(i, iters);
        }
        pool.shutdown();
	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
	    throw new Error();
   }

    // One stage of the pipeline: repeatedly exchanges its item with its
    // left and/or right neighbor, transforming the value each time.
    static class Stage implements Runnable {
        final int iters;
        final Exchanger<Int> left;    // null for the leftmost stage
        final Exchanger<Int> right;   // null for the rightmost stage
        final CyclicBarrier barrier;  // start/finish sync with the driver
        volatile int result;
        Stage (Exchanger<Int> left,
               Exchanger<Int> right,
               CyclicBarrier b, int iters) {
            this.left = left;
            this.right = right;
            barrier = b;
            this.iters = iters;
        }

        public void run() {
            try {
                barrier.await();   // first trip: all stages start together
                Int item = new Int(hashCode());
                for (int i = 0; i < iters; ++i) {
                    if (left != null) {
                        item.value = LoopHelpers.compute1(item.value);
                        Int other = left.exchange(item);
                        // A successful exchange must hand back the partner's
                        // distinct, non-null item.
                        if (other == item || other == null)
                            throw new Error("Failed Exchange");
                        item = other;

                    }
                    if (right != null) {
                        item.value = LoopHelpers.compute2(item.value);
                        Int other = right.exchange(item);
                        if (other == item || other == null)
                            throw new Error("Failed Exchange");
                        item = other;
                    }
                }
                barrier.await();   // second trip: stops the timer

            }
            catch (Exception ie) {
                // NOTE(review): a failing stage returns without the final
                // await, which would break the barrier for the others —
                // acceptable here since the driver then fails too.
                ie.printStackTrace();
                return;
            }
        }
    }

    // Builds a chain of nthreads stages linked by exchangers; the end
    // stages have only one neighbor (null exchanger on the outside).
    static void oneRun(int nthreads, int iters) throws Exception {
        LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
        CyclicBarrier barrier = new CyclicBarrier(nthreads + 1, timer);
        Exchanger<Int> l = null;
        Exchanger<Int> r = new Exchanger<Int>();
        for (int i = 0; i < nthreads; ++i) {
            pool.execute(new Stage(l, r, barrier, iters));
            l = r;
            r = (i+2 < nthreads) ? new Exchanger<Int>() : null;
        }
        barrier.await();   // releases the stages; timer stamps start time
        barrier.await();   // returns when all stages finish; stamps end time
        long time = timer.getTime();
        // Denominator approximates the total number of exchanges performed
        // across the whole pipeline.
        if (print)
            System.out.println(LoopHelpers.rightJustify(time / (iters * nthreads + iters * (nthreads-2))) + " ns per transfer");
    }

}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/Exchanger/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,128 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ExecutorCompletionService/ExecutorCompletionServiceLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,119 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.3 07/05/17
+ * @bug 4965960
+ * @compile -source 1.5 ExecutorCompletionServiceLoops.java
+ * @run main/timeout=3600 ExecutorCompletionServiceLoops
+ * @summary  Exercise ExecutorCompletionServiceLoops
+ */
+
+import java.util.concurrent.*;
+
+public class ExecutorCompletionServiceLoops {
+    static final int POOLSIZE =      100;
+    static final ExecutorService pool =
+        Executors.newFixedThreadPool(POOLSIZE);
+    static final ExecutorCompletionService<Integer> ecs =
+        new ExecutorCompletionService<Integer>(pool);
+    static boolean print = false;
+
+    public static void main(String[] args) throws Exception {
+        int max = 8;
+        int base = 10000;
+
+        if (args.length > 0)
+            max = Integer.parseInt(args[0]);
+
+        System.out.println("Warmup...");
+        oneTest( base );
+        Thread.sleep(100);
+        print = true;
+
+        for (int i = 1; i <= max; i += (i+1) >>> 1) {
+            System.out.print("n: " + i * base);
+            oneTest(i * base );
+            Thread.sleep(100);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+   }
+
+    static class Task implements Callable<Integer> {
+        public Integer call() {
+            int s = 0;
+            int l = System.identityHashCode(this);
+            for (int i = 0; i < 5; ++i) {
+                l = LoopHelpers.compute2(l);
+                s += LoopHelpers.compute1(l);
+            }
+            return new Integer(s);
+        }
+    }
+
+    static class Producer implements Runnable {
+        final ExecutorCompletionService cs;
+        final int iters;
+        Producer(ExecutorCompletionService ecs, int i) {
+            cs = ecs;
+            iters = i;
+        }
+        public void run() {
+            for (int i = 0; i < iters; ++i)
+                ecs.submit(new Task());
+        }
+    }
+
+    static void oneTest(int iters) throws Exception {
+        long startTime = System.nanoTime();
+        new Thread(new Producer(ecs, iters)).start();
+
+        int r = 0;
+        for (int i = 0; i < iters; ++i)
+            r += ecs.take().get().intValue();
+
+        long elapsed = System.nanoTime() - startTime;
+        long tpi = elapsed/ iters;
+
+        if (print)
+            System.out.println("\t: " + LoopHelpers.rightJustify(tpi) + " ns per task");
+
+        if (r == 0) // avoid overoptimization
+            System.out.println("useless result: " + r);
+
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/ExecutorCompletionService/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,128 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/FutureTask/CancelledFutureLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,145 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.5 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 CancelledFutureLoops.java
+ * @run main/timeout=2000 CancelledFutureLoops
+ * @summary Checks for responsiveness of futures to cancellation.
+ * Runs under the assumption that ITERS computations require more than
+ * TIMEOUT msecs to complete.
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+import java.util.*;
+
public final class CancelledFutureLoops {
    static final ExecutorService pool = Executors.newCachedThreadPool();
    static final LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
    static boolean print = false;
    static final int ITERS = 1000000;   // assumed to need more than TIMEOUT ms
    static final long TIMEOUT = 100;    // ms to let workers run before cancelling

    public static void main(String[] args) throws Exception {
        int maxThreads = 5;
        if (args.length > 0)
            maxThreads = Integer.parseInt(args[0]);

        print = true;

        for (int i = 2; i <= maxThreads; i += (i+1) >>> 1) {
            System.out.print("Threads: " + i);
            try {
                new FutureLoop(i).test();
            }
            catch(BrokenBarrierException bb) {
                // OK; ignore -- a cancelled worker can break the start barrier
            }
            catch(ExecutionException ee) {
                // OK; ignore -- futures[0].get() may surface a worker failure
            }
            Thread.sleep(TIMEOUT);
        }
        pool.shutdown();
	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
	    throw new Error();
    }

    // Submits nthreads copies of itself, cancels all but the first after
    // TIMEOUT, and checks the cancellations actually took effect.
    static final class FutureLoop implements Callable {
        private int v = rng.next();
        private final ReentrantLock lock = new ReentrantLock();
        private final LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
        private final CyclicBarrier barrier;
        private final int nthreads;
        FutureLoop(int nthreads) {
            this.nthreads = nthreads;
            // nthreads workers + the driver thread; action stamps start time.
            barrier = new CyclicBarrier(nthreads+1, timer);
        }

        final void test() throws Exception {
            Future[] futures = new Future[nthreads];
            for (int i = 0; i < nthreads; ++i)
                futures[i] = pool.submit(this);

            barrier.await();       // release the workers; timer records start
            Thread.sleep(TIMEOUT);
            boolean tooLate = false;
            for (int i = 1; i < nthreads; ++i) {
                // cancel(true) interrupts the worker; if a worker already
                // finished, responsiveness can't be checked this round.
                if (!futures[i].cancel(true))
                    tooLate = true;
                // Unbunch some of the cancels
                if ( (i & 3) == 0)
                    Thread.sleep(1 + rng.next() % 10);
            }

            // Block until the one uncancelled future completes normally.
            Object f0 = futures[0].get();
            if (!tooLate) {
                for (int i = 1; i < nthreads; ++i) {
                    if (!futures[i].isDone() || !futures[i].isCancelled())
                        throw new Error("Only one thread should complete");
                }
            }
            else
                System.out.print("(cancelled too late) ");

            long endTime = System.nanoTime();
            long time = endTime - timer.startTime;
            if (print) {
                double secs = (double)(time) / 1000000000.0;
                System.out.println("\t " + secs + "s run time");
            }

        }

        // Worker loop: lockInterruptibly() keeps each iteration responsive
        // to the interrupt delivered by cancel(true).
        public final Object call() throws Exception {
            barrier.await();
            int sum = v;
            int x = 0;
            int n = ITERS;
            while (n-- > 0) {
                lock.lockInterruptibly();
                try {
                    v = x = LoopHelpers.compute1(v);
                }
                finally {
                    lock.unlock();
                }
                sum += LoopHelpers.compute2(LoopHelpers.compute2(x));
            }
            return new Integer(sum);
        }
    }

}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/FutureTask/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,128 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantLock/CancelledLockLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,166 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.3 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 CancelledLockLoops.java
+ * @run main/timeout=2800 CancelledLockLoops
+ * @summary tests lockInterruptibly.
+ * Checks for responsiveness of locks to interrupts. Runs under that
+ * assumption that ITERS_VALUE computations require more than TIMEOUT
+ * msecs to complete.
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+import java.util.*;
+
+public final class CancelledLockLoops {
+    static final Random rng = new Random();
+    static boolean print = false;
+    static final int ITERS = 1000000;
+    static final long TIMEOUT = 100;
+
+    public static void main(String[] args) throws Exception {
+        int maxThreads = 5;
+        if (args.length > 0)
+            maxThreads = Integer.parseInt(args[0]);
+
+        print = true;
+
+        for (int i = 2; i <= maxThreads; i += (i+1) >>> 1) {
+            System.out.print("Threads: " + i);
+            try {
+                new ReentrantLockLoop(i).test();
+            }
+            catch(BrokenBarrierException bb) {
+                // OK, ignore
+            }
+            Thread.sleep(TIMEOUT);
+        }
+    }
+
+    static final class ReentrantLockLoop implements Runnable {
+        private int v = rng.nextInt();
+        private int completed;
+        private volatile int result = 17;
+        private final ReentrantLock lock = new ReentrantLock();
+        private final LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        private final CyclicBarrier barrier;
+        private final int nthreads;
+        ReentrantLockLoop(int nthreads) {
+            this.nthreads = nthreads;
+            barrier = new CyclicBarrier(nthreads+1, timer);
+        }
+
+        final void test() throws Exception {
+            Thread[] threads = new Thread[nthreads];
+            for (int i = 0; i < threads.length; ++i)
+                threads[i] = new Thread(this);
+            for (int i = 0; i < threads.length; ++i)
+                threads[i].start();
+            Thread[] cancels = (Thread[]) (threads.clone());
+            Collections.shuffle(Arrays.asList(cancels), rng);
+            barrier.await();
+            Thread.sleep(TIMEOUT);
+            for (int i = 0; i < cancels.length-2; ++i) {
+                cancels[i].interrupt();
+                // make sure all OK even when cancellations spaced out
+                if ( (i & 3) == 0)
+                    Thread.sleep(1 + rng.nextInt(10));
+            }
+            barrier.await();
+            if (print) {
+                long time = timer.getTime();
+                double secs = (double)(time) / 1000000000.0;
+                System.out.println("\t " + secs + "s run time");
+            }
+
+            int c;
+            lock.lock();
+            try {
+                c = completed;
+            }
+            finally {
+                lock.unlock();
+            }
+            if (c != 2)
+                throw new Error("Completed != 2");
+            int r = result;
+            if (r == 0) // avoid overoptimization
+                System.out.println("useless result: " + r);
+        }
+
+        public final void run() {
+            try {
+                barrier.await();
+                int sum = v;
+                int x = 0;
+                int n = ITERS;
+                boolean done = false;
+                do {
+                    try {
+                        lock.lockInterruptibly();
+                    }
+                    catch (InterruptedException ie) {
+                        break;
+                    }
+                    try {
+                        v = x = LoopHelpers.compute1(v);
+                    }
+                    finally {
+                        lock.unlock();
+                    }
+                    sum += LoopHelpers.compute2(x);
+                } while (n-- > 0);
+                if (n <= 0) {
+                    lock.lock();
+                    try {
+                        ++completed;
+                    }
+                    finally {
+                        lock.unlock();
+                    }
+                }
+                barrier.await();
+                result += sum;
+            }
+            catch (Exception ex) {
+                ex.printStackTrace();
+                return;
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantLock/LockOncePerThreadLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,132 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.3 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 LockOncePerThreadLoops.java
+ * @run main/timeout=15000 LockOncePerThreadLoops
+ * @summary Checks for missed signals by locking and unlocking each of an array of locks once per thread
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+import java.util.*;
+
+public final class LockOncePerThreadLoops {
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    static final LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
+    static boolean print = false;
+    static int nlocks = 50000;
+    static int nthreads = 100;
+    static int replications = 5;
+
+    public static void main(String[] args) throws Exception {
+        if (args.length > 0)
+            replications = Integer.parseInt(args[0]);
+
+        if (args.length > 1)
+            nlocks = Integer.parseInt(args[1]);
+
+        print = true;
+
+        for (int i = 0; i < replications; ++i) {
+            System.out.print("Iteration: " + i);
+            new ReentrantLockLoop().test();
+            Thread.sleep(100);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+    }
+
+    static final class ReentrantLockLoop implements Runnable {
+        private int v = rng.next();
+        private volatile int result = 17;
+        final ReentrantLock[]locks = new ReentrantLock[nlocks];
+
+        private final ReentrantLock lock = new ReentrantLock();
+        private final LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        private final CyclicBarrier barrier;
+        ReentrantLockLoop() {
+            barrier = new CyclicBarrier(nthreads+1, timer);
+            for (int i = 0; i < nlocks; ++i)
+                locks[i] = new ReentrantLock();
+        }
+
+        final void test() throws Exception {
+            for (int i = 0; i < nthreads; ++i)
+                pool.execute(this);
+            barrier.await();
+            barrier.await();
+            if (print) {
+                long time = timer.getTime();
+                double secs = (double)(time) / 1000000000.0;
+                System.out.println("\t " + secs + "s run time");
+            }
+
+            int r = result;
+            if (r == 0) // avoid overoptimization
+                System.out.println("useless result: " + r);
+        }
+
+        public final void run() {
+            try {
+                barrier.await();
+                int sum = v;
+                int x = 0;
+                for (int i = 0; i < locks.length; ++i) {
+                    locks[i].lock();
+                    try {
+                            v = x += ~(v - i);
+                    }
+                    finally {
+                        locks[i].unlock();
+                    }
+                    // Once in a while, do something more expensive
+                    if ((~i & 255) == 0) {
+                        sum += LoopHelpers.compute1(LoopHelpers.compute2(x));
+                    }
+                    else
+                        sum += sum ^ x;
+                }
+                barrier.await();
+                result += sum;
+            }
+            catch (Exception ie) {
+                return;
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantLock/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,128 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantLock/SimpleReentrantLockLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,137 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.3 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 SimpleReentrantLockLoops.java
+ * @run main/timeout=4500 SimpleReentrantLockLoops
+ * @summary multiple threads using a single lock
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+import java.util.*;
+
+public final class SimpleReentrantLockLoops {
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    static final LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
+    static boolean print = false;
+    static int iters = 1000000;
+
+    public static void main(String[] args) throws Exception {
+        int maxThreads = 5;
+        if (args.length > 0)
+            maxThreads = Integer.parseInt(args[0]);
+
+        print = true;
+
+        int reps = 2;
+        for (int i = 1; i <= maxThreads; i += (i+1) >>> 1) {
+            int n = reps;
+            if (reps > 1) --reps;
+            while (n-- > 0) {
+                System.out.print("Threads: " + i);
+                new ReentrantLockLoop(i).test();
+                Thread.sleep(100);
+            }
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+    }
+
+    static final class ReentrantLockLoop implements Runnable {
+        private int v = rng.next();
+        private volatile int result = 17;
+        private final ReentrantLock lock = new ReentrantLock();
+        private final LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        private final CyclicBarrier barrier;
+        private final int nthreads;
+        ReentrantLockLoop(int nthreads) {
+            this.nthreads = nthreads;
+            barrier = new CyclicBarrier(nthreads+1, timer);
+        }
+
+        final void test() throws Exception {
+            for (int i = 0; i < nthreads; ++i)
+                pool.execute(this);
+            barrier.await();
+            barrier.await();
+            if (print) {
+                long time = timer.getTime();
+                long tpi = time / ((long)iters * nthreads);
+                System.out.print("\t" + LoopHelpers.rightJustify(tpi) + " ns per lock");
+                double secs = (double)(time) / 1000000000.0;
+                System.out.println("\t " + secs + "s run time");
+            }
+
+            int r = result;
+            if (r == 0) // avoid overoptimization
+                System.out.println("useless result: " + r);
+        }
+
+        public final void run() {
+            try {
+                barrier.await();
+                int sum = v;
+                int x = 0;
+                int n = iters;
+                do {
+                    lock.lock();
+                    try {
+                        if ((n & 255) == 0)
+                            v = x = LoopHelpers.compute2(LoopHelpers.compute1(v));
+                        else
+                            v = x += ~(v - n);
+                    }
+                    finally {
+                        lock.unlock();
+                    }
+                    // Once in a while, do something more expensive
+                    if ((~n & 255) == 0) {
+                        sum += LoopHelpers.compute1(LoopHelpers.compute2(x));
+                    }
+                    else
+                        sum += sum ^ x;
+                } while (n-- > 0);
+                barrier.await();
+                result += sum;
+            }
+            catch (Exception ie) {
+                return;
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantLock/TimeoutLockLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,146 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.5 07/05/17
+ * @bug 4486658 5031862
+ * @compile -source 1.5 TimeoutLockLoops.java
+ * @run main TimeoutLockLoops
+ * @summary Checks for responsiveness of locks to timeouts.
+ * Runs under the assumption that ITERS computations require more than
+ * TIMEOUT msecs to complete, which seems to be a safe assumption for
+ * another decade.
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+import java.util.*;
+
+public final class TimeoutLockLoops {
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+    static final LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
+    static boolean print = false;
+    static final int ITERS = Integer.MAX_VALUE;
+    static final long TIMEOUT = 100;
+
+    public static void main(String[] args) throws Exception {
+        int maxThreads = 100;
+        if (args.length > 0)
+            maxThreads = Integer.parseInt(args[0]);
+
+
+        print = true;
+
+        for (int i = 1; i <= maxThreads; i += (i+1) >>> 1) {
+            System.out.print("Threads: " + i);
+            new ReentrantLockLoop(i).test();
+            Thread.sleep(10);
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+    }
+
+    static final class ReentrantLockLoop implements Runnable {
+        private int v = rng.next();
+        private volatile boolean completed;
+        private volatile int result = 17;
+        private final ReentrantLock lock = new ReentrantLock();
+        private final LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+        private final CyclicBarrier barrier;
+        private final int nthreads;
+        ReentrantLockLoop(int nthreads) {
+            this.nthreads = nthreads;
+            barrier = new CyclicBarrier(nthreads+1, timer);
+        }
+
+        final void test() throws Exception {
+            for (int i = 0; i < nthreads; ++i) {
+                lock.lock();
+                pool.execute(this);
+                lock.unlock();
+            }
+            barrier.await();
+            Thread.sleep(TIMEOUT);
+            while (!lock.tryLock()); // Jam lock
+            //            lock.lock();
+            barrier.await();
+            if (print) {
+                long time = timer.getTime();
+                double secs = (double)(time) / 1000000000.0;
+                System.out.println("\t " + secs + "s run time");
+            }
+
+            if (completed)
+                throw new Error("Some thread completed instead of timing out");
+            int r = result;
+            if (r == 0) // avoid overoptimization
+                System.out.println("useless result: " + r);
+        }
+
+        public final void run() {
+            try {
+                barrier.await();
+                int sum = v;
+                int x = 17;
+                int n = ITERS;
+                final ReentrantLock lock = this.lock;
+                for (;;) {
+                    if (x != 0) {
+                        if (n-- <= 0)
+                            break;
+                    }
+                    if (!lock.tryLock(TIMEOUT, TimeUnit.MILLISECONDS))
+                        break;
+                    try {
+                        v = x = LoopHelpers.compute1(v);
+                    }
+                    finally {
+                        lock.unlock();
+                    }
+                    sum += LoopHelpers.compute2(x);
+                }
+                if (n <= 0)
+                    completed = true;
+                barrier.await();
+                result += sum;
+            }
+            catch (Exception ex) {
+                ex.printStackTrace();
+                return;
+            }
+        }
+    }
+
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantReadWriteLock/LoopHelpers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,128 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+/**
+ * Misc utilities in JSR166 performance tests
+ */
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.*;
+
+class LoopHelpers {
+
+    // Some mindless computation to do between synchronizations...
+
+    /**
+     * generates 32 bit pseudo-random numbers.
+     * Adapted from http://www.snippets.org
+     */
+    public static int compute1(int x) {
+        int lo = 16807 * (x & 0xFFFF);
+        int hi = 16807 * (x >>> 16);
+        lo += (hi & 0x7FFF) << 16;
+        if ((lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        lo += hi >>> 15;
+        if (lo == 0 || (lo & 0x80000000) != 0) {
+            lo &= 0x7fffffff;
+            ++lo;
+        }
+        return lo;
+    }
+
+    /**
+     *  Computes a linear congruential random number a random number
+     *  of times.
+     */
+    public static int compute2(int x) {
+        int loops = (x >>> 4) & 7;
+        while (loops-- > 0) {
+            x = (x * 2147483647) % 16807;
+        }
+        return x;
+    }
+
+    /**
+     * An actually useful random number generator, but unsynchronized.
+     * Basically same as java.util.Random.
+     */
+    public static class SimpleRandom {
+        private final static long multiplier = 0x5DEECE66DL;
+        private final static long addend = 0xBL;
+        private final static long mask = (1L << 48) - 1;
+        static final AtomicLong seq = new AtomicLong(1);
+        private long seed = System.nanoTime() + seq.getAndIncrement();
+
+        public void setSeed(long s) {
+            seed = s;
+        }
+
+        public int next() {
+            long nextseed = (seed * multiplier + addend) & mask;
+            seed = nextseed;
+            return ((int)(nextseed >>> 17)) & 0x7FFFFFFF;
+        }
+    }
+
+    public static class BarrierTimer implements Runnable {
+        public volatile long startTime;
+        public volatile long endTime;
+        public void run() {
+            long t = System.nanoTime();
+            if (startTime == 0)
+                startTime = t;
+            else
+                endTime = t;
+        }
+        public void clear() {
+            startTime = 0;
+            endTime = 0;
+        }
+        public long getTime() {
+            return endTime - startTime;
+        }
+    }
+
+    public static String rightJustify(long n) {
+        // There's probably a better way to do this...
+        String field = "         ";
+        String num = Long.toString(n);
+        if (num.length() >= field.length())
+            return num;
+        StringBuffer b = new StringBuffer(field);
+        b.replace(b.length()-num.length(), b.length(), num);
+        return b.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantReadWriteLock/MapLoops.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,191 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+
+/*
+ * @test 1.4 07/05/17
+ * @bug 4486658
+ * @compile -source 1.5 MapLoops.java
+ * @run main/timeout=4700 MapLoops
+ * @summary Exercise multithreaded maps, by default ConcurrentHashMap.
+ * Multithreaded hash table test.  Each thread does a random walk
+ * though elements of "key" array. On each iteration, it checks if
+ * table includes key.  If absent, with probability pinsert it
+ * inserts it, and if present, with probability premove it removes
+ * it.  (pinsert and premove are expressed as percentages to simplify
+ * parsing from command line.)
+ */
+
+import java.util.*;
+import java.util.concurrent.*;
+
+public class MapLoops {
+    static final int NKEYS = 100000;
+    static int pinsert     = 60;
+    static int premove     = 2;
+    static int maxThreads  = 5;
+    static int nops        = 1000000;
+    static int removesPerMaxRandom;
+    static int insertsPerMaxRandom;
+
+    static final ExecutorService pool = Executors.newCachedThreadPool();
+
+    public static void main(String[] args) throws Exception {
+
+        Class mapClass = null;
+        if (args.length > 0) {
+            try {
+                mapClass = Class.forName(args[0]);
+            } catch(ClassNotFoundException e) {
+                throw new RuntimeException("Class " + args[0] + " not found.");
+            }
+        }
+        else
+            mapClass = RWMap.class;
+
+        if (args.length > 1)
+            maxThreads = Integer.parseInt(args[1]);
+
+        if (args.length > 2)
+            nops = Integer.parseInt(args[2]);
+
+        if (args.length > 3)
+            pinsert = Integer.parseInt(args[3]);
+
+        if (args.length > 4)
+            premove = Integer.parseInt(args[4]);
+
+        // normalize probabilities wrt random number generator
+        removesPerMaxRandom = (int)(((double)premove/100.0 * 0x7FFFFFFFL));
+        insertsPerMaxRandom = (int)(((double)pinsert/100.0 * 0x7FFFFFFFL));
+
+        System.out.println("Using " + mapClass.getName());
+
+        Random rng = new Random(315312);
+        Integer[] key = new Integer[NKEYS];
+        for (int i = 0; i < key.length; ++i)
+            key[i] = new Integer(rng.nextInt());
+
+        // warmup
+        System.out.println("Warmup...");
+        for (int k = 0; k < 2; ++k) {
+            Map<Integer, Integer> map = (Map<Integer,Integer>)mapClass.newInstance();
+            LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+            CyclicBarrier barrier = new CyclicBarrier(1, timer);
+            new Runner(map, key, barrier).run();
+            map.clear();
+            Thread.sleep(100);
+        }
+
+        for (int i = 1; i <= maxThreads; i += (i+1) >>> 1) {
+            System.out.print("Threads: " + i + "\t:");
+            Map<Integer, Integer> map = (Map<Integer,Integer>)mapClass.newInstance();
+            LoopHelpers.BarrierTimer timer = new LoopHelpers.BarrierTimer();
+            CyclicBarrier barrier = new CyclicBarrier(i+1, timer);
+            for (int k = 0; k < i; ++k)
+                pool.execute(new Runner(map, key, barrier));
+            barrier.await();
+            barrier.await();
+            long time = timer.getTime();
+            long tpo = time / (i * (long)nops);
+            System.out.print(LoopHelpers.rightJustify(tpo) + " ns per op");
+            double secs = (double)(time) / 1000000000.0;
+            System.out.println("\t " + secs + "s run time");
+            map.clear();
+        }
+        pool.shutdown();
+	if (! pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS))
+	    throw new Error();
+    }
+
+    static class Runner implements Runnable {
+        final Map<Integer,Integer> map;
+        final Integer[] key;
+        final LoopHelpers.SimpleRandom rng = new LoopHelpers.SimpleRandom();
+        final CyclicBarrier barrier;
+        int position;
+        int total;
+
+        Runner(Map<Integer,Integer> map, Integer[] key,  CyclicBarrier barrier) {
+            this.map = map;
+            this.key = key;
+            this.barrier = barrier;
+            position = key.length / 2;
+        }
+
+        int step() {
+            // random-walk around key positions,  bunching accesses
+            int r = rng.next();
+            position += (r & 7) - 3;
+            while (position >= key.length) position -= key.length;
+            while (position < 0) position += key.length;
+
+            Integer k = key[position];
+            Integer x = map.get(k);
+
+            if (x != null) {
+                if (x.intValue() != k.intValue())
+                    throw new Error("bad mapping: " + x + " to " + k);
+
+                if (r < removesPerMaxRandom) {
+                    // get awy from this position
+                    position = r % key.length;
+                    map.remove(k);
+                    return 2;
+                }
+                else
+                    total += LoopHelpers.compute2(LoopHelpers.compute1(x.intValue()));
+            }
+            else {
+                if (r < insertsPerMaxRandom) {
+                    map.put(k, k);
+                    return 2;
+                }
+            }
+            return 1;
+        }
+
+        public void run() {
+            try {
+                barrier.await();
+                int ops = nops;
+                while (ops > 0)
+                    ops -= step();
+                barrier.await();
+            }
+            catch (Exception ex) {
+                ex.printStackTrace();
+            }
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/java/util/concurrent/locks/ReentrantReadWriteLock/RWMap.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,114 @@
+/*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * This file is available under and governed by the GNU General Public
+ * License version 2 only, as published by the Free Software Foundation.
+ * However, the following notice accompanied the original version of this
+ * file:
+ *
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ */
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.locks.*;
+
+
+/**
+ * This is an incomplete implementation of a wrapper class
+ * that places read-write locks around unsynchronized Maps.
+ * Exists as a sample input for MapLoops test.
+ */
+
+public class RWMap implements Map {
+    private final Map m;
+    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
+
+    public RWMap(Map m) {
+        if (m == null)
+            throw new NullPointerException();
+        this.m = m;
+    }
+
+    public RWMap() {
+        this(new TreeMap()); // use TreeMap by default
+    }
+
+    public int size() {
+        rwl.readLock().lock(); try {return m.size();} finally { rwl.readLock().unlock(); }
+    }
+    public boolean isEmpty(){
+        rwl.readLock().lock(); try {return m.isEmpty();} finally { rwl.readLock().unlock(); }
+    }
+
+    public Object get(Object key) {
+        rwl.readLock().lock(); try {return m.get(key);} finally { rwl.readLock().unlock(); }
+    }
+
+    public boolean containsKey(Object key) {
+        rwl.readLock().lock(); try {return m.containsKey(key);} finally { rwl.readLock().unlock(); }
+    }
+    public boolean containsValue(Object value){
+        rwl.readLock().lock(); try {return m.containsValue(value);} finally { rwl.readLock().unlock(); }
+    }
+
+
+    public Set keySet() { // Not implemented
+        return null;
+    }
+
+    public Set entrySet() { // Not implemented
+        return null;
+    }
+
+    public Collection values() { // Not implemented
+        return null;
+    }
+
+    public boolean equals(Object o) {
+        rwl.readLock().lock(); try {return m.equals(o);} finally { rwl.readLock().unlock(); }
+    }
+    public int hashCode() {
+        rwl.readLock().lock(); try {return m.hashCode();} finally { rwl.readLock().unlock(); }
+    }
+    public String toString() {
+        rwl.readLock().lock(); try {return m.toString();} finally { rwl.readLock().unlock(); }
+    }
+
+
+
+    public Object put(Object key, Object value) {
+        rwl.writeLock().lock(); try {return m.put(key, value);} finally { rwl.writeLock().unlock(); }
+    }
+    public Object remove(Object key) {
+        rwl.writeLock().lock(); try {return m.remove(key);} finally { rwl.writeLock().unlock(); }
+    }
+    public void putAll(Map map) {
+        rwl.writeLock().lock(); try {m.putAll(map);} finally { rwl.writeLock().unlock(); }
+    }
+    public void clear() {
+        rwl.writeLock().lock(); try {m.clear();} finally { rwl.writeLock().unlock(); }
+    }
+
+}
--- a/j2se/test/javax/management/ImplementationVersion/ImplVersionCommand.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/javax/management/ImplementationVersion/ImplVersionCommand.java	Fri May 25 00:49:14 2007 +0000
@@ -25,7 +25,7 @@
  * @(#)file      ImplVersionCommand.java
  * @(#)author    Sun Microsystems, Inc.
  * @(#)version   1.6
- * @(#)date      07/05/06
+ * @(#)date      07/05/24
  */
 
 import javax.management.MBeanServer;
--- a/j2se/test/javax/management/ImplementationVersion/ImplVersionReader.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/javax/management/ImplementationVersion/ImplVersionReader.java	Fri May 25 00:49:14 2007 +0000
@@ -25,7 +25,7 @@
  * @(#)file      ImplVersionReader.java
  * @(#)author    Sun Microsystems, Inc.
  * @(#)version   1.4
- * @(#)date      07/05/06
+ * @(#)date      07/05/24
  */
 
 import java.io.BufferedReader;
--- a/j2se/test/javax/management/monitor/MBeanServerBuilderImpl.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/javax/management/monitor/MBeanServerBuilderImpl.java	Fri May 25 00:49:14 2007 +0000
@@ -25,7 +25,7 @@
  * @(#)file      MBeanServerBuilderImpl.java
  * @(#)author    Sun Microsystems, Inc.
  * @(#)version   1.4
- * @(#)date      07/05/06
+ * @(#)date      07/05/24
  */
 
 import javax.management.MBeanServer;
--- a/j2se/test/javax/management/monitor/MBeanServerForwarderInvocationHandler.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/javax/management/monitor/MBeanServerForwarderInvocationHandler.java	Fri May 25 00:49:14 2007 +0000
@@ -25,7 +25,7 @@
  * @(#)file      MBeanServerForwarderInvocationHandler.java
  * @(#)author    Sun Microsystems, Inc.
  * @(#)version   1.4
- * @(#)date      07/05/06
+ * @(#)date      07/05/24
  */
 
 import java.lang.reflect.InvocationHandler;
--- a/j2se/test/javax/management/remote/mandatory/version/ImplVersionCommand.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/javax/management/remote/mandatory/version/ImplVersionCommand.java	Fri May 25 00:49:14 2007 +0000
@@ -25,7 +25,7 @@
  * @(#)file      ImplVersionCommand.java
  * @(#)author    Sun Microsystems, Inc.
  * @(#)version   1.4
- * @(#)date      07/05/06
+ * @(#)date      07/05/24
  */
 
 import javax.management.remote.rmi.RMIJRMPServerImpl;
--- a/j2se/test/javax/management/remote/mandatory/version/ImplVersionReader.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/javax/management/remote/mandatory/version/ImplVersionReader.java	Fri May 25 00:49:14 2007 +0000
@@ -25,7 +25,7 @@
  * @(#)file      ImplVersionReader.java
  * @(#)author    Sun Microsystems, Inc.
  * @(#)version   1.4
- * @(#)date      07/05/06
+ * @(#)date      07/05/24
  */
 
 import java.io.BufferedReader;
--- a/j2se/test/sun/net/www/http/ChunkedInputStream/test.txt	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/net/www/http/ChunkedInputStream/test.txt	Fri May 25 00:49:14 2007 +0000
@@ -1,2 +1,2 @@
-Fo`kMsMr*sth,dr2oD]eg<N\U4KGXn2VQ,&'!;AW&I["@nZQJWPG$PpAl;1AZ@R:DZ4;<,C-&1H7ZAjAq7LN`NV[,YDa^JKW_!oW6>=tA9q;%*^5$1[C24!O=7p0LB8c6EQ:bpii++fSJsNl3$mN#K[2eL#%d>c<39pV55VfO? t5a5 >+g-X9Yc=es5sI-ts)A9R5TqE?/<M.C8Zl  ^n"s`BDQp*W@oVW;]I'.h/b1:4!=.W%/01LcYF7=Um1(.GNWiH:Kp?Ch12Bg[5M`,TTE5TN\pba^I;oM2Z<O9%>*KqR@#RqN\;Cb99`H0d5^K*=fJ<]%Sd>i1hpA>313bbb#MCtA</sM^aXko^.Z7+VJ+(EYJgJ;j*\%5@13(Wd^;PcjC44A  I$8,VM>Eg2T*ancMc.8]W(7%'g.#VpQC!2AZ(0oW12mIhnJ-G]D;Q6OF(eon*,UL_H'UE 4Y? 1?_,2ia$K^X?R  hhS3!R5BC,T<ZN5!YZd6fQIR#P5G9=')C%:&VSLdN^\Q=(FarUg3XE_^O&Pgd`)M[,?JMA(tc@C?lCJElM>>@#rs1 gF*lPf8,*cCi:nAaE%*,qP_ofK<pL[$Mk;2G/Yp+/G1pr\l^!M.q<kd)1"Vlkr(fg;1^t-eBC0# -tR@1_oAY#7OSHnE=pt A/h_K[T[d[A-EmPnXA5VV2dAf(\"gAR$FA%$4@ TTG_O?MHmO[);^]F##V68*QQ'&B%p<rfl-LXiD[Oi3s;l6aCfB$UP,?TEArTRhX@<"d8%#K%`]$V=3-:kb%mdJi8CW_5"V/'4\V6cVn(YGRK[4odBH!?#G1^%*8Li_!UI[(4t4WjSQ6>]hZ@>cQ?A;,EH7fU]GR2p25#UA$.oE5/ETshg=9Vk_;RC#<^nK?)PKL(YCWdtBChB*T>[%;/@Rq[Alea)>Ib^QrKHG2KcbE0mD&/'eS*tX&\8+U,Q#Zm_`8?%In$FLme]D% ()6T<AlGK&rH3GdpT73OclHAtINAkXo9otK-O+;eAd>Es4q.E.\S$= 7iO_5W?K\UA2:WSgk?-p@=SQU#E/1!4.@--8IW]kLr"j;$TCUr,R,s7OlD-(o#dlEef9VK?'53Xq]/3-s!3B?Z&#3Ok&d'a*X1TRs$(?O`DTmX3mPX,>=,![AO/gEO^,W^00>Aq\)M=a3:U`AVleR6-Y[4qdj1Nl,gEDi']t&><6n0#)'Z0Ci.pZY$d1A4E?FrX9Blka%LHWMd ilA@#'i'db3# ]+Mf4VRQn6'nXg#E)I(8<5I&(lT5Y$1s.5[0tYd]IbLkgGlt%?^MHq!k!h'FP18>%"oqn+V?WX<FN2Q /=3aW@ri7A8]i.)<pP=;OJ(Z)@(kk2Yt5_?/'hVjq&n'-sU`1& BJfe9*!JgT84l]Po+=(geID#F=d-($??n#64nRW<AAK%Zd$#23 k]M-`N#T9dnAkX-$WAJJ"GjGp($L]#7h9WMkYN3Z:#eW-.0F=5CeT(K$afW!t/NcVDa*SIRV`ASWk#UnIfb8Q.&pG\)7]E\abi^ )"W*e4ab\s"V@D%?Hp`?sB,We. 
-tdprb$B&>AK,6JP\KYb5jIJZAG 7F"0a<l?+^R6iPro=S`r-VWEcYRA'-*)?Hn;sK<dN;B2ZRZ&EsB20,MGK34ODqIA<^D(#*Ahcm]XRb_7-jtt0AegK\`A+pX5l dcLk(K!!FcE-9CcI.`M5%N(X$)lMOAaX;8Aei1;3MS<c#/eKhS]\\X;7&aoO:S$^efs9po;[JAY2LrVMALKAXAoY)jW2+n6'?#,UrD.3Y);"?JBb_5B7T]o/.dZc\qaF5-] ADXZJM:f%F&bDZ8dBK;"iW]_U_?TeC+[*a.o[MR.$G/7^Q\fRhVa BfIhNA&Z]=;<#mFl<G$/V=#WI\cG0;+g e-*7cAg<UIci"i+If;"PRrU&C;4poA#%*GfQ>'"A"g<61lB:'^;idCr*82Z>MEOCeHqH^Se6;OgKSWR]"Kji7'1:VI)AC"nA7mhWM*okf"4,G7hm:c6B&$3CM.8fIbb[kPX',j^&K6"IoHXPQgY>@E=HE0-$10tpQ mXol_g,-rmo11C4F3b8t;@JINZ.t#TT7eG.+CjQ&NN)IqI3hE6EF'h)!f6spqV,fm4`H<PRgV5N*r3r2%sSB.9g3R,SAqV8XY<n5WR\)2)2GA H)bCIH2iNTJFY_lg%KBX22=lb66^K?_K<FCbElA(IEaX`H',\"!Z9l][*XjKscs(;`\fgDRPrVO1.1N%NnWgl;I^B__B/-Zs%!K(5jV'A!$">irb'7+pr8Y,NqR^eF_23mt@C3@MGAHm0d;3rY&+tsK(BK30:V]"gTpI=$ms8j*+qddro#A(Y64/i!F2PA3<9/WQX?l6QV^V&)m!C]F$@ 468?JD8p``2c.;]3mkp)Bl'X2PpYSi+[t<UF\X)=EGfr K?A/;Dgq)qD4.4c)7Y&PKg>Es^%2h ,)YbjfMA5:dYE'1)rb]]/j1?:Iq37N8f.>+UQ "f5qFFYloKPQ`I#0ZFLrgQ1WQ\:)L-6\$Q1UDgI!d,_#),YE90K^EY0*0saQnl48<1`(@3L%LshoA?:5Gd0,6/b0!pIibT2UW9c()EH0.+>aoE%`]e[:.A!9^rMan?\peDIlNkjaJiX-=gYcFoXXbTYD8+Sp8US^t0cBf3@I?<7&f 2%n9!]t9rF_flN_U)74!ghb.I!'D4n5dim##5j:?GPH_3Q;gB$j'qNk[H,%n6dc0aBl&>PN-X3J.[pNK 2S'oEm_dsA1&+b`$3h32%m)"QNiJIWX9%*49_)I&ARkIm:gM(Vh!$bI1iSC/;/efNr_`sWbT'>T;fNIJ<%NW0%A)]npAdI>\KaA[.G-8An8&VN6)A$o4,@31\];&k4*=*?pasKksN1=1l-ISJEY/_`=U]j[Td-9;(OFh(IHp70H!2kN/9to//b/_ar&/Xe9CmaqX:`X,S\^A+AQes./s'F:2ZMY :m H+3XS)l35Y]AicKV"G?4XERT;gD?+<J[(S*sDO2A:?Dc6?!]qOVRIYF4bMiFBo]LVRGl:L0SGI!&i%Rq<%KXn8l<)B"OVtd?=)5)>)S.0?,8X=414C$hD`@?3"Sc/s>A[bFPR^]CnN8ml`[pD$q%M,aSCC$b$#D!JN=;^9VLl>P$8s).J7tC!^LQ,c4dPAoU,M7NmQk=90>$=:&_J;s?Y$/#I^h_`%2K$[^S[7f=Oa21<Q!o%eb5FXT:E'=\8k,(oaOF#76[i!T,B`i3",0)r[-8]tkf-ZUWdKr+Sf2dM',1-Pkn/V:p1=[ @a@rRgtt79k<Y"HK+;U;l-5*JOGJ: IWH[>sm+aM=G89g8RD:KQ)pRKLf'/>$ciA'Lr7+X'%1D+6hDh@8G#+B:/_"EaEc)VPU$kLC32rR-?HCm_QlA2Qlf[@n^K'OC7CDEFI]`;C5.EQ$V0,V.<E#;<.J8PiYa`M$FYCP[8$3Dif8K'RSc@r@MF 
XRPUqE?nmRhVE4D<^[%5D(^.>V.t4OQ_mqq37`53H)8rAGPldR>'1DEec?"2IGY,ane;V7C:K/__[:&<dboAG@@WqJZ:3<^.ffSXTgT(X./0T_"B06!]fejtX+?"dGC#nSP\JUSFU'mS22bI#WAp\83A2T(Mah,S`@56_B\r2.1Q9=f(-6K6YZ#Gf\;o,  7.cIAMP_(5X2O("&7fr`;lO2!5=W,8`9`s/6N!+6+A!PJfWK\A< XSr6gdpo=eE!e1jqMR"ptea0A+i[Qf<Y ,)YtQ4qA7e+#E6pcFs\c^_<f0?p9bUt .#?dSj*JeK,#2QE0&9/;dM6iN8/$K%=&i[%pcCmf(;`,.]qK;nG4UM2&k%eI;9$sf-XZBajKVLE==J>-Q(Y3DN39tKGiOML4ZJ/]jY*6K("f?j]d5Q`"(!a6^EnhMXs-4(YR2'J@A!hV_VYI.'P,/ct:aiY%I5X*@<<!f2$Fd[n#8H>Xt!%`[GkeOtZa[( XBpK2#?$1UiL2i!?t8SDQ]UBEW@5SN:$qlSg'CJ>p4G44h[U)U*4s.C'ieWRf:.VF]R(Q.iRPRAi0HUFmR8GAtnR1D&03T):Hpn+RlJa>:[U<*83'R<ZrVPX-mARst:2b;nm:8`R ah-e5GLA0Go'>8Fr%W$]?l)KANM[Ttj gKAfib:.oo,D?jMTW<3DIFMR\io*NBBA$.9'8A6g#(FsamZsV[>Ml`Y&<W"Tq)i8'ROHA9<4@#a56nIM,ME'>LJ18>52+]9pLqoAIWG##HMS2`^SX$'N0dP/)7*DVh;<llE__mo^GAHFXh+,nhg[,\S`oWd!dH+iUA)Cg@qlT jQ?8()W2`0RA9_i Y;!EBdA$_[IX'ERYmL[MkeMnHN0 R"8Nhn7>%)Hh\ e7F@VP61nAQm%^$@;fs\@mrNC5=Or32)c[O6G3#OrFN>k;m0oPe/]0!.i=5Qp+Is@bJ<-cLh_.Ia91f6M+7l;+\mH7V$Ai"4mAp<H ?)6O[-8rE,S6O8'mV??t@ZF^mZ@@"eZhf6M(b3j32;E*13J*rZY9jWB=Zn[(?GU-9>-#Y(jDKCL:4Vr!h#<idLO<paCgYJdN^6rIJk_[]])O!JW%n`EiNq^ZUXL#,))es;M*=%h'#*Mlod2%-Db)MM 0OJMn95,+N]`l#5MM/:hb8V0R/n[)D3WG#@^.(#o%be-5gJH-Fs"`2VH 5"Aj/cV#ZXCH:9ko0jWL:<B+)"J`.l<8K*)-A:QX/($milWkP1L1aQR%"0GU-_".E5"J>k8<CSPU?ieVSFh<(F niAmnHk!l$\sf_=jraa7AK2de2AF2>R"*n88`WB?N)/o9KG\8A]PmSnj,=q=h!%(X+*%Z_/ApSK0`.L;*%@G22NlmG='pP%,JBl$WZ:AS)rlaK/^(J+B^6fW*E C37[ae^A]L@g$;/Ho)o)1:W6q><Q\E7s9(h`c'OTETn:!q`s_K2),-SGXf+!'fgMAqN9/N3,ZS`+`VOd9oT'@B@5ZB<Pk>GbC42!!oDY5#cMQmK3-<)(m#=s%&K37[:CU-hm->V"/AM\ 2BfkNtlILl`fPWRG^+d_9rcNQFckrMM.!3jP2V<b(:QZiAo6XdK KD5-f95M&=Y$'1hV=]F?b6E"^W.I2<7kgnUQlj;YQ'W8NFe?Pc%[Z^BEnel*gUJ:gp`7lGR<sWo*:L^,I@i9KoZ#DK_R/P.QZ(FLN!RO<DH7qY33s88-V/:nIV`_e;!=EW,M.@b!K8$g(:&dKd 1+ghHQ_)#L(]BR=Qg"d1(IaaPU#L=m;U#U;_cR6@A4CTAWnRrdMHjg%2blsYFa!,V9q^;*o"9e]j/-fg.; I@=hri190<\Ld;U^_L"G%ZtjfQor;mA/V<SE%9 DK;JB9^>fg5_YboSY$].O r)#c8_"RFR#WEt(dQCYF8h;ngfPe^Ero9^bWGha"p"J]+f.<>A`G s^Xs9s%I&aF3k(BC@WZ"J9-425-K3884cC_#0Z2 
>%o<rip@)O#%A62g?fTsOAa4a36q&D3AJ!JkoSjsXE1#n12,,+n* ,d(o'Z+`;2,RCd]9[%@hTE9r8I-HJOJ$P6':P0,A<mgI%NSe 5tm=.l9eXj9T'I%al,J9A(=rRomPh3n@OVjk>&UIrn.]!F8I#*Th=17cHo>^;_8dB:>NMm'XX<sZM8$3UsY(sJ-An`L0_.!/GhED.&$37=>@beT#@YdX3DFir7"Q'tVgC2$3="Ke9<0Z/i"]":tD-/nfkbdT_X/f)ka]=.l!c?IXTW,9!34IK/'*,/.8OA`8aoXCBI)SGDZdi3b?a`%bI;nNAAfg5KXSC8NdCa(!E:T*fIo2V)8Q;j q<42ACAa.4[Y@<OX&G&JJ[bW[3;Akj&&e!.;;=)abrA2*`5TQlbpi!?P,f_eV#Tt G!rd bi-Rf'-p%U@V7Q\/I?J+mR0m/p6VW4g26;7BUG<N[UJ=U9+@IQ:rA.O&(=O#KK2dj46OP&TFFE+h*8()fLTP[2OW qr=`X;/tCWh905f8f52;D?m-BG4h=I(A]L(K?G&YK'0eI=%:%%>fQscTe-Af]R9Z!B&f@3T2^G+NfGDgHPP!J,Qi5oCC\$B@I3Eg/6D3&MOfs$K9V),ESse*!h_BL_h-mA_19@P&s3i_0iHT+8#"? A)te`srfDN:Qjr =9Q5@m&S8U*eG`k+UC@Op]SVZp7bRqca5TaQ@_g,K"O>Mk8Tkh6K%[TeG=k1P'^k4`43Kp'N+A3+'>e M>-a!M@QEPf>BY.M^D#@!8RAn&" lZp*=gJD#>l.44)c8kJ0EjKn@q4]@Y"mKH\t@=Q!ISeN@[A=q`kl;K32M1r$LV+mb_ni\;IE,6>d$t)B ABH]7&-OK7"('Usf4<rZ=KP(HgBg&TC*^@Z$<'X5<C=+P=iUk1OUaFWGOg<I_Ac$@I-'EcO]6eGc.GF5ZgC8Y jRT@J:3/#*_>U>;ZrgSANg+q7P8tFk0)r%3<-aN*%<p`FYk>`<IX2bS/j42rGs'?VYO0`OQgi8T`, r'(&F7Y<g4oA"(o?:r.*(e6<hBRF( '"$j+Ci Xpr#5Iee>ZUL-14<T&jITd+]Vlgb4/,'YE&ftJ:Qe1+E&F;>2$\*VD79cs,gWm0n+DS7bcA,c14[CZQn]kb/-$nM,K;F";E%4X[R#GGd<f86&...O.Mb+V3jBs5@hK]OH/lW.[;O/J'sa]E4'^eHRV?!l@QpA/];D2c3j\<qo%GhV=[Zb3&I;=(-;)O9m*P7'-k/Kp:NEm\OaD A\ZA[0A5r]:" %S:V%Mk`0rTs8t-U^gGBcn!om 8^-6V&T;$>pX"!h`j=A9_%pt4mZ]LJoOMh#$*nX57JktJ5HI?#ne/qIMd:?F9:q*_UG%ILCdH>fQ\f#>Y7OP(+"C'HD9a&F`'6ir+QkiB`,/EAT'IaBH$'gIAGL>;RtSL"1gO>XeX.L 1SZ+A]31S4W9T>4EJSpPk<4#("#CaOV>>iGftc7?5[?(T^qc:kl2I'#.t^6,`H4W!DfMI9oGt$p0>5f.Z>JO`^Be%q=/\eg]7MaOY:2_,]-,b!A;;_OR A5P.n%Z7YLIe^)1_$N$?&>oA6$59qS8"@VbD8bV:ON"!]`jC@4n:6eDT)3E=(orMJ<ft(p).*n&m(@gO<&[R1jce$dE08i+I=@4P?/E&(bWUdCBsK;OmWDah$02GZfA?,bY7&S=OS1LU>r+U/U-E%h*J> ]QinJS"Z`-9QP2rBF?g#8m PlBDJg^)p'Y4bRk OZ\`0<"p0#`Fq</t8\@flWcEKrMGqD-Y6;3'C<B:6'/BGq9/On3bqOa">LDRa_Md",AR>_7tqgAI6q>F\ rUP2h6P5Lf!WmaGeslT$$:km:"^b98>B@[58%Vk8o[4,3`i<A:q'ZH_pY%"Uo'`nA`#.43cb+C9`m)k1!%m`%&mseOt9o+e2ctA0Xb8dWXkl.0F'&Ws"&JQZjC1QUQf.DqldZ3oGQ+pSCX4l +Al : 
eAjU?!67ReeTYltifnT*HmKGP&t$Js=8ZnCR/*m-r_j5Ydp3Yr. M0aD?'P4lkVs=?d(*<BdY]jr[SC$jp_YS)j2F=AR*8^m2hgOQOSV_^JjP#sX/\A+-aQhT>-j`:lG Jp3Z@RD+o"K;Oo`cgPBOm:XS[SO/hl\%V"K%rXTaOdA?!`KL0[jil< IE9'PaXf@/tA#eT'SBZ]es'ljk^/f?i^b`k`CeqsgEah1N^_qKFOQ:V,nH;?rWTWAh8@J)AJ-fdM.Ub'>9`Ht>[4A\. HEF2YA9)_X3L^H[B`)dO UYDPi]b&C`-AD`ettad@A#nG>L4c\Wcp"a%d1Bt4"atAn_#%3>VCJq$l$>O^!?$WB17E,NMQI9hqOM,Kl#0QC)5_M%B2E"K7Dt9XK[s@_HW( Xp8-1+,k\hs [i?3?4XCQ9R@I1c,D4?]8FU17BbSEg^J]H'(3&Hist50)n$_k1ZX=$.[:O6<T_<j:`*J"-r,6PMD[9MCs;(Yq<qrK<Kq2 J>R_<AA$0bA>rL`V#&"G<PcUU"nGF[?"W"J%&'nZ3tMD,D?pgc7A[?SXo%+^=?0?FaG6ec"!m0<,H.U\iP;\JS/:dXZGG5nH)-IHaoe&IakR!hcp"nK]q@t7`#Z-1UKD.](XQ4EW%OCAFPfP)%cOW6(#1fA2hleLsX"6qIZI/PkPB,0TlRj+XZg1,g6)<;R6S)`lP]B&`:YB""-A=\O-PW7ZQ5U=IgKgI73V""LsK$)r@T[dQOqtq-7)1MNFR\Rg3hUS*XK"i#A1MPRbhNF),"n!G4AVFnsDl/,!rMD'IHqp@^Aq4TA5'gcKQ>Pi<GU 8>(Zk4\Tk?UAZ@,gt)S#c&n/KSYnFGZjs^cACm;+eP6F^)MAGhoqV4`@1%UMmfNba+"& 0sY!5$N>XYCnoO9^FI6'Y >U,i^S$3>sW3#itq3RnU@rgE1^8\UTrjTcUHbl$CFC%)UY[P:8'M'a6^1O4Y!kgi@n8:%j^P-cslBH*r,-Q"Psn4bKbHHAUVLl%F_fB\P<sD$^O&VU,5Xf';b#cibEC64s c(Wl+T+UE&N,[r6Z`Nk>h*jme`BI1EhI<tPT8'd=nFNSC\#76?:Dg-hVE!L7pL?']SHj@mFc=fH>TO9_gA%]A"i<37)\jY=sD8sYN^9XFA/Ml<+seGea<.6D^$UC[=X-4`tk@fI<aWHN,m37@@E2"LW<b *\l-lL,4OFomrX8JB7`Z<r=tOtOtBdE?[FQ2%g_I.@Ucq$@saZ*AA )qZ,[^(UCZYG*2A $&* (UTA'26U@"_(=Ze]H?i0U3W'K(b$s=&+V+[eBthtG:"E%4^;;jJR2q'1>XG]%M_h:kVC\&C#1j,`E;2[CP_;_,SHb+MHJ][=@?Z\J[!ciZMSC=8T_P^+7+4 #0aMJB#M7+J9Y(TrprMRVhqp%6n6ph[#A1N=8i>Md4<b+GoT'>mPMB$b#AdB5bdamqKn6.so 1O2dscP,`V1#sAjYn-$).Ao3moshGH`Y^-a>1T?/^VeV aR(MfbU*_)lD<ARlksCHYS6i 0q>iE%8OlO?pr$ZfBJj5#^-GX'h(lCVdm<9)q".PI$+*5j!p_'3#>\Il_VpFG6Zjia`Dtp+IWhr@EH#]l0o\`js?_);^=90%PE(m]hA-Vm/r)\W9ENd*Edn+NM,G^,U/n;tA^+<A5Y)VTN+$7=+2ij&=^i_/.T,gT^.CdD1Fs5d+qNl1Z2T*20$cnAc:sB^.?$+m(IlJae \18$h,GVofc!rdX asfdnd4&SOrds;@9][aJ.3h0=t`NnA(fim`UiZ&H '=Ic!`JiOWM-#T6QXPDcec3!J;+/UBa%!onsq92N4A':K7*FnRl=J6H(bRmHG)clO6e2CEd6,GgGO_Q8n!>RQIGfiB9s5o6`RjM[ALmTXqNm@Z:O<6NTbU`)NOr.a:gm^b_XYn-Va0`;P? 
RCFlYsq/5Pr%$G?r^AZAE&/d*X7AYM\AC,Mes7G -Hb0B\BfX6kcUaJ_5[W1+lp6<;T$WU=C&\hg6A^NeR%bIWE6ZjlN\:k&Kb+<V9boOA3We'Pp+PkF'h5^NrL(PC&SIg%Y$%fDpJA1Ol[+q!_>B7A6N4>?q&C-pLlqE@c)j/Ml\i'J'\/N4((UnMeLVG-?rc?_4]>TAn6!eU_:tUA?OYrAJ[:n>nrn>tOl]L,L3tb%q1"TKD_Ggo :+r<+/L<-QrHM<=)i1La%Yj<M[$U\-a6&PRZ>*#]#`akcXI$cXUbL:=!nNd4$]flE;,Y4(i51saE!5.n97EkgA`IQUb$1s_<[*SpA1j/,IYK=$^.'*J;^U1Aa:r\*[5'BfPm5i\R:Y5N7XT*fJ;m"f<-*_ ePoqPlR7d4nX0[ZP'7CSd8!faj)#/6e[R+!^]L[R:N:TNV!(Kr]OV]a<fSRWGjM;1A!UfQY! C:.jXD2k)AUFH55Z/Te`=;5\Gr'3UgmS0.MhbHXAV;hAcl1a)Zj)t]QG+:J3nm8^\)kSha(@A;OIiY[%%8ieU-cE,_"JR=4l]Lliip./\p7dYXRt\R1@5NG:lQi1nFOXUC4(MW*b+,o\MmaV.BBKf5E5k!+5tih6q+FeghS53%/T*4_R^PLV*EhW0>`% 'I-L hGX.orUtGE^39%Nio,O9MZ;+bHD?BJDBPVAk'SV%5rFKf"?[RX`W\t+Dp$)0NZ_IgY*9I"+GSj3NL[k%HI-fm9A]*9\;\>CTdiC02j0G04t_2C)XN&a6!NCi&_[W*E+K*."oe6R:Oj2/4rU6PtGm9X]s>ho)^1<44a0AT%ELqE/-UKgS#AB$kVBmie4$1Y`0JHGlRa#Xc:]60EN9KE:of(H;ElD[-b]C6hOqKdd#&8/Ct2<^WG->k1N._J]6)TUC2mWRXrPD"Y8'_,rqThCrN+A_oEMMkM5-rY2:3Xl1 @J)D=WVEo)paNT7\MYaD^:\WF6(/m:-oEQ98:]mn#U2rTqm7W07fcC6V&pFWgZ!A^-Ri>AAY?!!sY[:h8OB'o6t?3_]rM4^IfRmA$'mPB<X2,@j62V_BkPrr/6H*4NiF23S%(,EO!Q %$RLjP5X& dHpAUB h>9RjB34sA:CJ(R;lN6,kj1<ao`E3p<h7J&^s-p8+kdahR+Y@q*=<`rl6FFZjEh^OG5d9>[LNKFk4a!+V_Rh;;O><H9'3C`YRV`s4%/?%b_%E3?f0*`?[>&;)P/*^g?AAiJ`@*k?1A%p<4%9L2R:C!fReCA[f#T1C6sYjI_D/H`IV@*N6LX'`m>\qeb %'3.)%;s#s!6/B#8dd`5di,iDPHo7MQ$AV=J+V0:(\<8fZa%E@Z7)"ZlWG4;b.0*S,<"K9#CihF83rBU(&mA&5EDV,'WJ=3NXeAW3@.j2/:!+an"I%3q(9(kqZgY9h]q,VK&![P9;J=D07JI_\6+/:21[RB<D#^-Y-KhIW\TX_dUF=AfD@otfd$h))fGYlq#h\<lEMa!SAs_@+SZ`r;8E0m5UMYmKlI''BkpF:iLERQ,?tXamgrf/;%Hr^Mg0l1ZPn[q4cFoOX'\=>q#4%?Fci'2V,<%6krSb@]jEo:YMGB7P87$W;76_U"nHlgO2 o>'[KdHeIl+n@'5[+Bd.^:\Jt&\Ap\RO2Fn5C&`omaZk:sGJj&_SIO0P-Cd;G3#j(I.Z=NCqc 2%(:[0hN1FV89i'a+ed49r,f\kNa@_KE*@ZZP\GFNRNQ;"+ghd]$LgM%3U05!U-$#I2.6 
GM.Cq7P&WDYj$qd`V_755b9$K7,9(Sf=6>@<R(]Q=)V9p>J9pbr+RAf>A74AV_E>Fg]^+-0UtQi_m3+rkfFFEr'<%iHE/4B7N'sRWoRIc*U_qfZ(pFqp,^'0-ja;tcTHL5:+gJPb_qrZl.X(/0TeXTVZ"4AR/'lPAsqA<$@Ej%/0XV$:][.2mhWZ]lgQ?eD8q:t>_himc#`.>6[$mMSG)7CZBZ\e!QI7U=HDoP,3)B5C7Ulm`8l&?97]9bV$) c*11'-:+% f_[-8hncgs>Na[J (jUgspZ80?Ia_'AV;n/VQ0I`\QC$>`:PG6t >1!Z%s\a(FNgB;;HbsFWlOV9<aAs^'\eZZJQfc>3M8[Ut%f/AT2]4a@fo=&HiQ$Kkle-FYl a@C8F)*S;lU"VkdR9\LfU/TT!@.L'+ZhI,!?G3A!#@-h)^!B?)c>:O8cA4gUgeE#kH'\>E:`Rn#i!@2,*)H+HVeRB,cATD8U;RsNOXr*M;O+NTMJGe<Db!OI6eErYAcU4/Z$[Ce_ZE.)Mrc=8:OS%hl.!c[67W8cM1Z4Ge.6/3CFfjm?$6!S<A96Ork]-0/AZn!U)@-0kt%]CT0`F@B.b9`3,8o"sRTPW;sWU'Sl@^FZ$^=RcfT>6$J,BI9'cg'=ei>U`gN41oN#SSV`o[8aOLPjsb0pL^G#MnmAgsb=(`-0T)4mP2A43]F!Uh\hA*V6 9#@Za_'IE2Ai;qk$JQY5>o;L)\]:h7Z)^G7#b'r-KqtTi'dWAC^g,"^i)abIli*HV@7rW#;[H.^HAVDU0)J:8sn2XK+LR._$[Vr,4Eb/7LT^MBL3gcWI7E?sZRn0YPA.\N%UqZT"H:5"Ha +4&mLinhZ%SHpL<hhc.<+2Aihamn$t14&e;M=G6)=c6hP,D:eK>CjG@EpaTt]I+&VgLL6s%+epH56Rq<iMh6<oRS59M$sl:-VHg7r>LE_Js\!*V6EAJ<m@[g2W`/]fs@W3-.LWqIB=:jc1,1YB(:0]qBBrVa;b(D5%kqiYA0p8l<V1oTY<-S&=lmsbAiZ;jFdCEU`lA7O<?aG'W?CH[GC"TB3lPt1#">4"tQerAf'c/]icOeFc]>qJahlBP61.6Zn1e:I5 r78,#,F /3<;SFor=OJLpMlGrc$]/91SAq6(c[(Fle]DE#!/r\CdaCl)$0R$4B-4bm4dX ]0#T,5#9#(`J,Aj?&_%EcpA7_aQJCpFA*F[0r;.q*f0>3$$!_'tRVnR?Q$Q"0XT9?Tq+c_9!_^8eph%AR25K>)6T%F01e/>&b2f]l+nAgmQg8kHb psX3`)5MpE1O.Q!"DT'`;7A-tWqW@P20f$$T/a>PebHtn1!qZT'&JHqr[&'* =52"&7dpe?g.$o%-kKF7q,O0.i?lc698?&86,"NdI3_=M]nkAfAt(7"*tEfQ=-K#I<q67aDF@AD3&RHa4,3Bc[e]FUa>I_0<o0q@cqtbSi'aJ8b6oEkG J=S(Ttr#a2]iN ?&EdQn\N8?m.0f$c2s29AXnfRrD0YYe=h@Q@ZjCe-a08LEl`k96Pm;B-<tbV"TjZ2\.:n<A*IAH*]W6C#lBaq,@epQ^EPI_"<RQ(7m*tZ3gfRVdE2[rolYp (b:G1:C(l@+s2(JL8pEFNE,;km0RU)GA/:St7D"n]JriFr`1*e ]VIpQXV":%co r[GN6H@-:9J5_+['9e5=d,XAB;2fAn.MM5/W!RVm'L?3EHh8/T];)\7jY3Wid<)fCaM=*.kGVRA@W1mb+q&dFAjlpU+nNr=\N@N8j4bA88ZKcfaP7sP%?4@e9)@p`oa2LKVmUKTm%Z 1?4l.rE*WsK5=B&!>q:N.0:K]80%4=JdT[F0n":o%VE& 7;;]E)'rAS_Nc-$P\NppVegCj</=2bL4E>TY>\lcF!%a:rq;9ndd'A:RY&IhG`p5>>Hj^/Wpn)<!^EIH^`@AFS3WL3P>7-f6$8A)=>0+* 
7$4o63)&!i-YA,0;t\MkD(AgZah_<[k0o"0]af<(rDd%XfeBU#W/^QS!LTO[PV+8,<A(+P-d42rItI3#eVmFj&F7^as+NJ-RDht  (2FRGbRSQ_R758OHQsFWg`3FHh?jF.r31]^^`[>jY_N*bApB[$!`-QWWA#H->h<CZ`F8p.-B=Z=N-BO!^@@8$A.;" ^/3%Q<]?/b:+VlD`YsUmk":QAh/r;[7d+L/-_*/VN$`h8c"T/[ha@9 );p@U"m<k8[0ejHl^OiCYaZpLE5rnODn':*[M+/RP-GJ3%q8t X9Qp*>AJRlr$UQ<H43#THG3BsI)J0io;C<M:O5*r5SNQ&Z[@ F<G'-VtA?q)I,6hH&b4&g+OJ<\97!`N'/2(3BBpJ!\.&`@q4\^4H)+O=$sHgA#Gc]G='R<Dff"7aF+',VTNeSUi?bbj%FIk_DtiL9cK"DdmW1gr,KX>N(hAmmN\m!Y3R?nY'"`Rr4e))Ys@rQ6(FW9+m(8<6%1roR(^ERjQ>Aoab3a1MR=_]O/h4Irsl=W5+T3ZFSn`X0+r#O*iHD?bGj?I7m8IpjrFoa5J\97IJE66Ob[-4N_o,d-KP#P<rEf@]!AW"4o;D^Goel3A8a5;V^i)cCQ'4sJSm^kAM&`S8Ai!X/Agp3n3?5Ob/K7ojG_+#I&jY%Qr&k@`\`<[mF 0p_(Zi''$a//E?[a<-Wi:7Kl-Sk@N o'eCcBgHJ(ZU,(!e8,ffV6q\[[a0L5bK)\hefXm4A2s#<t6Ed)(<6K0)\7iq>mqB;G=fXB8#]1AL7.HQoXm-[$A\\CQb)f*TfEH1Q()dJ?f&qZlmRo;a%-LnqtW3e&5PgT5d;Has=F2_VI@'FrHm,WN?2 *"=U `K'P$PchrWcdt L92Kj4;kTH=#?H"cf<@iiXjA_QN^rj4qLV";XPD9j:bV01]`34N1\%DX/Ff@#XBI/Lo#c QCoJ\IZ$g(1KG2#(:XDi8A.P"d,A_68DjmdT!hT<G[=Z0H@%j]'jQbkp"q)+)g_:[AYDW,K'LPY:rM4t>.V&m?$9OAM3$5kn+,%0o>W\p<o"Ae_E^)e-oLT@Aa-acg7_9No=J;<@rA`?'!YGG8OG7D(M/H>IXT`^5!6<K>k%@p]Ybdg-7nI"A9V]]2SWb!JA!k)D#W8R2B'ALHClX$++inHhl,8AXH!U j.7%W"Z6tNSf7STULk#,b=c2Y`\M8=j>!L<n>A\i$qn8!`$El*4qXlSbfb3!aDY?2R%[1R5Ah"'$$j?8pp1qtA'Jl$%CW/KX#QcL%9a)['lmKB/(.BG]:TpH6$]M$P3\E,H=El.).V_g'G(SKG/>Z +7Sg  PKXjE]t+.WS^o8?G+9(9o@9@1c?LgS<$&?)tbb^'g(er+C2@fq_->mnAkE5cCn*<>AI/KAoGo>9TcH+h`Wd? FbbjTld6Hm+oYiT&XBq+Tb/Yf9BTIB^Ch`AZldmX'?0WT. 
@VSl&XcW:#(FW^.IAG(a&UF<A?aHQG"9sAEKhicer@J0`1<UJ5=."s:r-FqL+^:<&;JHr(L3T0ZR,>98DfaUc5>!o9$?$41o7OMdQN 'CCF=3n9,H-GV1:C2fre*/DfeAAgS<E.bQ^&4b7:#ChiO0/(Hq\S%DaH#T+7lYsIUS\"[Q60MRkTZpC29Uam&%?)2LniR"^$@&4Hta5=hQXj*M_EC.4DGJ0,V:>Cp#^OM0Us`WMg\KO4)#5VeCjJY-3Nn8Z+dM8HLr;&bU#`l<.J:+WdI9TALi7"Z.An&t-&SVX`XOY)D:7Cd+Ddfr!Y-0&jr9ie)OCg-)sqid!Q,>ghj*.<4]<aYpe8Z`Vad^(+]OgLt*-%P:?DMA?-;qR_g+>]ft=rY!g>i%M`] kX-bg;:*`c :*FpCB="6Cp?Ksn1E-<iQp4'(EO$[MdUB'LhkB*?DXotl1:)esp1#_CP/O7k@'eH,j5`?lltq,NR?Q3)K,]9^#>$6*nP"]JM1==QSeI5@A"1AJ/Z_F%\AC')E3ZqSM3L2.IfClYbSGOb?aeeqJU!UH_2PI2IVoJ6q7pYVa71dJ%G/foptEZbhC#'i^4tPXI0= KDT Y-TRaHabpCCf="Bi`+t-V#dP>j"+FJN/KL',;Gpj3Q5Vfn6i?8El?H4p'?A"h)A'K%Sm^r%a7-\YgE9pLcSV!=?p%(7TIdC,UJ?]b+L0LJ3PEHeXZTr].8CR?b;,t4!C'>Z&UEGNT$PlIQW6DSsF;WeM2&o=mUA>cAhf-i$qW*KB I)9.1Ae%^ICbAe2h7.Z]$UQ!@G,pCLD1J>:A#U>hg/fathk5@GnFm"/:HW)V\+\*c)m2Jk8DoKPOBc7]fSih46G/\rq0&rm#,G3J;E-*rDh sS= *C%f.NATZooQ.modqS#(%dHm;?a4d&b!XWgOmKTf"q9I -2%%q,8R4,J#$5gBfZTYMm,tq2SA+ifSo/r[#P]T<sPs5P<%KADoLC3t3T5l\d?-pNlf\80T4:jo?sq9f38r_%_-@PQ8+"U!Ab)\ LCp)RsPYNC`D.'j(FQrAA(hW2+LK)TB/#2e@CpI`![q 2=&b;kl8k6FWhGZ`.>^7eX0(.?F6`%@`NXKL=PX\?XR7=GQRsCYK 0Q\S/ZOS`W`[9W)X7V_(iTHfR:`>AF:g>b LA3A@>8dj3,0ned.C9%JHBg&lDPd9#)n\@9&p)jY]=s!9 :qp)mJ3#7,;-Nm`e9lY@:m]"The#>be)i8Ahq]g 7#C_o&cdPH13j;Sp$)0sqL`Z.WA8[L%sSPF@E-(\S\9Y(fRCOm4p#A+/.-O(a11A$tQm_(R)NjCW(2f_' o<TL."!<4`.@SM!<'b%HSI$t&j)#!<Wh ,9Oj91s8X\Cb6M]3nnBlIEnkDQ8ZRbQOc"P<q8n--ZMNV#jA`Z=gKe&t/66IcPh>fHL0$??.r'tB.`X]ZTY-740RoY&iN5STW5I Y>I!rPHtYX"Y/ ]m]QInX!&K`NQRU)A>OG5&En."WC4<;4;\=BB.gia@?>Y0'ASF+mY*2#Lb[9B#JZ%RhJ<>fc!Ae*daYAO6gYGjGl-!/h69M)#jlf86;rh<sq1e06H9`D4a:\QGK68r%_AJApp^r6rn-q$h+8o;^HR@AA:_*O#O2q)6>Hb)i)DV\@*_9T:__^KQ^l @<85s9 M6]?eA!#Cps.,Y$ e<B?lq`ob11&"+0p!0%L'*]25G*f$f_n45Rk_7\3_%Qk.TAGETBtgV3eb-_@XHi*ed>-f$32>G2bhqZA!B1LIA6@arX(5L&:e!eg0X=_bWcP-%",09!bEk^C&Y!irAW36XKqV6/MbI)Ak![%,TPZ/AKX0]*\WPq";OP*pa)Ki!,)$\1N-X$@M$b!8<;O`$8OZh_`N0mh_cs1/7&k3=["]tI-OGV.oYk Y2+.riBdRQ7k\d8GFpG/PVm.]tlq>RU!J_h'fVdeSAbW[^.iXN!f<qMX:0;PEoMR^?NslZ`NHj 
W/&G&!Z6a4LQp]2]=t"X]6<NEreD:Z<PrBEW)?^`KRf9Zt)9*<i=e3L8q"3d5L0'\j_PKdUYRLp069hp*/<t,R*OgK#ml^-8&AN]kS=IDMlD0ZI3iiIAj-;MOY>#;cO[m]U9NstB?Y`+Ab(IDE;'P.!Z\N@8!HZ>d>(@m"p(lS4/XIEKNZ5h:\YL>SWGK#]2OP.q;][.s$84/9FJeN')"(&q42g3SKpVUbifkn,mpIT- @XaW8j?_3AJT:Voh==X>+ ?1)iFM(aWV4]1_hI,\Slc#pWkThsicBRV[AhADB9KKPUJ`KL$eSSPC/['K8(hb?AE3^sJ7;;oEH6A8j$#1`AQXq]ps9,Ap08AYc/MO]IfN[.]7tD;U%6lh8;XH`s+'p4diRJ%oF_4b)cOH8 /J=sS"0kO<gBSc8Lf:tS5Gk2`_Ed[*XG@A#sH@fAg?QcggR_AJ&qZQ8mE<6R _A:&n0)9"WXr]aZ9hb1;JT_;OU0h[Kqdl>l)4^H.]mP3:cCoJ8V1hl+)+2d7W Dt$0'FNfB&5PlW9$M]DJE_D:(;N(?$o&d,qGsnX ;1dOdm0c.Rl^Ct/GiDTs&^SYV.PW$"J@8 b,>I-<Gm&@`:[p6h-V:)e7A-96;h8:[364n4pA5C8I3WTj%5k)'[,&4>:#.aEXL8niGh;KrT F$=M/DjA'3s5/9@r1hqn@+HYf""RWe9Afnlh*\tg\T$X6dE9C#q[\0\:dFf4!-]NS4`VIJhs6-=6e2lT5cCT"24i5G$]2n^Bb$3$,R6:ZKjn@n.A;.MEscbH4 DdA?j8O`!mM#7p (Mb(4\qBrt!VG>>(BIRK#@Z:0AcaHk=6AL-/"R+&S!c-BagoA8,om0\!GWbeO<.R":,J>2aU0%PFdYd004$69J5;\tLU!5Ym$k)$!cm%OR%btd4g`bla;Q&(f`X3:4DeNTAmo=m*:p1]Al3O5H8,'4G6c-h3;7n>Fq/)Q'@lk!K[k0:eKGq8'^Hbjj*^%?XnWb)/soo."AmJq]dI[KZDV&75oA*F:"*VWY1d`DAUtYXFESlLQ:A9c_qNT^NYs.-:rZkgJ".PD:)-#CMDPjY3^/mi\s+r67s[c@V=;W6e*^PJO9`@2fPXRojZ9Z%1+:OLf?fGi)YF]!H %Bcbh!X*+F!)/)#Eg+Z;c*f^&2;tm2;slJSP2a+& \W;9)_LH`"+S$tAU MQ+dCr^ $1(AY!.b-2ce%n.dInA;#Vipk<ZTp!@%11QGD'M!';#,H>o"dmU?'\qoV[ngS)K[\IhNC`)f:On)g3&qdb!?cQ&r,.UoSd%9KA8O!L([*H.!:j;cE7V65ae>>K)=('.65D3G;+]qYa4Nd;5l+PW ACE1@pAZ!9fj`ksP&cNdARrG dHjaQ5oVOYpEt>,V<p)n9MF3r<fAH*Y)q/2!P'XI_0.m'+PPISi+df=_YP42`P]8hh9VI5*`8`_F(k%t`FWUsFa-g!M3EB[9$=@ptPVQE;?!0CC8 i+Scs]<Ys,t*BbZ2 J89^"H;`O.8XETtXsd4L#I-o-Q#%GN0Fr')D'bg,l^]mW@^%MqYtbp0Ae!K=0TBeXY2GpWZ@n#.qaJHhR2IelC(MZ`76Y%p0$QYS"H"^?O:<Y3(M9c2_iApo9aeWBUcdk75sd+`R/_$N#%,7.T, g8RAMjc`4r0OY#;\J-+'Y2s8taO >3SV /T)`t (A(e3UG*B:$*U%1qF'.Y&@4m;$i 
R!&GXLANqSP^N1$/D5SG9AP@nCAr=gHZ8G\FWrDbm\'IM:n4b!oXSa#UmqGQ(=UD*99GH#2FAcmitH0)]lAfKB;T<fbN'_q=TPt.AbP]s=6gRTsAZ#aJ&+`[3'.,nkmM(M)>S5Q$tR],lh]mC$AP;D@ULc0jXAAT>pBH_EDkoO7H2_oUJ!P@01V94VqMXTS"fUAS.Vq8W7*8jMn9>mJBQE0?C@1KW>E7_AGr\BQ+q/kpCe2B@)'1.3pE0P9NbKC$`5X^'*+[69n.gQDdk!6N`c(r5CV8O[#$Yh Ur$&Z?,6iif)l0lTtGi%Iq;+L3_U>Y#*78dKr,BH6An2gR5bcap^l)Y3!c&>n<VT:7o<)r>.ee4 aE5AcSdA>MPOSQK\#=r&1"0_S^@0*e--@bKC&1Nc"P;&2Z:GGifAL6]:t%t"Y$9O#GW]?-?>OCNKs A@-ekH0m`@*!+J>,/WJ9]kt5tQC6j+V'VG<.77h*g&)+G9nKA#5TAW\ti)&M3$k/7W?A%\=P*&&L]kY&rKk!kjTUl,N6R7-cPUHak5odkeHZH9A3V/]e/;fYF-<pP^Vka]?L%Wt!> 'JSsn/16VA/c<cMH<R^q/)0e2;0#JS*Xe+:^ZA+@)L0T3EJ 1lbB1h=[9 QmbKjA&XLtea]?:Kq.jhNK)7a,bhS4;J%1oUk,]tlO*Q.p?H.tDIt()%AIRd@hS?]l4F[1!8eqI]gkcXGY9AIHK`GRgr/9G,oWDWTf\o=J]eNYPGqdFoS^/F`V;UhkT9^Z3a-6;;9XcCV8mb5-90k Xmq58-!MoS=8,7rZ;cs*fZAMb!<CkeAbep3lEp[jA#UOt_+rso4GBAt&E8rg[G8+pt6V4DJ] RFDWkE]hic+BC`XB(#,e[PrXb8N#OTM+.a$*(GA<g 2:'FJo+n!I9ng@d9oi N9R )XYP`32,r+K7ma4 X3`\4?)8dR6fAA>,(-q T)"`=2JkBj^:aT8sTp_+2o%N:b%QTZ\gnq"t4D_1B<2AXOUM;O4*Ak@I\ENSP'eanE/Y^fL4TOAcV4an>(*-1fPMHG=)$j4j8tKb`59DFSA+QR?.;n<(j:3sr33Q-o]b5;j^l<-Ua&+5%1?7smY;jbUM/7"Zf70BdK@GnRNi%\2A+%S1J/`?(NH8JFjLeJ Z9$Mq986!Pn6H8E=Eg=k)e]>^Ad=?#rpK+J^5QQsb)oT1ch=rtJrj.c+XK;Mk_e_dc ^YG[aWPCZd!n,D+1F=_O\K:LU3"G'rFViOQO`B+PP`%XRnoQg9V:YCrJjl'rh5"HeFCLg:B.Ic7r:&F($UKC>8#C"lGT*p!A\3'_A*'.InVC1n8Z@F<O&B45YQ%bA*n<&r7.WlfF@>O;Iq7_7tWk:q")I#=4mK(56h45X,q,9s.lG#mPRAAfs2l/8MBX_na39RX1B$7LKc'&?bV/'g' k_D5I67WP!W/a'l%;ZWeAH"f7AQ;OA"^*oPXQ%;8;iU46BOD>lJO;8k,[Z1W`N'Bk^0Q_7P<#K%b*cpHlEFaJMNaHL>>J(p@9OVAOb8]Djdf+69iccX jG1Q9kqd*.,"lmC3"mYNY35[,2A=oIg#`S8OJ#be %49a81hN,bj1J-ZB9MYg,p*,1gKDe=>lr'+(fb:U#_eKBV(:H8\eY-Zs4bW5CLo:PZ]&V ]tV5`';2A@r:h;k(62p]F.ga5.r%EH)=f=AlBO-d;9*;n-=Ztn1@/-,7Nm%3t"^-?+sTCNYq)pZ[R#%BRe&5EM98USIRnH]R,Ij[aJ5#GcDL1P2'U%1U4B-0pLMZ:],^f*a)tYI6->6F]]rp*Fi! 
h2_=EaBn5D)(F?1amaLA4)UZ)\;;4B8F8PAa#XA\=$>RK>aPcH's.cHKk<BWbJU:7&jj98ZP5/@p%_NQLhE>9o9Yc*7CO;(m<DreX]hQ>1f5^ZpkRf`5#?(],]V@j"JEi_U&+ec^F9L+o6S`$P;g':4%8Z<MZ]C-&Y)X3#^):;bkY-`MWD?WY4ITg#;T&5.PXB-F]a3[st">IGG_$22bNQ589\KG%;CdqrRWd"f"GcNj&-$QNs2'2=j6AojYZakT>_KbC4HIT4.^o=ES8*5JZ1<(!\Fs8!r;!:!@U[/oA*I:A<UkZKmV'rAjCFA7,$O?h)W XtghnUp)YO&g_sO%b&`L`TqZTi&s\"<%DbkaLEV2US2PV$id")V]d9E6-=as:T:%$."r)<#XcD2hH^S77"(gi,MW;ZDi7k@@e6YIQ\dZ"1GBHB-N_BK%A/WHnX#kan&g=<!g"?[,\!\EGsZ9@<4-IK-[Q(3N]?-+Zi(F'HZ5L\,$IE.8$;Gf4KeiqHA]nUR4)r\1dqj()?S=l&]`gfXAF'K:(&f `H<!-DqdR#aKYFFKLcbp0>eFUE.UE$^[dbN6./B(o+&Khj";/-h7U]"6pb4W:3Gd^'+82&iV6L$oar/'MR?Wo,W"@YY&)#&_<%eETG(I??e'7cRH$>%H&b/BCF"pt+97)2YnSV3Mc#`m#smn^Y9jpKoO=e9j+fnJ+/V]Fq?:kpU(tq8hX\Zl2#ppK'X.Xa2UdC95c)8D?q!AQmHL.qh+?a.PN;ZHUmo&jm]dEnA] fin@9St "5i+L-4s,$sW Kt^;[K.q2QQO`?!m\?:*]-<cpg)U6pDr]\21@-BmSIt:YjN0l3kdDB3c]"]/PIRn<'5)Wp@b$ =o"JPcV@\Y:,#[m,R;G$qdBQ&^2>j;5\Te*TYd[R*#XE?R%JJRX*gELE/ZOMoP'"i-U[8We5H5qOj30C-lI1\#MAo36hPt.caONtgNM,:8aFnFomY<fHcg[Hl8P AV!IbYROa,4^dDX__$F5)S)b2e+73K [4#q_+mrX?'YC]N2RB0U(S&QQ hOgA,AZ0`qb3V'n35LNK?FZT]%o7_G4B)+dH!WFj;?`(OA0p<tr_D)-h+-XQ:Q9,UH[4R3M Xc7#T:Fe&#h<:H23V*WWo046t0IM*G9$@%ALWR2C?ADqS3e>4(g]IO/H0$Sc3nH@nd0n)lG;+$%`nilmb*S[gZE0_`<Ab7&5h[g$Hs6l<p&6g3;54A#Z=W $^m? 
"RN4LN+&Gr3.`s:f98/6gD,<HA>Rm/T>RR%">8C$4 be`Y$@B<K$s&?R_MqP#,lh7?+Nh*F!&sN)b"%3P0q./&hbZ K_tl(BPd%#@DbcW&:E+eJH0,#@NU!Rqn\f9U^EZCM#XXWgnh4eIcm"Ji#dlEjAGiUt7`2^X.([Gf3s\jEJ=ZE%"@;Ibi,&Ill,[nhG0:1Wqc6"+G*E!Vjqhq!,n3RTG*4h4pl?RW(9U\mYdNrK0NBAdOFf>j$^Sdf4OY_N+C;K8]fi!4g3cF<58F-dat-XT:DkVSopo4.^ZhsjV/eX7AEPS2FVgB$'&Xh=VSM$Ik07PPW6ZO':)3Uf7)dOK8jh_f3?H(Jr=J.LgWnT1_eA=M1e,6f!4%<&&A2&DON;?=N_kWlt\Kag$9 26W#;7 crF(` K+M`V"PfX,K%Ul_d@0i!KWfX*OGp4i=EUbSAqBrLALs%[A.^,l0Al?QEaj(amk)]FBXCiYK--?0U(pfC?q!+_=@mSLkXrK'J*gge)^B-a6n'9Hs)75QM5+cQ_[B^[je,\&h0,L2:n3+V:eA6RKlQnAh^: #7oU(C74=K*?8a@iMN 6)?Y2imWkj)jL3t=2lo%-A%Gmm6AGXq_4!Ebb6`hLDF-N:dPU-j=o@5]'ibipCDlZg8,*CGk`A71c00>eW@`lpcN++U?e,n3\;Y#a($_>;^HrjD$XkV&[F,SB1n7gSqmnJZ[Id1g^sl*\E]lIB<oj:'TWj$k:erJc7cB#:UY/,soH*e[nmF5hk/+"E]31?=^CldeS;qF-frFMr]d0s8E[&`j^Zhngt?@jiqb%>^ROG<^fcY4L[M^]n*O[R3GnNg9P/XAAM=16F,A^V<TeQH,>M3/HLLO3#gZ!31(_R8kt:X2EhY\#6)+[`Q$B#C"A7-qpb/JiZ$eMFkn^MD ,TL4B?tXT&-c,/po271,` L:QjqT/q!K'EqA 6n#[#UY9n`TOWqC)=[A!8`QY#;#:fLP\"n+9hd&@S8<L/Wg?HaBIt%gi=K#]t*8\Qae71@(a;j(FHc(.siW;\+Z6Sr!A*.bSSjG[D:[SV]`QQ>&98>gA1=@,sIE$B1.F\m[c2-092XJD<`_R109#40(RH4j$j'(^%l%LaG[tiNdNZd*L1\p&tAXEZ$g?Z02SLek*3eor_IN&2ML+)kQAg3r8Y'\^UIW-h=C1`3]Rm:6]k..&0H*fa8l.nU@9a"kZS0BW6]KU9Y(_gF#p kT*6NY\(LoI+>=7e":1nOl4V"$!IZZY,B8hj6fhZ7>HA^rt'%q(_IeAAN#$#"8MFmA2qMckc  cP)2Nn9;YF3LqeBr;(\kgkAfI0eR*@.h`&A+`ESKeNB"7q;S,6E/B'U'\'1FYW[5,sBM+KgV1A\l(q.oU7(!F0!T<MI#A;:dt2WI6mcC\`WgY4fI4)O-*OT-bBsfU9k:)75H"QIg8B2!CH8),!@-B?;k;9e@L%n))6Sg?45t^H6D97\-0T%b+pEoA3(Hn *@9\5L59I.h_Tjr^P=!R>:g&.X(<$bD-DS]3<q7*PkT4BfJ!km`,/j,&=TXKS>Cr[2U?^/0_g%5=6=#cQt:]hj1O7-*[drWco:"b>R!sa"'8k_dU;QMV?:lSAcFDAaK\bO':MoAp_lAh`-" EqBL/@I)1Q -:'/=tR0A#;9N?(II?^7"Wr8-.@_*/KdAbk=Kb?6/5k /l5J:::)1cNJBbV/bC%Gf<"1r>kprV$Fat17n#07s,X$5"q5'M'%q%f6Y%r'\tr>+\UpOrS;&:N*0&?rVlQp*q:9LhNVteW"%&_0nP]CVWeWYb[mIOkkDfKdF9END]C>(/EUcC3'KtX3)- l^9"AmN>%a7htW0sKW>ko+-1p0A2NPU8ZpPV[sI'QOYe4+[F%aIY!jA 
`E?h[:=BrTgJ4p^1d7sAitnEHsfm)?\MJ1WA/Zim*4V-B3o'kMrct@h.>*OB-/7YG=)EW.tj!CpSYMa>CdEqFI@`1G#apQ/J4LV_VFO/PS]ZRV;s84(hSM.n8(`6LX.,jiGkF5,*UkI$H> /@ArY_7EOm)/2;6=EjTI&*#\e'#*2*>k.3T'0C*o0^#OHh[&"W6Eo2A]5GB6S,?"m$ER:Pi?aY4pH?Z; A,m!K+sF/-oB >-Xh?WQ'TI)L'mBJWWF>%mY!F&X8ZZ0)q2UfsSqZ[X:d0[4EWb!S7I(,FN3F)LdFX^3:`EhCO=U9>5.6\R;PF;)`^3M4EI/A b]Re\,*TD!C[T"iYIj^+l.tUG<0&ID.JdLFEF,lrZogh-JT-;a?2`HS7"eQ+Ng&icHUr[3kn'?nNUVD /b@7[cRe(U6BA?<ltXNL(\1rAT_cr.IH8&2GZQ=KT!*moSn^:b]=SUlC &s*mXBh-Bq0%O3RX,b:8(nBC`tYafYW]A]tJf^b`;XqJ+m^7q;nPAC@3p2T9cEOd_[J70^#Ilo-0F9s,1"K`=#?,^s!OqC*X],J`NgbSo["=#3SR/r1b4RaG4/i;]YRj7CL2nmSRk5qB6jqB#K6dAi1>jmq>ToSn @6G03!ngRd&*ZKo'Ue,Ll)_2'- h1 98UTlo@LX@i7hgRUb\trHlfc\epd$Dd'=6=tm#:U]S;d(`ABONL L6q,6lVID;;TVbZb3oT?#!cjhA#S9'J =Iie<<Ubr^#:;pOhpn.P%"P0<&C>3KV%eXg9ST cf%hT!aZIG74l-^bP>18WA:NRE,1<W;ioB;n-g*(PJteLc38\2U-"74d.Nt;:ofSZ=rTdIK*3^n&d?['Xh+L5n?C.j]T6l+>VQ>8n_d`II+&4Ume@SeS  Q`+>dmTNhhU+8=+gc/Unj=\LBal9)>-cT7]Nh_,!'&2Oa;O_!p:NEnWnKsUke-I@`1=nB)N>[)qs_)>E2r3B@c%N0R7PVUEf['eeAnFZtesX'+o>$5Y<"i?6&_Fj*<nm*UZ_nXNnnOek6 W[0ej.je1[^M?MKO,G%A0h#Wc!jZQKcG]jfmj1 gE\Ti:!7"`A#jO4Q,4!TI$2A(S;),-k Ia!GO8inSYdhSrZPLGY5m(S4:KAn(NDGpsoLs^38'`,>iY\Rr]5)>7/E\e?mp^dUAn]C3?#T'`oEe&3R5g7t[U21bj#^2t?>HAVAb)D]gf2B,<VBQV:PMU,nkL>esZ"shAWS=WN)5P.p+TZGN3\KAkAmHN2DbS[<A@RE3@aHjXFA([<8LrV%(pD+U1@=%%n2/'@[\%?E3@emR'38S9#&e!R1#T?B)b:AEU<flsK&_XEV+QTV/)/:_2YO,6nEkF6'T9?A.'?r&9VUnMC!rRjX%W9/A&53XK a2;9tcnD"Fm4k<ra0Ycl:*?IMgkgm_>V@R1fS`Ti50@R0>UdFAK1)Zi*-J0D7j"/FE0RKgkB;0iln2Vh\<@,l5r]`W?D2+"Bt/6X 0L>3\066f+6tYoOA`?=e&r+4_C(F?pZZ_h]2V0&2bBhp(=3Q,]EGTePc&Co;lFKHts'mB>=OgZJ5rFRoEQ6$W?]j$BT@RHgO*82+a[@NRqTPYs:Ks+^NG+2"2jF(5!\ kq?M9_GhBC&']rj'Qj)AhgNs#OnOQBXPO!o0&=nZ!i%DX?:cOY,2Y8#2IALK1>:R^N+o6hfqpU f[O%m6lQ%_I0=>bgAG'%\7(4Tm+LtH]Q(f[KYjRnf`W0-B'*LL- s90UE*\Ks5< =BWFKc485lcUBk(R%gULHnT^KAPiWSR6 ?35O?d<ff?d:X)7qR0&20Y"+F# 2jA>mV- ]mH45?&LV=EsoB]%)!*$'edIP"HG+V/A::SkJdA1k+@K%D:IB_ENml)0&r$<b[)qh6Ke(orDDnsEPjrVWQot<!(j%'>NS`[IdK@Tg>Jq5]_0*/Q-KIb 
IWTFA!SDl%\-:M7ni6Aj9j[^!L?`ZS^T47j`e`*lc!hK=rb;UG! #CL$5c(3iTti:(pCpR34`];*shFF]-d>MAn59AF1a_E2<NF57<Xt4jQKmk 'ks'DL^;mk\(#\a6&B2pYbaN-R(oKP$>>SbWX_0to6Q'":44aKk4,_OWN'WE+p9GES&eVmA!ZAWe;*qsL/ l"?l1kg2d9<5jgb5\#OmVf3WH4:$$ad*b>=G)U/n7dEeC([U<@`qL2iOVk(Dib2-K:K?Y*r+^A^,72qI)JQ>.VE SfW`"Qrg1`cJ!oAKC>O @CpG-A0t@M*]8=":inR=\NnGXpC1+<J7f1QKdi"G &GAc37cmGW'*sk!8:G:AZb=k8LcF0l&+[H=]J%m,:gflb*.i9I?=;h,UcLclokkEijJRo"]LiA "]#^bs*Qr-PeR@k9`_6Z++1R6"&99>]3lQO]i?5P-WsB0t^VO<idr5N0l8S=<pUV9]A&#(PAR+fAAo'$Sr6A5BgK#2-amroKkQAjs7'beCcn/;)!*lkA-\iAb$=*_DA<OdZ_&i1rN;6/]C"@14U>rZ8kQAh<njeqG0W`F-t8J7e 0f[isgmTg5$:#KpJ[q`""%O28,f>M+-/3'N7[h^&Rg#A%K\^9XV4Rl gZWEKgPEG;$^iRChYtWGPlp<8#\6OVm7h`lDL-V11E[4QN:,kH"\o(<)7`=*@tKW]66kb^#jiLt*/%]!)A1(Vt]%e;K0FREkVrC,XQJA'Foj:?I&Zb>^Z+4D4"A9)%`=J nAEL+q\'\F$kAP627_qJekh$gb:_FDj/*fM#1<(;qNmOX/:T&7B?EMGQ@QAs.O*o2ld' jcoHpC:/'!;`CX*#, 0M[`0HB[A5#]4&jDdoo' tf6e",o(9HXr,bVrfLCL0Aa.bjVFXVF5P>b%R(lpHU*'3XDQQWq-8MECG"t"'dj!OM[_N,FfE40OCHd6Z!'M&6f!PZ0t+@\2#)&%Pk45Vf#O<Zbbpid[CjWaL&nd&DZFr#6he(H@s]ah6W\O;[S2<>#be?9I.6D<CG0:7!l'4-HNnB/`+>kcS<X6L"SfT*23Tm`'LY4/)o%J3X&0%2R1&)qc/0O3LH':V?g$d!ho*i@s80,\#"aM;BrKe-ggAW7Ib6j^GsG\@:U8$^djWn4)8VB1->%rV/\^Te((c*l7+cDmM<W>aFCFhGD$Lh\ID5;V0a7iRq<ss]O[b<]O1FBh7:*jb&?@?)&RGF]%/C2;&J+%f=Q>dV)<],d-e>q<E[ (Zg7.D!\4EJ4[U(b@(AhmE`_OS9G&&pdj" q3PHR$me3_LRA`G[9poS)*7?L"2_qe3HXV^[%rTArKEAa:S-aOS](?hsJW_/7R\[+%;pe*kZC<@(q`<8!`&7#4.OoJfbpo3ig@50+m*eR53JRhqrU=H3GH@^KM2%<`pA;<7dUD]9FE$tYmlj:4&)lWpjoXje\L;nHsWBi%7Z8@%W2M>]WatN3e&Ya$P?d1_j+a6i$FGQe [Ho%f;j#Qt=QGoBobQN?lP1[f>fL:)H^SC9A[ ;\(Q=WAm6cp29%SZhY@(8TsDkq=l3_I,h(g?%q73K`h6-55N<Bb"<b\qiCo;QrFJc6nT`ib7p%bK]Z8Uo(27M&*.E"M&]] lBHe%8YL9fSs[i&o1*qNq$\A[-3b;`9KbFn% MU:@"4#9NFH/B`<"#)MUg^'k_Qf4rEI<!.UE0!%eTi"%_=E*?qJZhS7.2#l/]i(bnr=1mN@N7X,5(WfVX"5(.Ik%gY]ibjqG>igo3:!<3c4d]&(M/RC4(eIiBKW,oI1d4!(<;B >+HZ0)e.bs' II6_7LN#)q0a(AMn+>q59C$8oU0La]iI)[H9,gNXL4&:BUAfcT@[h'dlY4tBR90i9=:_bb?\QZ673eo0Ea?JZDAFnHn/UH6]C0 /t-;)e78-B$@6G%OBSL1a.\&FQb-+OUZWrW&1ecmcSTKW#ZdeT1+ApR'5DMSQ, 9Am& )+ 
e<2W.Wa?CO"X_l"lQ/Q<*[.hDltZ^6JjC[PiL2814O$?j/QC&AnOGmtW_ b]KA( 9LO*7FOICC/7dr)pPH!B?r[;`^d[\V2,^08jK/C[M3k,V78XZ@LoJf\HmZAko6>rTepb]iPgEQ!;4.c$ClmhP9 1T=6&0GBA*(Un49P<=BR:p2#p&bO!H74&n`98`_,VkA6]t dGYqeN"grDXBFk;-"dNOARN[5-%).(_NSa/RE::QKJnZG%f"PP=6#l'D'I48qT\;))GTV2K1E51o"_6-RH;:$cTB5"48AR<+U*>A7N?5J+iEFl=h2p+f6]h.fH!E0CtSO7t;SK_/h/-^ndjRX4$rfi7Onn#B$CE="L=nLG[]3HB=/ph65]@7dB7P#"MTBSV[P-60h'#NLG=klt'MR8q=d)fN])[O_`Ia-7BMX%eFAkcF6JT8MYV!)/cL3[cj3VnH$ &O"iJ_g2HosF.@ To@V7OK2"LsYYcAs5>M+d:Z1XW*e%Gm*K>tcI$W6Q<m7EInq/U],5G4#:r7W_snqr9AieIk[DsKbjR*EesIK>:;KCW8U@<iGC:efI1WKpq5*f0gc`oGC-Wmtq\Al+mGjk*8c((J7.$P@<I'/PWQ^3 [M<U_1e;3N#q,e*I>LOlgU_)VE bAQRh6)apl,gr3^#@NbAij\=Sg_"teo$a WNJ s\q8+r@pKXeJ*sh/T'M=L+QaNK!gdrAe+n+FWDCZ"d73O&/!Sn/0.[9[> *,%SA'G#Dns-Q(RBA`aO7ehjH)t4Y>ad%^:]&QOX[M[-RS5W3846J>)6])AO>s&GdJcAMo[mJfU<>YkGoI%14nSCT:feac2KYL[-! Hm.Y7Q=:(&B9'$OVjb=d*e?S5Zdm'Ar!qIRLG soR`^Z:ee +eAt.Y.FKE>i*h$BGo)I4L)L;A(P!-]ofO(X1\m,kn4&;6HQO>TAD--L"8+>W$6m'Yod<GBS 4$1ens['o\'?+TM1IQpBQP/k*mtYi%&NL""F7t rD]T(-,tU=e&hqTU8r$:1KQ4m<DqNX3""9'$,J-g+2>9bg_4 (a%XKa;;A)@t<"XXDf7j(g6/+W`o$%[@-c;ZRqq,9bkhLBa&X@(>sY^DS2fB`Aq]pm'8iB!RsL`MeG$<n%bbFG;ZT_,C=fKoAn0`.Z\6>atMf'^$hk"i[r`s^LS>(WP-?,oPj_BMIs_1gA$qsn?'oA1Z4Jf0BFg8n.[sf@=gm]33!6m8_IK6',"ZM[M>)ejl/M5S`7NlJr(^!dDiF[.n U%6S]nseSC8=_2>bcCgcoW!sAbgRfL!kgj%81")9\EUQiobP%Cd`[S%t/WV[A`T+_WFrsRlPS]gK0G?)-l)i*_X"4J,[Vl8mQb= 9Y(a^=%lViQp(feU]A`:-MU`^2tA#_mKUj<U[ln(,ai!8it:%F'1nCeHsVA5gTbn-@*%Q>SE*h,pKKl^n\bg5LBF6AiUAt tY,8kJ; B:*CQk]!fn4<&*&JQh^]rJ`k'j?PUQbR*dHa<FYHk *jLESb/Y3>YWN`IqI5d'*L%2HmA?=HK>g73Ih#/Xke3&c^@O\rHG&A1&F!VPW^2'H.[9WD[;=#!Wk267l\ggd@B3(CnV"@!^C*Cm!@8dg6pIAiaVY`omHsisq:(]e_iW)(,.3 '_Y;bm&qQXO[_c2L*>=U)U@@V6QWjlsCA_(^TWdoAjC-6.^kp/8PVmW2pMq]*<[rB(-bHH34$OB\;86lq!,hgU*m&"",6e6`dNO<;S',0jLdrW/-I\P1IJV##$>:TEl=B"$WHA0]k*cVFE+#j,%ZID]KSV=&2S=<XD\h!tWF-.maP>L(qS-;P%%"9'*IqA5:E(-H-^IJo4-*9<L[D_@F$SRC/WcRbs#2`+"a/.2'(ld0]^PMBfHHkYf42TWB'd`9LA5\>o]ogIIqd^G[U"P(ngM;AYL)a\+on0"<DIEF&RKY b4#dbB\N;Ss_eI0cNWF;S6$_N9ZM8`?4IN)9-[M 
A?H=9RqL"]"V30(Fn(d#- ATpboT=s  B\q&s6GkUoA 8C7.Q.`dP)!oV%I$cesctqRsMFS42&rj]@MZlMsC&$&6P]>$+A,iYLYIb]Ok8_fBRloV?h^>i?A;VM,5F5!O;mI)Dp(7"H4E+C8$M,K=ZHM3iaT?t;s*\4tZ*f-!dB\9UqF>siV<VlSi619AtA_cRe0I_E)Xa5 3aP7#M+6\jp5\D_I?$-Eh*5gIaAKH[X#_-rA%PiIQC-XBWRC:Ct^<fA$RdGAD#VN(&6l)"otBPmOtE1e=dY!,K`.FS7%oC.!:4V9ggb,W0X]]%dtHA?:i[0LKsX)?_lmCg$Z9b2V8ljqB%g/A"X6BBAFDA16s(VLC2f[:mAAZQ4kcBX%1^h>D8lFn_eNf,5In:nD#h.;fF#M\P=*J,DKKM\,pq1^`=90Q!e=,@LU-4PFS(IJm/J$0mrDUEDlDZ/<>SUO"MSWAb 3ER:/YH*ab4%E-KC g>8dCgZT2$7/5Q?8<p*T'SHb"$frE4\^<KG*A*+pSj"",Qt<M,Xdb]Y8n4!@T<:Do[P/8dIQeA@+"sD-$63g);7?\p`ihr:<\/ nf_A`RaC!]B]-!h7i1>)&MqjOj34>%m-^a8GG<j)p+YmhIaGFg[5=.Cn+To:r:$Ido$jIrj;9CIBUB<#s8oO<tM:ek@(Y@$,D1(g,^LIWC;F5]r=6Hhe"L&l]ohA9Kp)QLtaXKB-X0b6:4:L[bPb#;B.O($=%0ci#agASXh4p 0U6l.L:t<_AA72q%Y"1e6J.WqFIgq5!MDQ$C\0U`HX9hg L^6D,BGRt7 .%fX d!N:ZQ0j,Fc9 9!HBmEY`#9%bU5>*P_fF?dA5rhNmWU`@XTe,O;S)EF%,0Aj@T#8(1A%1:!?bI?U_mGggnQe\\1 1cTr:Wbbg\b 4RGo#7Tm]\@0EaI]:G>4:2CR+IQ<r=/Y- \^QC<A;@k3Bt=A A@QD,^$B$7rt9gKP4$QlsnRSU(%aNGmBsG`R>dHmOsFBgo2;1g6@jt1pC'GAaa>/HM16?i(+/9=h'-[]OL4Q0&eTGc8GP:A'>O:1-$#.AdI0Bh9aALq^#XbS:^>me<*kQ*H&g\+oAO*#U;L=M;?#!"b[^K*?iL+1seV,e4]M?p3A(=L3ee/?D#p#)2ffq\lXn;"b N]1#GMBaGDTEk:[<n-_qNHo6M4EH`3*\\h"_a_3NZBf4U?SW<;YeTUM?;ho_2;(,]`5F^f7dF>r(4ZF$j50M\K5W6NIMrpMI;E5X@I:nZH/sA8 3)#,l,1J`/8f/gJKP[h%Z5\"ir_WiH; Gi.-dF4UA+#(<t`cg<W$J5s=kj8&$lA4pS\G]FF@'NfC&YU$99[\lG.F)1&s<!XIrO&k$r]KA[*A=WGf V2r#pDWYD]!C=DUW/dG<J6a/a=FHG89&FeVM;*_\i].@;bd\Z03MrH(9SU9/QZ*Jtm)Q^6bbL)6 H4C<tOF+\s"gYp*[n-OAkRA%cVYK-;"9o\)I>`EgB$!\CiV2nfHi8bde#CNHNg20q>KZh[O)2T2+'cUU'#L>iBnDJG]`*"i>:Ua<\bJcA,b&*i3Ct02M))Q$$3LAH90I7TA-$1SWpR3JmHQ<)ki([726n)#$LJV,] k[ZnXs")+P1EUfF--*kAWRgp(e%h^Asmq_E<I.W.q'B17B\ND/M"HDt-[;51:[%/"gh)I/^b#Otc*V5tSE+b AcfWW9`Q=B8V3J.Gn3(_3E!3@^p/`V^3GX`lW\2>WK[CS,AN5U 'bgNLlUYn]A/FkZ]F9KU6OtVFV$>M*1!/q2pKO]8#:Q4'9BA@6#N/UWi@^,Bsq!R32;%Za_A-;;q+5&R4?8endVAd#"[O-?b$`E**Fk 
"Pl@d?3>T!taRl5"Q=GW"IE2?;81%s_b-n7jH!"VH[-Y8`^?[#8kBfN)oKgJ8?k8lA#0V]i'[dc'tcNEbaV9XT_85*(<5S_W!m^l;h`Z$`U;\rVKW:V8RA-ctGH7O9roE6;F0D*$C-(n&Do^tXcL3=$%@K3&An&/?/iFaHn6&7'X0!G0o5ng*#fHsl*_BQT`N$fgYGUC.Le]JG@NV!fAS0"WEGApT!R/^)#D]m`H'O*kPd?&kkAPrR\(3?X2lX1D'"*Q+,A!:</BMNMZRMT=QZMAgA,(WW:kr,tpU2o5%YXolD6!SO7?#9`;:PjdXfDA_ZWVB2312&rXY)k/GEQ)I]--qt A811(&AQSTQK[_ K_%s8AL^n7td8HYL'!hq^Uena/5L6M"FQUrU17qncbE0F2%jTj,e?*;RUE)R)S;,r4HBpG -OfI?2fBE(U&$_P[U^o[AQSp+BKN8P$2Q`B5YA\rda ^8tTNUr(h)4fYhg'Dm-U6%m[7^Xlta#gD#J%.mJhc#`qaG2!^r_0`cUJ>&N_W,l!H_'PZIDVD:X%1j40E[>c#3l,j,.JdXlY&NsLrL!<ZqXLS^TeAb\QLkQ-J7Z$&4lr]sd(kVX7J1#5:X0cJ9="( lF4%1J@eg2IU\dcY7QOE5$B41?BR-onR6[U'P(Rkn;k%L)"'B^YF+Kp*p`%T1Gmb?NTl*e'-sa3c5qb&6=pM#X ]* ^3WM7JP4KgG6Sc`*/TCn5[":$96sITMJC`US>Xff8R;6#%K2R(p%O$rB")9tr3-FJd7EYgTS A(\!dgX4Vq_'_<0FJ%8V_0U>)jDSpRJU0K79LT2(C2*?NnnAIF9a\&AI['B$EkH,Tt8<rKj?Nk[:Um6XeGmVQ')F/jBAR=!?)MA%@rbNhA$T)j(L(A##"8=Ap-J$k%korCPtLHoak4'Yosa%Z2Ss_TK&eiQlp;G/LAa*SFGY2#VJQ%'LM G9p-b@!I^A/dDob-/".)9LAarqp)<s DB3%dA8jD-VN0PT"]Ab6];DA]AIt/m0ec_3R"\CoNl>:JRd7bJ1BW6 imLcO27=Y)o%j4C/6;%[gAZ$k#`cWCil0[:sbU)Z9`f46@>X/\>MO)3kLnmtL^/4Q=^9a@N/n^D4sZi)R\]0\4Q9YE4VG,@d:V<V6NUq3^-ss4'(]n@;a8.k__Z7QP9\N#k:22"l+#Fs=9G8]!r:JGZ\pOZ_MFA7O=#_EZ7aNLn_Y/(;"E5j0!*F](5 Zc5edh3[Y7<=>?\ jrTm)Ra).W^YAq]-=@8(kKhc7O_^CUdZ$3ZlC-ZAP0"s=^\Q7':=+X^r%*," OZQ M, l%oH2X7Z-1;/p,f%i!'Qg90ll9cE.]6)=J-71ClLUS*D?Phg.t,`^g6c`q#O/NtWP1dHAWYFIJL/;ZbrMd6Nl=UDrs:\9D?#YHFK\ BUhIJD#A`faf6"1X3g:bkS?4a 5dY`"A6Z]+jeU1QMhcQ6]@,e"3)oU<<sV1`r/;M]$)/HUT3fp<N52-Q.l_1Dat5o8m5N8TVaip8'BAmA[g[V'+=jU2iop!WX+#+<!X]X" HXGoc'PIo5p](%7d:jM8\oCJQD>Ql59VhPq IO$3[=&)j] 6e!fY+9$Z;C)%p+csgdnAl;Ote]k !n9/`Wc`<PfNs)&S-cG9O0*^?N*jOt$+HB!3E@c^rN$eD8nj7A\mNG4'9'70<(_".;qVTM?:L6OB$* dl"tKpleT4\"H5h'1L(M6(A]Vt76dgr=S-;2*D+$`!kN.B@Hg;A^1-M. 
.OCb5tAS:S`_.Ijdt/T5Hqr>,9QV4F&Yq ?[pE_lG*nmem<mTPP'.2a2Q+6.BY0dmi-U$%.Jrt37@07jAjF/'6$!#taa;Z_f0[)2l"8L8Z(-+5*2s!PB9Z4K+dUcX@PA olt(IN]T3[r*q2O( gV,8 \5 pQAUq`ln#KObIiH$)Cm=\<BS0([.VmB3K0$1Hkq sS_\pl/t.GDGG%Mjo(P5/(d5'XOrgl2N8Y9s[.bb?oXoB@Whl^A%BT9YW1#'SoQA22s7_G$"QR7B+!$f`kK*4=B"9)*P'm"EL>BaQ-[+WAEL@:j@A$niZoQp*\fOtI0/peWG0>OL?Z`B?s@i_?J9<$23sGI OA.(ZQ6hotT-KjOi4ai.T*27jrJ!jg$3WWE87=8\kBD`%53Pibf[jBAnKP)\XD*6Md-nrH Lk'o-Bmk6SO-l=d(Wj"Amp!VC ,iOF8IgAb7_0*XW3o`/eXiVN3q2k@p=+sSiPVs7$1hR4b@MA-'Oo_9\hcJV7AA#it^L&S$GV(m1FWq38)d\CY-Bq)\ZaHDdA%"5-IJ\=S(-)^BHEJm>Nt'fdfi-`6%+o(qa2o;j$$AN\K=I$'FEap4D6p8,#L-8%-oUFbgAWiG[nD.g5p<%0FM:ef*AtRh5jJE=N2'fciPam'd#\@*R>?2Kr3Yn'RA)($i%$,/eo_p!8^#<[%1Q\7p!i5eR9dr9jkE#NfPej$kW,&Ar3N;4ab-a-7a,qt<ZH?g6f?jCUc12&^W=*3Ca6$Dm_^kWTr!mgHS:1t!5qs=#0Y[B!DGC]GV+."]o.-g:M(oJ>rcArL^+7<6@aO_ph;C`j$TeL^UhfA-BjjQ<S$"QY$j'\qK+F1lKi&?lA^3K_?JG2K7^-:9*;Z*]c_ecK?_Bs'@]fb6e_Bp<NIPY5X'2Fg]H8^!`P#e3g>D>Xk]n</0/0W7I/tDd))873i5b_lF8eeMgWec[/'ok2AM:CM^qgtP<+V^G+NWFhreGV*\l213MJGKWA# 7s1t?&VVGiC=1:gW`D jE5-=18,V+==9hJ<!'%N>EKF.?:ASU8\$A!@r+XLUlr?[a,@dfC_fN810! 
D!KYBtmVs5+#U"I0q_F^A2nfF0RE>t&P,`0V7._ajj^<>Q?tl^!#$O:tRlmHd!BejK]aqUCaJm0[Vjh/*iQ>D,-;S]XY$Pn2@3lh1(:AnX,,`4?'Z0 J)_` -Wr5?\r2PV_bA:h2HL'/?A1($g0?SG!"=:N+,8"hqI9VYfN4Y3?bS<;[p\3/qK:D80G$EUb]tgDU['[O)7Ht$c.7WT,dep*%&Ta_R,XT=n4G4M!/A.04d/>Y8'Z=Cm=,A'a!=bhRADYk9?sqics9TMQL_1?7:O6E:/^de,TXZ3A?enI)&@ir5U*9;HYV/nn$F"oJ#@a'%*,m6sHScVQFj/Hff/Nr ZP<'V6FaDYH4<8&i7ZTs/+FDH>2F,BP&BAM`9`$NTRCi7j*,a\S!>O?4rCV4]N1_$p>]E2eg aJ&sA^,T;n38Rj>U9,`mY@_kIsY[fbd%XmmD@p<rAa$GQ#X8&hXR@WWr7kT_j:fhUR5%S;-F7O`L*%PX;^j,gkb]f+0.TjNfbZ^M=jLf >7A6^<ZkgVr%$jTA'lpL&H@-L*Ar*^N7P*K!&W<l]jWT<bLfC6nL'*PATq^A)3Gric?9b:BW<D5DBXdb:)Y-E]i_g-<dkK'%EeB,qN+oI/e#6\\dma)#dGH7\>01L1XcG`&t+ISC\L^,7GYI>)'Z +t2.DPsGO!hae9L0Bt*U6D4g.edlmm;g(;=:h5:@d3k?@^!mnT%cjRpQPE8r8BA-70K6PS,#8mFp!^_lDl@kYfL#XtMEG]fe;d%%5:(>Q dF`cZ/\eA)^l.`3dPAi6(4VAf7jWV#6jfVD3.-7,$aT\ICYnB#et <i-HZtij'H?j\#p*A[skA5[3o)o,rG0B2&G<IpnV/fPfeleD qPU5HKckNI96ZNfK[0"bhi$_Vs;[Hl(s/U*L7"$"lKF+@ n#1"%5A]_5g^$T@Ni8M:=!doLsS>H#1SLfA?KNg6lPl`.0bdR<h.,!/AL[E6FNYF*:^t>p?Y!rl(JUm_?X)l>$/_!5]r.]f?M/B+HiL7Mf8P;A@iV:]'NX=K8]_^$, <4< qPK[gag.V7WHF@&ZKq01-"8g3E?]VK2ZLpmLgehF%SfQ"YM]W?FRJ4.V/G:jGpe\bjt^io$r<30]HJ"2n7H8Q\"o.=BsfZ\=+o)@B>S5.f]]Q'K4^bP3Tj0(q&)eP><p/6If-[O7ltLCP@!R[t'IC2BG2r;4p%%^37c#rk'BDQ:_:FXSEC_68"\A-J\2WNb$:Si?b\3jM5#OH`JS/Wj_1(#XAHOR0kgB/L.?Wtf.R_\EAT)k4o.C&Ks!C@]N!@JhaGrphTns7Vi<MP*gkR<f+"N6hj6b;0](MWcrD2)oK-0r"Bb7_hZO>6LjYG/,CVt3*DgB`+_q'9lK'-Tb+s)T27FJ[YR$Ot^!(W$beC?m[DgRN548N&LZ:/?sXs!F4]gFV*DnRMB`g8_X+a7qa^QAVgj.q0!rb/]OCp01ToFb%]qAHj,YTY2:PrfsO@?ajC$P(KShC T<Tc(k@sA-'Gj'><[^'gO?!f#B]].HBS`3X#p"':.l^7m^,J1843Mah@-qe*)t0ZG2l3FI*f]EU8CldM29$WUD<2m-&'B;kGU\$gsib7(8d2@&-r b=Q1Uo^rGFmaO5?iK+A,A7">b#Z6GpJk-"*^VNlaY> qTlfc5K:mCT4t!Jk&1Y<*CU$'d5qL">s#F4@%W&+,[e&G(s3%Bdil_&+d=m#oR/j%1CBn,`;\:Xr-` &9a#/A`5`^e%V-iR4X+N;Tl?fAKpYV%kWL2P)'<tB?j2QS-4Hol\:8@&N[^?aG>eB=4P0)#T+B,Ll@Gr'XZ!^g7iF liLoAG#MX*kLKX:'A#.EP-b$ZDpdgQhP5A61n:&B r_=$g-4AR(IsV*9CbiWH7Gs+mA+o^sQ?4(p"B @?:B!'*kpj@'^:73Y[[P.WoRKeOm%066gH'6B558DqDXp6tOQSsD"c?6;"h,msOpSBYjLq(QAJZD;=Dgg@Nbl/ 
d_$0:fM==X2\=m"j][d/Z7K1[3aGS$p 0.6ED74l;g"3;7*C'c*7U8NpCZTr$^9FZ7AWf.EA<X==C-+F<:$oMa=k `=-7fS!_qfOJciIZE18f_e64JQU/lCF9Q`"1^qX=6iE)Pk@s=OPT) X&<$!i7Z%]alK'D Dl#VCl,h_c"I:^IPNUWi$dGM8@sSr@&%B'XcqA_@1a6eMZ,)ABFo< $hp]9!R_@1daHGd@)*KXpgHq2b6FV7>O1&aYi/!mVUmC1Ya`` ].M=JYJAX#q9F'k-#:.Y<eI&e%J9LXi58=L/ UN9#;R11i.+9bD;U`(>An!foVE7lb9Ab<I#G8c6g#WoAc8?)fUh:Q^W2OK4A9b=P-sP"ttt]\djRc.AA'+Q6p5JE`<&L!#CJ,#PH'4!8qk5/i83976'Ys3Ak'&brS,HQ@pF1=c9r8K"sHs`a%]<`-g"t!Xmf7#]bpN5,C#k .\',-q$1R_9<m1fE%U\^4T7VUc,hSY8U?F2n>BTX$LlFRR&Qj"%UdbQ(2CIQ"6W$NF4P\-e,<OsZa<.P#_aH&SXJd^G+rpNtIl-4#AFM1&m,Ja$GNfoh=e>CRGR;G72S1&rFhlAOQH#85[.Q0<_FNk+F\iUYK'Kg+,cYkkO!Ad6S\<9`n`*AL:MH*-NMo %1AQIYZB"lPft9$<r@48lXt^mGQXA^Oi#@T5aRE7W;)ms"Hibk!m7mD<OZZl%2r_='"8E0:'FZD!!1F;^^co"cGA1%38M/=l&l+Q\5iOq!iJ4D7H']K+8jmBR5.ni)I,0n7PVP#0<Qfe"r>q.4K7$fiQfW@05*2M0Q,gaBYj4]Al#1EPUWAs)pd3/fM&;3C*le=YZF<IRDn9(#)T-_q K^Nr8\2=[)T6rs;,fYqHbD\Q=VLUOh.Ncq$O*2R6$ZtWXjZ 5%pPF]ER%++K[kb_AB9ei&hG!DG0\ 6CbJ80;Q`DP=I\o2/^;XI#HI#*?B=dCn#[-E4jDQ\Si5K46X4VM)$p#F?O\#'7IZcdYIg)o?fR^XGbNFPb*q[]GM)QS4b@cA*IDad[`0ZaJ.[#[@Lr+.K<hRA\JhY8"8qKR`LtOcFilqAee3A+a=j<FIE2RamDL\s5_%%X5&gK#3<#=`>'sQ!pS7Tk1+hGA2b`A'cs*&&!#V2;*;Jd\7a@S/R$8mgLpcdf](QGWYAAF;O":QIe()+[::3&Xms,6G#7s04PXnp8T-Dor6bAZQ.i2VIC!BCm&,D5j!LJ?/+M*i_8t+s6:!pb^;qM#f:)k4&!rfLUQ/-GcDq9%?$qtNFe#4:"*"IQPBZc]61@(fT!7/.T7 <t'tA<N`"UB!SdX7Wq<WO?$B6A>-Vn $2>%%,jjIb,M$`L-82!q1&?%W0qpDCFWg@U18f*%::H%sRL#n/t.r<d,SNsU&15Z/p:sWb@,kQfA[0q\q\#>!gg/?VeM"DHH6AI;+%a)h(ilYcP4<e#s<n:m`PUYmL6WJ/)ZE<IUel;ZHV]IjFFJA8=O@i\oB'T,,A?O!$Qe3plrcfEADaC%d "8fLCXlag_-F3o f^0[EhTAFd*o?bA=NiNL.m/4f49T-4XLLN\FLr&PA&:4q&'gCB<?rCI1=Z;\aTg^mk)mMD#JeY013GIlU.*DW,pM0?1o11*\N,oDGU8aD=fYhqpM&C6B1cF1;qYFHpbdgsrqX7S#n>B(lrq?$s3Rp<SE31\4UV(pb\D@eAq#p>ED3%\:o!((N)]]FrJrRO)"A)2i)t&LQh-l]WbUX=iP>l[Tf6MOoM7"f(:hfim0*@?Jio3=If%"H96(<8;7(M;/Zr!t?,=K"[aD:+6ak;,?&:;;me?f'7PGmT(B\`YmFqTaK7 :]4\Ap7$R:^?Xh0qGB$qQnjGaNR1]"Y9T4Z% T@pte3Q/??@4*p\^73Kr=s*FGJ5e!#/m33Arrl1]/1g6)jSIG:ah@9/aRR]To-!toL!hb0%I"1mJqi[H7@fT8+N:)"gCDr2\<AW`3q!)#6klf']q)P 
WODh:JMo!-AMX &s9ZA%:raO$)=,Sa[sBp*\`U.^^?J5nj3Z5Z*O>\Z)r([G.%e.QJ(DTm(TR(F\V^<RqBNZB>WqMk,^c$P=0^/noT/An<j9J=DG#WgDH2cN'UN<sfZ3jg1r<"i*jgtDe%_(e+*'[8R5_A^D]iL9P(/"b5c`?#ND"GGWMWY+]a $DfC'4l:dG;kXR)!>cjC,c4>r5OrQOU#M6,e2U0YcW;*1$P"ZM/1Cbl0?dQ^\A6@3:Z+-^nph]2 2tc#>6&Z@6[*B7]fGA[cU&)Sk0UgUe.<b%VLB76X=qo_:sH;f2fYgaX"8A$fVatHt$B0r70s^&UQUJ4O,<A@\sh*C7'_m%Fq[s8*DO=aE;_F=AP,bD!^e)*!C'$` P3%(C<A3-A<_d'B^(b1(H#Gl@+rgstJANe:DJD*)54&)H6AD=JY)VtJkl_6"AjQfEaH%HIaPh9DaMaD^r>FFA,'Xd`6epPmXfSsT )-iT>MAi:\a5+gl/.pP@DE%'St*+r;51hk6,/4TCUejW %7e$D4pQKGFW"r=h=a8^s.hp!?.6lI_:h]fD:i5=@m"3>^XHa\R9KU?;Vs@_%3-3>$H4L">8ljV"O:4Wje3#D<b2:2(,`RX*qj_Bm_%sOoN/dqZ?,,N3`Y2NOt2.dW8U\?OLiMXHe R/m3QtI%f)qNU@U;sfUb*<D1X6.IGq:C%h*O@PNNqnlQ_ XFfMYS4n7lH"Y2jTK(f 2+)aZVWsa9^ndABU:#Xm"oP"Z*k#/qoK `*iAd>HQBKJXl?p*)lf[K"UWl@BeAHi/q=[hF-*`<J!G[#.<;B/[o1bZI5(*:#K2r :)t(Q742hbh/6gdA1SF".>:JtCM)I7XD:,3%tZD(LV`d_>e@@>"LTS]ShI:!WRml\NG#cMosHE@t<1Z]"@JENC&=M7gW5t=D7Z'.KtN#r?fF/a[r-B20%J-,A&5A6N#<a_WmjZ(Qa@:Zf9dCb]4l.dJ[dFGEH% A+G+o4m`0#.p34?\AhZ9RJq,=(QB@gE%`sc<g%!Oq\Xt]a:^S[pIlN;[[k3BXS;!!r#GdZ[c?6*E1VSI)7WfGAs1rJs[bg(C\V++,_>K?;9k5Yg!:b=)Q,4jXh@>h$@g$GX"sH8]Wk$\qB"0*&3q,dE/a9g>*d^V8Rbrs&J[??YH"c9<@QAkj..T/nnaaPdQ ]s.-#&m/+,U0FA/lA[$O7;<NKN:M/%RqH_r[8bR>lW].`r;](S<c^\R OBUS@[U!R[-DTVL:nW`dP\2rc&@4=S3qfhVX56[:Dea(B3MYn!]t#Ai6C6J4iiL"IZecaQhHT%7MskGWcjWLZC$g\JrV^E#CBmk0\sWpb[C;%mgSJiL>62JFttJS_!I')O4fZ\4AlMm:7d1'f@2ZE-O*Xjfho)O)/:*.aQeF<tS[C4plfh3K(TI r(9sN?DMX:0#<e+N533P0XJA^BN6^l7)A)4(aDb')Sj*,2MG:*;H!aO6pQCIn.RI$F"66IL%F23<nY an[m#O2eoa#b9em P>AA,hiAFjV0mLH@tK5 '\F%m;g?AM]]&p%A7S&^ab7^5^%P^t?GKiYJ`"aBmc<g&`Wb_pJD;"C:cU6;5tg^U@dLFT2XN[Lt^o%265('rR76+;\L9WTleaf_co52*<QQANo9hXbjc8>X7Z\Q_KN\oYJ%F[G'>Xre>-"9R\O\_J*Yh&0([Aq0OZ%c&RYCWJW[,.KEo;8/oUC2PRM6-\sAqF?TXll'2^$TlX2OdR*W7PX0$CmK:J\MD#C*O:^b!*+rm6/ 6HUi^hB8%AmmS=cPT>9e;EMLUrFY6rAL`Ni]UQetHfeE-9bo`Z>!]C;$k\mtSE!ndgq@2pL@ i`m`fk#OGRcTeUgCB.'b^>sm s:Ib(j`%Q#. 
5T\IldJfhF^kdIAZ,*,;8dnT7Abkag,]"-nL$nb]JCWR9L$n85 [f3'd(WTmHGr>$XeM4p&fd+1bTqE_jfn% kMPkXk- (Q7V,m*VYdCEZQrda(r1>$A><L<^(5U%OA8A&C#j+E9GWdmR0$*Z:JNpI/&hD\3cD>;@a\/)=joY+?"$%AQs?J_0p4Q<Ihck5UXW>:([Wdq#;/>MKeH4kI@m30otpY[5jAI*4g'\f0qiJ5f6V-OmQ`B^LL@AIK$or:nOHa.N`;KEh*cQSNYD.8h9H%HAn#`=[Wk(?H)?[>6=LYJIbm"QDW-4c+ES6WKS^P.?Y^>1\O6Y44l5WdA;<HnHA;SSYI&hSE%N#+/AjRQ3f)?-/i5^1[b^h+biU!9/C\Cr'[B0E>.=;(ZB";qm%i`^;mS?$6Crjg;;qR(Af\1ID\QE;cT^Bl:> `_ij_8>cK[Ut,)DhKX$NVGXl0RG`GKH+I/H'Mn#r?Y"67Pm+'3M7&FL\DFWN#qGIhK$N1>X\23%N#@ c4i$s,^>tNSrY&Oq@#N?*R%D$.!:QU^6B>pA^.Xd[i/Je+e+AKb>-=H(8jMiD2BWi%(72F>()`Te^1;b@]nKGrX1c!PjC"@q0f<RrSA9AA=o5X#[sK5pK#mXjdTWRk_>^\Ra;9r-CVIT2.XLQ\GK`/oQQZ<'WnA?@>P#*3INmY$R0N3-pAO"lRsDlts!8i0NAmC?lS"A>rdil+R?%WAg?/jT@L@@? ,3)P.J(*Ps6FHQ^lM<)J`B-K%Cr=fPLptLA'c?r.\Ji36@Tnmfb*"@gh_j*mN2EhM[]55neP1'#J[4fb!!j6eA!/jA]C]7]>mii%m=o/Y>,q^LSS9GUn$l@jn?<ZFhMIDV=5_AdV`A*UKJHeWp4#9#RDbQ^d3(L];J^OV @SgJ%\H[VJ^82cNj77pAf"sZUVbqPNF@#h7pfCJ3sQpr>`t98@ns`IbS:>mI^smC%q<Y%r,rY?\63$GK2(EAs^Td@6jg&#;T?K/0mtOl&DbiYiaI<GsE^r^8_3g9mts4L$NpQ_T,?60#'Wg98Xh8qg>m&)ms:S1c?Laq4U:YlM[>bH.t7!14F d?pH[HPBtA%9E%7m]AK4-?q'r+eL b\ZZW%j@#BpRI#UhR:FNFBY,0 MX)UD.@V&b<N-.T:nGYlKV*=46<lTV.]7pJ8ZnT[)OE9Qcn_<mGAUZ?SSW/DoL0oIr`%mXHCFAP3a9hT31a\)_WKS!d>cVAB!`\Eq %Q74!Eg7O%CDk'-fK9/49\5_ bP*M6qTENeA[A5e[MDmk7Ncj,l]a&\T>WVPZ,>WJiK'_pS(i@HR":0-T0nI\pY_5f7\9]I(!&otDqY-MRSX+th4gU)pG0KRJ)V!VGpTDiU!:9MilXlsO9K?O.#Vt("0Rcq%UaAoM'l;E!a[Lko%C3qUa2/fgC%Ako9!#8Cs!X"!fOk*(.s12S9tn`,SFBF9#n%UVYKTo=+FR#@C"r`%rH1U*o.?Ee^YH `WRM-kSJg-80T8+$ofi&gJ.0SQbUcAm=pY63?SmfL.ZL:/rA RH`-&OtN:T$-4Iq@O.r$++jHcbd^kKr33*?KYbt,'e9\GBAJ2;sq-+]r$@St)O ,H_EFUaBX1MbN*f/1W\hjrDLK"R#ia?P(-an& <7g/@11^/Jf9%]6Pj[1)6o$i.Fc#q"5+gm-3HPQ9gbLp7FOlY[j12JE)[>Zl!Eo!F#.nSdm!s(;$VL2<XMU5-Y/.O&B3S;WN%*V4IE4*efM.m8jK-VMe?Tb!O!IK9RF4"KS6+C(gAZ0_%+7;?R`mHAYg4>#W`P:.^L$0"c)/'G,H"H=08o_%^Q7J`4>Z17cX q9dWhZL:IFn5H@U,.-1;q%U)%cqFXQf^=[h5p*3C6_>OVcsm<$t#-(/g4o9 >U/T>>Le@?7D(P`R`Z)?+N`S+**I#1X5AA3IJEUa";l$>_G^4 
G`\5ID8L$31/]a0V[qg+grWO*^sPAga>D6[hG,jX^W^:"!D'2WRQH+XA-AS+jOhS2nR@3b3ghn[,MH@sZQHis+d%A)=7$>IRVg+.r9.TT*ZSkmfMTg!':t7QUgk`b=(HD8ON?gf nOjj2^SPXoCV=HF_[P]cjPs,bH`r4]:2)7M[L*st%2Hlf&eq@ih+Fsop I(EV&oU.%";\0`@(;)V>2Oq3WQoh$F3:E6ai-spi^T$kjc+`rD!cWK($?kGN%?T(*]h?eO+T_E=)8B5ioL6W7<9)-r1cgW.XVf2Y3R[>+0]KiC8Bh>,C!$HAmQ1_8>2UTo._a[[qchmRnN2A^`BcgckQW]r(._m\g_gGt0C`e$GJ.r-^B"?<rB_+FM.HL8VOsmWiqcrmEXAI*>75&4q*)2CE^$F!f.;M.]>LN7P]_(^:2DLhTWH,:)A7c2sJTbDF*e%K*OL]<9"O:tWeV%]V=h)>r3!]l"pb3;\<e'dAJmXbD1-antl'2=j`NAfTbog'a/BkBY()=A?.%2ZCOmCdA;JI)3Dt2FDq_49aZ,`8qV4P>&!Zk*iUn;"A&'Wnl!=ghSC608;A@;mj8!GgAUm&W./c$F+AkA1J-P*ma>Qoqbn\;2<1Hbp;/,V^(G'c%J!8WF76%+/='$FK)!Dt>Eg""2/4m"ok<#`,Y5pdE'$&>-K#1FnB@76/(D#(pp))6aN#*;)ro'7Eea6qGsg"6qln-An'9;TgLrhd;:d9#["e@<#'AY6PeE6DnRL+jL)A7jA.qGMNN)s,_\.6EjDZ?.W*.#S,9Nq-_Y+57LNA;qZrB+4"aOr4lAbLcEA:Md5qQ.QrTlnsYJ\TH';5^F\EZD`<$8sKonrr$J&2DZ@I%Ad:FF0I)g`13t,rgm.Y8nA[.T!+k;kjd.#Ps<2JCCb$ksm0732;Bp*XjA5F"2G-e3<tMA`]ZY\$tIICnNeT@&_;X6S(f<YMfAdV=lAgLL@r6sm.HgRa-V/UgPaE):Z3GD>$rrAbc0sG0H<SkYpZH\8eeZ#+iN.$Y.^.H$G0a+lHll)qArK\69ESJf7P["Ys4ht(J]^]_)W,n-9tJ5r2W/g#"Z@:5T.\3L,.A!spDZ,O.OfrIIY2!ATKIpd3e*0ZAG`D;c&9I+G$"A/DX%X J-f+kNd('j4ND#E<;8EG1LE>bS$N)>L-Up?fbkQSjG9mr$f.,nZCYJ.o[S&YER?3 AV4&1-6#LKt7+C") \\@`B^Y:#i5gT0`(\E*Y!(.H%c9h1HkNC(>sV3XPgi$%.@YApoiOIm/=!4@,]9Y?p-5fN-f4@i.AIYK^QVgA0gI`W&*rI?X=Y?^/98'gIYnqVg[CkH>c-XL-/h]JSU_W<?Gt/G41DN&VAtQ\3E!TWn`6@hORe7-[+pf-SWU/DP.@IJR' T&2YH'_O3&[,Q.eV.U_a>l^#C5I9GII;?YteE`t4M?f6'!T k]rUol25g1I^2k0/q6UXhV16b15[cqs]P=n];5plS6BZpITMN,$I4bm0<St<;.n:3eI:_"),D&C"F?r2L?>aF'b:Tpmc^JL0/eg=Il\&:T 0JndFH$n%CUH6dCAa/+$SSGl?t/2YqPIooLf^_&/UtoL[WN/\,qn`TD\/k/-b$(!+2 TI[O@*1@h;scTFUIXHrN(+)<IV9B(SC)'%AjQi2`r9=iC)DbW`j1AVl.?j(*ATbM6O%n^PGi3>;jLZ9&oe'lo7eWcX8/ '6SQ3W4@bGC/N01^_Xh8b#Xi3rt^X1Co5OS]JrGQC!&V[bR\?++e@B:Kj:<\"#tDm*&?8bIY%;V1K,#E??rQEmr29fO!a1,hgIU)]U_6/X`6n)5rok"AXA`6,g;'p30]t$l!@.8M'XYZgAV4H%N\a1eG$Sg 
W!Q`WD!ga(r/tXFa3WA9mm5LMT"Xid&T.q.Q+VOrj#.(@.!hEYB*Ncq-et0bK+D1W_T2_Z-'r;d+df+7dNfC!To#[$RL3J[]\:3Ns+>?4J:Z4-if`L86=TD=M4ot8!7Q"N\+%\@`<N1#.,agY&r88.I>a5[0`DjbRiT/G2IkOpKZ`?)T8]a[ELKqFogA2P1ZGJd`(hA&A*':c8HM#3eE.sRcb#F<T)ZgU*A7g@IYe?/V2lZpKp<[qHYs>&,r[B3$Q7lZ&IW%T[h;>R;qV04CHjhKr8/O8IAn;h>M:5(((d1J5?qAS']+."p[kK<n)a6A:t-08^X:Tej<R*CA6\N;DW4QmS<i*&6Jffi 4neJp#c"CI"VKBDB?CfmEU+_RR7o70f-=A5q4g:1kAa%NC0]L1JgH[kHB>mZp1E+%;Lg%lnDsAMt;)cqgRs.^1_W\YG>A:QH<hPrN%>+1@H;\otrctFi96"EQF_m6E@W<Pd[jgHPjHsO$6*:]d S3c\:Y*T0Utr]qM`Kc'coV0Mfg-PE8m\b3$q.G-H]:>)S(oUA`pKfM@Y*o]Q3K5iCR.gec%4XOTf8e)<J+7=\<2Uo#IR!m::-% @%$;A:eqS)t( @RDj=GbAYn]%$H*;Ubhkm8eh\1fbKl4('*`05i1m"3%KGUS_G*0 _k,kTJ6X=?A\Y4Wdl]iTonCL:t)L.ptEB?Ab+:F?dRY[H=37M.JrRl"YAjj_T$dLtbc0Tkk4hRXTaAlFt/HcQ7HINZn;Qo?\P92q`si7W]WQtlC^`EN :akD;RZHn1-+WTX,h'0Z_s+<#2+_.4["s"tac_C-WhgA*gA4RGnO,2L&E8$Mf[mRiOj^!_Ec#cq_\TEH(>QsBk^J+A0rPl2=Q%1L+4j+TdUJ6)B!0IcqMfR&o`]Gsi@AgLV,-Kr-5l,*I9(==KW0CW-?Q"-WrnqM6"[%]NcPfP]nOkckft\Rj.?A;FbTC^MfgBhD- #;&rM$( ^b#)Y@;HJ#@2g@0pBj6PiOlX%A6A7R2hZPZk(8HfP+b1GB38&F=FO+%L+j +d:J#l\rl`nAbDEtdT/8@#s>l>f,t!.$L-XjP&Nf6Tqc h4eI<CH/$)j(#%LZ`?0/6Y]mbAse?pDiAaAOg'+F)@Aoj1=GZbqM]tqO4/`j JO?4e%:HFArISTng6,INUlXa&jXh.lKHQJsp?1lK('m$FZjdYm8&Xd!3GWs1AFVY='rsX=r\G\1W$ZR(;$%5`Z&RAAO?dot'im;3!I0A77D0tL*MK_ Z1bqE7=\QVb&sJH((5d.K V`A[h9d7-hhAqQnO&Z1]pGI8i' >sWcAEd:'E;:#6Js6sVS]eB9s)q`L,-1iNKq$F0[XXIE [#[mlZ5Rc06dfB>A:% <pf-"?N^  +H];[+Ko-IFhftgkI3GMHpanqiK.i> nb[n<FA[#Bj3!='TXWV6B'cJ,^0 i;QrWqq2)>;8kKDBN[HnaPpI;=+F*5n`Ttsr0<ca>^_&@ YOA:$[^t[#A=_QQ;VfHT=4Ir8\&" f#?QW>OiSEhrk @shHirk>9/berZoY>3bH<ci.:QqmDE' #14O<?Q7j+_i(jf=sW5t_Ulo^'WV[(tOIed,/9Tm%W+1bbG5hnCea!&n=sW)"jL<[2/t"UmO'+Ns$5\[s )6F=%k4R<;)@j%p<bRQW/,Mh2Z&L6G#c6a#U\(s6SU"b'EV<i0JPsF\OJPGRhBKI\,K<!EH(AOX\o@)'TE-6&.#"ZRc*fR^Jfk^h!nRXRoD&U)3JC=[F:=gD=$ch`KAQN_fMrg8m"K_ sU_0;JcM-j%13;+X(#P-.SjErga6>*PtUN>nTp`Xs7[2hAJO[H,bbROhY:RFqsVN+WtV^Ai*Z_8P_D_j?f!4[X0GJk<6O&A5'/SCO;:P+1DDH;n2O1O 
AADT8l,d3-=Ar*V>*IFR6.jc8CN+]s*bZMR)d,iH#P33Bq1ABaQWr<>R*;-@jT><98-^5CnO>p8]-G))41BD$7M,-hCAc'.Gs_DRC!gk>HSIHHER,+0afJ#(%9jJ`@QF%L1V;GfpAM:`;<1@Of(UC$P@(2TSLo-g)M05)F&*b7Y9o5A(OP5R%WL8rJd9Wbc0L.enUZ1F&[;ac7"!K,,NW*5]>>,X[>%@Ei37q,ro5LRs$#eLU1j&YVO\@A%b^MK._QBm;4Z2.3X$U6RZ*eOA7$bPkMd37aQEma`YiOa*QU;G6KR3X*q1G0K-Q=m1Ok#lE/gI5$A+-of5>a4\^%)$)=4-stY;Cr/]T3AHqY*,W\_K5-U/'.I6@C_A$M%G<8FD X0-s%l.>p5c7/)89RL>$-P>gtP]!YoWh?LMM!Eqtg?a,o0mMrYkjrf8?\\N)A]k#5&@8-%)RlN-3-!R;P70oq_KlO;CAAP<3JOr^sV:24rt=(q= $oQ(1+l%AK55\P9?t;BKMV>5dFHb,iG#<>#X_DMQoEZ+H;WXd;G=0+ Ze&@M\<Lt% 2`GB(+6TW?AB1Lj=2cTH9.9o#WG1I:e0$d`j"pWt'<b([?M.U]JEsT+!'o&oQdp;8d+3*%Zs)^RR5SdUkRNQ2T(:06e%T;"AA\$K)\:;^[<dmZJYoRnle';iMk&U!Q1mpC V/m&/,^!,<![=ABYWs3BERVYi4`SVXKpR7ZN,.jVg+BLX+ UrDb._$O,AB7nOiT6GAJ>A5C=B:r=1ULSDD]tHq$b2sYXAc#':I5 D"YR0=LWXee[s"f4O &-l4@8t^qL7l4UWlQ:5Opq%*_'b]j4Ik^_kH+K5(@Of_fhWAs`_D^2o]]=':KsmcPn99VoGCtCKP\73;K?l>O3gF-Ns`_O^l*e3h(FY=Ddt#M?Rhg^@`o>-OSSO&(G0Xa :Zk')</Ml\RD+r"M/Y(%Zt<k75?n@h0g>4ATV0/&##cWpG(AWj ?*kd5sqG/9.H]e%GKb*bi'Yg>K2bEcb6;fR'j=Pg1rnSL<!.=h.H2QJl\M_qD,J6g^6>fT)a?@3pMXLprO>^Uo4`ba_"i#tt'CAP*NP'Xt6tan_SpOq/Hs/N3V/PR9Y.>h(#njXVjDNmDSLEJtA$\V@C[>fcWN>3(Y9/(\,]Vf; 2p(b@$72V;  )\$s*HdfHp@j>DLD%N@gf?:_8[RU/O"Q[1jALA"[='/OAA)-i=@'636!el8[YdW/V(-d3>T;S2Oc\PR3QZ+7YGG6#d$ mS`#CnW9iANsQEoRP3ZU2q(@/AGF$8MWB:bObcRSOR'/;6iOD@iWh_M=/Q;fQ@^ P*ipN=7GDGqTC],@t(@%+1_LjGq"%HbLc9/IJ@Zl8sBP`Mc8C0\m5:s4Mj"gZ&Y3LhKI7+`m#(@r-R59\/I\$0$X!e p7W>""D(/k8OIt#3s;/0*7bYoik[F]4"6@,2'" WP@c5?l' J?J[TFE*d-oOSOTVGq:a%giE%Flg.bE626m-2i1h06BnFJULW#!20?:C(7D^9E0%hrZE&H^Ve4#j-W6<op\kV>8M\.!SdPG<,UA;M)Q -$[7gT;j,N\08e"\,;`6X4<^2AAbe0HZ`Jl]\8tN1P]-HI7pV:Q6DD7$k(pQZY7#OP-G %3co4:Bj.A`A5_iJrljh"J><-ZK^Gm.b3[QcPripbVm/EU0Fj:Dq$D`/*hJ\*8F8)'&O>YHQn6MSkmMdT^gd-NqPt m=$6iY6M7;l!l4CB>Rq2N=O;YSUi&bA8>)R9A&KJJ,A\sBCm< "'#!!I[*Vm;a4Nh2^f10r!.p:LjdF77_rg@V*XU<B`qfY/</_OlD?L:!,(N4@GP6B:S)(('TF@@aL3?t-kinE2cdC0W33'oI!%tBr?D?TK&NQ9@9^Z@UAjeOjrbgQMdA, :A\O4l@J_+'GU$lj2YZq`C1S 
O[0b6NdLiDVAtD9W5_]>%!t+`#/elU\2\lk.@Uf>0tR8$?qQJ_:kWKJpe['.iiP'G/,Z-L8pFOqo`KHFDsWdbG0n5ai<bI,GGX/bPX\IrM4Og4tjh7Qk6/$aM6VKpt`cbAZdfoij'kj*0\t%Q-[lhgD[,e#rltb #2KY#n#c[_oJ r8 O#/nM2=)E;JI>C?><Ha\cmVls(RBdo[KpER8%FWZ)gJ(4GKPZ7;6h\daAa?j$AA>`bVT_&"A+H\hW2"p!4E6CrV$*#N9.LL:5t<T%Y"W6JjQ+=F>M.KT#i;\bJdqR]B94)i0;I9)/;_5_SR"G4T/Uq?_3WmY*Yq%6K^dt:d59p`jeK A7Z^U=OOjHR2FeG0OT>S=mT<Q32rgqA>-A#'Yc7s s,@_p"Cb#T>.jI/9:8;cS5=jsSXE5H?(.VlG3CVaNG(*CBam!?S5B<bkE`;?\?H)LTb]Ro'1PT13Uq"HKEs)/(/6cM&O8X'p%XA;8;5bgFT9H);UYP+BiAUAJ*kXb]F?;!o% )@Omc<fjd5@]3N[mfCN*f_$%]j(q/W 1f4E`:3m$KgSiiVR"!/1kOscbi2:0B<ioZfAbaQP#B3'2T&FW"M8JqT)<6$ZR61N_WH4NP:\f@G5U;r'Vt<?o_`0dRW`kr6($A%gg6=E<<3XZDMNOF(=:3j/*LR>-Oen)\\p>"\/!i)ka$m0i,8S6no/o-glcW&PAcA![7-A3DVSAA9js5H_.%CH=XT83\Yq^+er tWe,P:FIFHiE=1K&;&5+@jj&N!]%i\Fq0sD;9cR ZO0P$4:?VT.;hFi_??dcI"pDeF4U.(BS>`oqn]VA:TksLA\OW18p^FJhqX,/F&MDPFm1GfAd_sUVC2P DUC rMl@APAG9J8+r*(JjR;!7Ddh:nN#'BH7"Apd,iKd(&LW^Q*A'W5$=/H)Mn\2a'h]OXO["oUSI"$;c]Yhds<X/,-ZW+=<UH['eZ,*f)KAm*3AD`mJOB%*4IFq9``(b:J37#SEqm:\&')ZRcg]G+\Q0t[P)#Y>U82dWk'L.ic o"Wk+<:X"N)e_DdrW1_OL0#ap []DiN\JjPe"lRaZM8r4+ag'7=*4tNbea=?;2HK)6%?tD982!sdEd*VDh,gqGpD=$j;q\mq]c=r@ .\Zo+Gj"FWL(@/4[T%TQ@=!b*<C[! .2M'*b]!Z[bp!3G#418!]SI5VMq %qEo+bTgA.tjgkC!ga3S1CYc$WNoh#fCa/VUs9RIs/TVlaAXn,P<qlq;8f;9"0!5nd78]WEHGfVa8V0apq(WA9fbl-Cqr,C6/,OC%cN&Y#]<qWD$WKh)X(.r$i5lE%Iq`t8i"`d# L+oP[RPJM:-Bh!!lrcL_]Y3VIAePj20E/_*cZWA<Ce:[8%*>! 
)TK$!Ih8)i&iP1HbDk!G@s5A;RAKR+JPgi\Y1jK1l&5<m\WWZqK'DY7p'PrN)egc`=`dp^*"Dtb8+/&6["^]`Zep`oAb<YBeI1$NkB=^-G#cZW;o>^50S?LMPUI]rfD&^6>LAf5[7MmdRNp?/!:!`3@ @m=/9C3*c'&=Jlb8^?o:`YhO-5GrBbqW0S_*:D6G'AcGjrds#LI!^7&RT9A>"[=bOJN`4oW7f^p`S^J[<(iA^7.$q0Ad9s=6-AcK4r?h`EXp9(`g59GJjHSm%LIU+)O` +05ZHsoZN=h`'"FOc:T;@_7CPD-TWFg/7<KsY9S-+0;2q93d?_%=5C_d*8BrBe'qDiKJ.59j,jj)$3Va/H#-r$@$)s7it_K4\GFLjRA#%@W% '^Fi5[T&Zmn:D6EEp?FQc,*R\]F,\GM*amdRBO:"lHtU $B_r+QK#KSlk:N3oq^`-Fj(PEf%BK+Ti4]RTFNgM>UUAQl^&,_)IAt(7^3g?X"1']/&b>FMD5p%bgqDHd'+a`\*88*$BeHJt K jm.: VL)cF_=R<IK^\8Pa-)mPR,n3bNE["S8>^<&hV'B <.p0\F'E;Ze?3+C/g@2-^LPA%-Wt%Mn?%PoB[r:=U48X49Tl7jDQ-WC!LC:%#5l[3b]>e0TDkC/Kqs->A2$7HgDU\Ob=*= a(%j89@-bhMa9GMka%/')S`S0@2kgA9MS!rtX65<E/8o7pAa4gPW$H+s@[7'a>JBEmS?<I#)aLQWioZ:]dB'$4lA-I-"'g/mN=OU_ NG5D[r,q;ri)e\Nei^0iX AGJVq0q25V\j<m ("o#.?sdk$XasYX7bMTdF&hS;ebA;#J)(8Q*sKDI;)]k7m>h7WqqkJToGc@S^qK7S0Mi.<iab,b!kr9Aj2:rLd'+:76mR)r9:PrPA\8]>@^WI'Lf1NZ(%hA^))K*%mlBs7VE9M tNi@9SNk4F1ah3?<`WWX!^:b("<!Z/7L,\;7dM6]a:MXdKI#Y <`04kFZIC:<Q1>rKN1`qT&[))Pa%h#?\O?s<IT_`'M'^tB9+grJ3'/<]d&Z]O.n1A WUBlaE-n-2TDK1h+)ig8g[K <]]R4(#9fM)C<]\HCZ1YdaJjX?+K2#mGha3>2#YYOVBE#jcI7CHR__T9Qd?/Gm5[tRf8tW8U'id fS@_5?".$I9)1Ve?9J/?hL:rHa>f<>qAn`Ug?i^R=2IOYq> Q-WJa_S&]S*=VhPS`-%pWptKBhe.?IMc/G3B/<o9kj1AIKN3(#l'?B4k'/+nD2Xei=ZbJ\lpm!0>#m9lI?p8e>FFpt]bT6hP,(b6o6=''.p&rYZ]niT?L(JV.1*I#qm$qGU+@1a@cL<! 
gk/<BlR\1Ucf79P'EKVCCQPq$s@EoaAjR7XR??!aSH!VN>dC+IF"Af(l1J6PA(O?[RbhZIp0IFooE-%j...DOF&]^l 7J;6pC2CJ,dd[#W1cA'0Xl!nH:a24**!%kF6\S[cp!PhCgTLWWPsr9lV.r_:HUD"PWti<@6R_W)LCkM<kp0cnA> `fcTl.;1+rhbXX5^=<Q_<k("Tk_4JC^hE<NgA[$drM__'$t#\@Va4LlN5A.Gq!7Wj?(T9rBF*lm\n3-+md] gr]4X?.i1()\ei=5B4>(Ke@^b')g,WcdX]c'j)@KIc\Ng'-G1@[:J%MJ0E<*&"f3MT*$fVTQ4=:5l#pAIm+ns-,:R(0q,.ibo*21jq`5Mn5"UXX_n/J<cg6c7j82V\$H1!mlYnj7M0AA[8X#)=UG+;Qm5G=`5g4*;j6q#q/hiOATs`oLIBa.p7][34AJe_r/q,N(CUMO4Seg%:(9s:FqBnHLm;Sp^HNf%fAs6/DL@GS,=F2<lY+b+4r."IEdri4J3-e$?ZCl-W c`+sdl8-'."@;dIr>l0H53.1=>IWA?Ac3_th=HtF?p-i^VFsC]c)4E!>+AQN@@\?IH*tN:NBt$^Ab=?jj;lm;Cl[>DV.MJjpeXd19lD4k28tCO)^Zm7kZ'bOjG-Jnd"A">-m;hHB@g)p8ZW.j4k^0/k7DRD=Yn0=-PD#]%5kU@eK.\H;H59XF[(hm]qV"R@"iOA1M"]MQR5, 'c2=Ph.2sbD+HmOZAp?_c]5mOg]OM;1:$_QkaS$csUnd/)e2iN4>IN9UAJ(J 0Md^]/dB3N8"^q.j'bfCjOSs-!rCKYUj#;_o$54D,Lq3H0MB?ZCI*C6.%P>ng>EW#9h("sMP8(B4l;!oe4)'&&,Vbd;P-%VH8&q:=/e_(4#4/:nNho\*6BbMqG6)56F>gpEgZK!nEWS,T?r>,ApAKD9E lr/1E_Tk5o#XUdGYft`5194^WV[Q@;jA)Za1NOQ-a"5;LZ'YB!6RfA#sZVIf(\6B\irp/KYVa[N*8m8O(MK9+JD=m;iF4VF@`7?mhKA>bdE50AVVJ6Xt_#pgZ7,U"B#=]*?mrF3MAYRfA"- N7ZDanqj(hK3T``#mfi,0 0tGA(LbPL0k0DWJ8XS82,b+*YF;#;_&6-&Q]tF<JI"C!>MafcfC/\r_QIdc kd0.o#^clX.E=3<l:74As,mM4A>'B]78%oRLUbCd8Cdit^GnMsE#pR:=qV0% skgZGnOK:scEd30<*f'dem[/<I$O_``-EZ=92ToSFWH7*J3o5<!J(q/)8q/1=YVRBh\An'7t].l%shN$tJ0p>$!B!jS<c)"!AJMYn^tf!jB^]W[,6hH$/cBilFHDo96OhLRD-bi6;;G#6B[N`KWIts7?3Jg6 SZ.ih<2IrIS93OABMd(.A\d"8KoWECE8_KZ>RZah// SC29rRbrk"!A.i%[!Zlg:&O6%iWnM9 8^e)K$` .G-."W,QN2rnGm(V'LEF*IUT1pmH=iC$@jp!D@&C!nDr>5FOj^N!5%Q<(jF@5<1N7^s<H]@&h*E(La )=cS6P@O[%_n`j>cFZ"ZpkQ=UKL`QqZH&"6Cp'(Q,H8ooqZc.\WEl1 a<GHjI%;4#!Of%KUc\T2o ,))^fP#[OhPE$(3N.hBU(Op0'?$1*B7"I)R^IA\8_G<*j6h>UR`><5 Y^l,ld/f28MJXjV&$I&T>$9g c[K"d\mc7OV#Z*Y$M?`Q(QY\,_\/@r5H \McYaaR#CV=JK(A*.LI_aRAArY?AjK+R= Pn%t'3dX&ZJ-YA9p(q')@2F:GVe9V+7j.]pdMrT QF1V:$*`@.?atU6Yi;0+;oNA_'Va0A38-sf6#YZn_J3WIfF Fsh(TE64S5-=Z/L)#<TWA<e/9Z<dh/<r"o[B;"I[EFh4/96tL*D1K9Cn TpCk_?lE@Xe;bdnlE,Um A")0$AF 
^8W0O$oX&?BLh78NfoP%`!aa,kE#S%DW>J7S.Hg_oYN_e--jU2oaK=peMh`Q[$D)dI5q3tA7VDD+,?iG=f*bs])J/8;qOam0mcO$FXp4jQqN!iMM'KIE:l+He=U[8o/9/sLtd[c3oJ%:Z5hVVZ"dO1$;idosN<MhfhQhk>Dg,(A'F!NfeY^O!akMYg*SM??f73PL6.lfHY`r_I]-`V;.\A9j8h0*Y73ZUQkqo!Io%i*+hO.G@,/d_%'FO9dP@^D"l&Qe*-[$!^=X)tKAH$<sLrIm3lmOC&m&+M'm^.*tK3Q4r@[Irnm$tT\=\(r"r3Q40F=R(Wh2<`7YVrjP<mXJ\bgr>:TAK4KQ"MFTAk818%GnKgdSG]J4p\8K2ZN".1om'PN+i5)lI1lhK8ik#m3jVY'W(+!%_n*W@.KUQ+Tq0-JE -7G#gd:ARilZh9/>]0,_:kTKm!F_oHrV8Y<*]D4'tSieR&AOkf_?J'/o:qPdReU)LmpB j?;In4k?[tAOi+DZ_7&<2GMK76BM`qMUc#1#^Xi%(H1J^OkS;a/^ANh8e.=-"J.f0\P$,h"s/,sW"Oser6QcaCK[D<^:pbTZB_$q;0FT//eqJj-%KFO37,%`6kg6B%`]M?"8cte0V\*W#&WL9:5a+,-(h8`;;D*I@:ZoS%9(oJoYfGoSgUJN,N15"(I/G?G$G.t#nspAmaV4KoAIEQbi-\4R?t>Aq6kVE>3X"XFVNn;<:;]r%'p_N[pjjeJ]^W :(&NdMqg"(!'F*PoO^!\f2q!S]\Thf+s;9AI_3-SA@k>p/[Ah'WtN7=#f-+k(+p5]e\^jQB6-rDWMAb,+K0d=WmR.><:Xsg(fR\tMOkh!5'L5@BsTsGMd4A+[ZSZIp%/$aPAAc*A>I"h?d:^? G)snhb2eNC[(J8=I-e.nb2lLLQ#TVNLli@WnK`PXVI]M[ki^r0Se4?W_&9M%)\]r/0lcJD[Rl4rI2qG(_]^1,`_RJ#YA&Y3JI,eHKKG@p2##LQX.89hjBo!t_,MC;;I!TWAp>!sjf.^,+sZ=Hr%N2o+cSk.9lg1d!]4#MX6(]ReFA=!%"gqCc-MW<TF<*\7"VG)<.WJYbD8\hcpDkbG%6cETS0<),EKf.W9/g!Xat@lDs3)o'#mc^H!S5VIC lL0Pb2W2(&krB9NOK$jLSe=gA%@qO#$7V*V[L`AFMPLN4;Mt54:A/%sN;]snb_1#^rVWOAA$3V7SGF?.<o,! 
b1W+tjd'=q^g.4*Y<f&ekgZ$T<tE.H.)3QpoLD\$aBatR%P-?("5^]>,8#C;c=th*btBtJap]K\^+lV?\>[;Ap8<&RsH8cH`#VS^J]?gIJ5lo'9Z,GC@f,AT(>^dS2StQ+mNQm0e`6\_:)?/Wr?,_^'ea6XtD2fY3$hTp?eTdaG<+,!fsOW1-^B^4:8d++a'ra]JQkMO9V#V2@U_YWLW0L)OSU is?CXFt'5KA:p3ZY/Og!=ImOhfIR4@Ebg3AAXGh-Xkr>!!?CbmA;M$=]<'C[W.i=B+s4"RG.)<OA?+3C3# G-oKV=^gXD(V#U\UVDWJ3jh:[C4t^3P0l1'kL6WaFX\IGFn2"qG(NJ8E_p9Vbc+j6BGLK8g2I"HI03YMa.k2J7_Wm;1)4chr![$5D)jMAQh(Q56<7CD.ad$,lMkXDH$lC2kgP9YM>DcBm(Q7PB#I2Mia<ZLo^hb8^^>Ys?\VeitrGO4@U"ZWW+>5V8/C#.hP(`P.9,&S,8[6IO?BUO.oU[*OAmm("Z=4_) RX>'AjKETf+?Jt`28%7n%dnU3bRc0=>PnWVZ0_j+/a5&kI$G_=Nk"nAPVWhe0PV*tIZD&*-^]gpV0E1P4A]i,DH8j*@-@=W9'B2DTA\Q23k$K./AVc':YSZ?%dQ9[d!i58q!6$qP*J-.i5JAj1[1J_`lK\6pIhQ`;pA0'"XDL0#0oV;N[mZ)e$qTtNRZb4AGH$S(J9=ACXY)jG%^ggr@%IsTQkk:ThT`@^qJ8[]qPj8$.lmTp,H<71TW@ASeAY:q.[0mZ5N,sA;VkN=6?j$E`[SPR 3n*:ND1c1-L1KU'-WPH6QA+/E_bph'83j;eG**giUl6ASo[]7m2[^T8G6jUUd@prbCrAOVlgIU[9_ch<mjMA@0r&$6Y8f.fm,LJ=Ki^A4G&^'r[*b'[&/F<>Ge;^X@ T.a:+&tptA0%5-p#@#$>r5T27#jKEjFHtS15pplmSM\mO`"FsN)d2jm9KKme *OTa="`gABf'/O=2@,e19XR&<8p:1L&U,(@DBYcCGOIeD<i"RT)8*M"R#%"[?"PE$4!a@630<Ib N[oP%rQ6_Aa%:/B=;5^-+e(s9]9Bj/&hXX+XK n.ai1k%^Yc((gD:6qTA8O5(j H0"'[$c'XIX]_FGdDI$b8M7&f!X'fi>7ajB<-fbp'\V>q7n>o8fBkERO\?Q4ie#pFb eSs)QTR[pd1`-YS=Jk/9Y**^`p#YPQENmk6UU-"]E'ZP?^,L5SlthTnc2_lKd_>imUpXBVDmRbd5r2-O?G8;6=,.$CRM<KfVIZZdPeG>UdZ9RVs'oPB *U9T*&Hkf^]Rn#VHLZqlh/JJ&Y`At `hPB&TR"an*#.J0E26`+CW=[s;^?%gebsV+[^`EUZ'aFMS!NjmB<oR8]8Agi">c cAG=VO+]+M[1rgR-?5lOMiXm^@0X1"7j_=A& Je2mMh_>M'mat?$)l[S'YjOsH]l ek+Ls5l8 cP,=`MW@m UoEYnV^MD/A>4mN=4?S/Jf3*>f%V)B@(;A$Bf^IkcLAq-T(f]i`7i6Xf:%=0U>X[+'(ac:l7o#[Pp;;`&D+CLq:U"/k<";m/K[E/L"23TMi(bf2L6A0'D3*B1 
U.-]Z3FHJ/#@9'V5<U<[#q)ss\;/h'q0PYFh=?L=]W:\b%I,bdps1\%17;.eWfMm0DD=;H3hXOet'6/P<(/-O]8\WRC\?-kklr;7XPBg"FNAGZt5M6&`mUHgLI^^/mG7_5I7e?<baYm_"q$/NqMU5"qMM-UGFaHDc?H8Od>A;Tah)q8)4]JBp+%7>nmhS?e79@Iq/C*J_5r@9_ZdsVc5Y/k`BkgialL8::XP*%V[cg^&Aoje_/c[ms_4<MKHE8:4\X_.TrALSj=1>\N3`Xt^(KCneo>^ogjDh,=^/RP'-=q(!%[B54UpT,ic%K+RcrXP9sKD(0/m%>ZA:s==@cAdm`aT]JIC63Q0209[%MM;9:d.`tnKqFhg,3(YO4]4 =@&[?%6VWTA+K=A%`S/[jRA/rCA3#BBhAdn D8p'l/W+tiilb;=^'\GRM(8b"I.[?*VF,+%1"_#J8mL7EYJ#1T%Y3G?K^57Xmk(TQ/&Ajl6^mhsR0CB12Q@$(tBiE0YB)Uk;A@'tJYCab76t1Q9Ta*0f!AdV!5!3%;K!<GqsPHlAZYHP6^1``V\\C<Wp/s=C8MAO?ER@:Kn%5A.Tn[B^6nZq1WTLl2?c`lMHF5_iiZ;gaRT1!WA>b<,N&mO%TdK 4bOOfn;m`I_/<?>:nHcOq]Uc:3ljjJQop8strF,cY'Z?ME[`ps[+adMHJ2A:?)ffMG@OHf6*,W(kDNM:P&#"f=)e3h%4^]f=0T]1+bapD=A60M&l'b^4HU\la-I-j<`A_"/V#<Ab7Jps#[s8A(:9Q8#hDAO8rVTZ]cT9&^QjY,4T !:N\#"apZcAn?&T+f$"AX^X0(t*WAQlIc[QTbT%k#Ei)Yo\0a%90(J8fDQ)qnrr)jgJR?L)')0l?/$KoY8#N7r]ekrqIHHj?Z0HSK8NV/@pGo]b=LF?N?CA+5:0VJe;'?=hc+,0k;H,A1E:O_K+2Y.H"?N-`XfWfRW:7+,*W2GpjC]n*oD9i$Dh"[7OY<6n*9qUf0q@pG+jDa3Zm2o(\9H&fpVa/9H.3;fm]JGeYQtG8Zl5[[KkZY6f:2QQA3H=.`;_66IWsh,AVKml<CGB7/&QkF3dR;;@k\5L_3B&;QG8h'O_&].=h=#n9HJJdCLA`BJ+9T96 aAgN 3DNN#7[XX"E<ejGt:.'rA&].'-[=HPG?D[WMPrPcQNbm3a_.=]c[0LB?JT4$j?.`s-(9i/3WA@rmAi<1dF!$*@glo>h6r)_Ti8b)KG.K>6ECdLQL;d`<t1#K%+kgDC_'le24[qs8T;"a?e2c&]XQtJI16 8ZN-pGA)AcR'M0Z0qG]CGN*eMcCZUMRf'Jd,r4rA?f%=K4?DPK)%O%nMNn02>VeFI`8=)`Q<EC!$^.`I3/#gd^l77:8Z9bN2p,m,TVF3P]%,,ND,>h0ejFQjJckrf,><6bBmIUJ+rZ MAs)AJ'*[qSIC_e=K*1P0G.nW_A]c%)H*HR.-.g-oO7C N+Ie%aZ]_fDW]:S.9TZcXq3*ih*a)U6k)iqoXs\"*dp#U&eM%A6mVd2[bp&`r$'^UEi&jSsWU,OjYX4r9$AG*r^Db71Ul1M9m8g6+s) CaaTI`9#b)U1AOY3ALhtA#q\_+]PE2TiW'6aZD\HdkD)/U<D&l9i'Hinl[%5/S'3^IPe\$(c1]PnfGtqWV"3oqjQX i+9A+<qe;gAC\$ONsLdLr,G4ikN#RG+#$A,G!hV@t-8Gk"*d1Y>p.d++A%I^#fGKAW]>CtGt@A.Qj%UKZZHNnXer;a(&?Bq,]W*ot]P9e%iZ@]lXcdPU(h#TA7HGT.E8R(h;EB9JT15I]q)XtH'h[0IjLYE=/RFFlcVS)&GK"[Ag`+GcdI?tTD,_XG..XfWAl)gT)7dOFFs3jC#ctjk"2lpNM!Ui0$5H@ja;XZXDT\FMh-m 
^D%AIc;N5A<sr$+1Q&[GsOUV77rQ"t:$ZXPqDc3l;7H6nn0sEUbd!Tp.rP8'B4:Hsc=%^.bmYfs;jgILi_=0L/n(@;dPU<_U5'b>.j%o%"a>#'h:kjq-Al`V\]Ih H>:F1hd,tSoPac]sc\'j:4\Zfo<-%foa.F%XN@]ZYm+SIFi3f2)prdB/c$U1)["WitMkFS5N?=b.\XXf4D@[A\\jpg8iNkRCch?pB@R3$:G!X'O%-B!HRanglMWA#5eaF2;a 2TCs_<CDPW_>>cEXqFAo7-<l$ ?4krlJ/q]K\Y)j_$AdU"3'P`AblRql9t06f8(bRC&*:[#E5c6cnNWAQgS7aSnn1X7rNM<c-tV+Hpd`MN$R!AL": =LdPZ[D5mg=%sD"TdL)m;AaQ7P_-9A2s[.'TnY'6"C6aFL!a"G][8>&Z0U1iOHNSG BShrT+_I.=P0:S]oA[]6&,W.`4_+#j5G7Qk+67og`SA8X5RB1)K9WG\go0C_ao<m$^:m[JAA4RiVWn_V`c/am _]F#-;K^ HV\QBI4UD()0+e>N-2sI<b8G)2V38bA`?:\Js*gE)E#3p@_Q!\'Ghk(?!ek\S=JFB+U_P0?Pnn<qZMQY+M+q"[+Ga^<<d25 Z&SUg4PQ3F1?<KG@kBY=sQ:EBda_oNV<OF7D03FJ` /Y)D3m1d[7@JPj)LXn8YknJmPGqOEQo.AAPCI;d9*<3eIiO98tl_[]m8LaAhC';5`ta2<H8Y+.*%GDJDA,6[+g\@dc+3c[D:B:C;jr-0fS6mJns=D%G'Gt@Lo!DRPV*!e=Go/,+dP>8bQ*>P_NM#%ie,fA31lW[sYn)Z-r#>4lA[C@Spt9bG0YQt_;1$V%O\-Srhr6H?jM\GX"KX!CpVK,)Z)Sa/YB6&^i4TpU\FNZUgPQ#`l5jnK]-.A*l<](I1T)M mQ[TM\;AdEF-n'*f[o&=$SD`oD#*7Kg#R?0'N=FomX:N"m$rM <qFI+S_jg4bY7pAf=UP6`2%?l6Q5/U2dH4_]^a OI@JZES6Mp<1Mr6\[(/J\b0MSRHDh$6pHA![3r^#M**XHJo?X]%E`Y3%6E(BZbmUht6*di2T$DSSEg^8fI`@D#/?:"IMNW]`Smf$2&R/P<qoX]sHKQgCQsMpQsKF4'cgTh\@DZ;SkDGjm_ocfF_4qcWn54/aH\)F+6EQsOOif4sa 4Q9'LTK_e%]HlDr#tL=\akd0A(p*YLhENc#DNA(:t84FYn'<,Z0/NqP4E7#XD$T;>H*_2X1o,RRolm1@Pe05A)X[3Asq<ig!'gbhLPd`1t_,eA^O9R] jl+a=jlE$g+k<"$lm]FUTi`RPZ?3BUU8A.F0lelo#drgnH2oJ -dd6*>n$r0RHNDAfC`JoA!e*.E]NQWh.O8CAM_Yk%s0aaP22;=30;9p`,liM"tl\@(q9K\<%V!ifKFoh0>QWs)RDAA=Ra\o]GE"3"T[2iC$PL_ M, TFR<0G%iR[j=4EJLZ:iHnSsAGCQF;-gd"'f^PI3s!WP[gbp\p2K</_:$sbN!V=r2ks&;J&Fe$:OSY`Pal2-K71mpkr.)+>g],fl(NBF,k;l[bE'27W%cKWL(J,8RH^PLqi5d[&W%Y5iU.IH5\6PfP"-2htG-L#W8b^"J%'XB6nV:Yk7'bE`sp[^i"l@kZ@\*T<pgMMK OEP@0;5<_>0jnA4(]_pf=R^SYRG+EFQe[V&i<c:NK1.6ACM)NUd]&GR`<#1()LtS_+1@kOK:GD3-9+77di^Y'>W3OMp!VRghd)Je+^<VM5=f^)qNs3.KDRBCM#6gJL?:(e1@M\U.&R2AJtbdNO&9 [\-T"p$<H)bdbVUPIIogC<lJ 8?'<oI6oigI'jY=PLB)`CVY 
/K2rJ&Y`CGd]`jT<qF!%''lW;<DYTq,Rl0Nf!jX]GK4r5S-9?b]TZQ?/Vh45G%)CE4Z"b*@ktUl[tUL%4;`;)KI/G(h/:GW"GjG.2Nsr79_+j_(#f)l7kaO+sO]j^jM>+PLQ$Bco QOiA;o;N'HA`eAV+654S[o4[>]AjcN/g'VJHS9Ltn7b7*k<h[<o<>F5\\`rkP@ChHLh"]W8!;V8C)&^%qLE?Mq88IHbJ-eA$;+qOaB@4kCGY;O=d8\.oRU%s[]O?\D\0JO98 L`imB\!#rHo0]i+02k9"4#)5:% A&9Q:,;sDZk0[o(GVlI*T-W[-G#J+-@r!,AF3NG6OhUcK-hXd(##J85GUGZp0Zi"9_Pj4^e])>UC3I8T+Hol=BE ([Y24p[<]CUcnR-,G=hO3WH`WDo]5;`=EGi3^d7aj#G.r2kEP6S];>ab[<d%0<kE7lJNQF+pboAUf[;< l/&<gFY(WgTsAh<:[8(i]^@\,B\?($"QQbQAXA-A._A4<^-d#ge4A#G#@?R%k'JS:V!Z[D=0j>3jh2X1\'!2D'sl/*+oZ8;&\"VXrI);dL<t+HS0(6 6!^=Zm+] :)3VZOi="O$Ynbc0]LR]d) kS=rgq4,mmKa-0\0l1aS!f?rm)4L_Bp4FQ^\)q;)F)jq3)XJ>A%d<WQ#&E4E@0"<Pf;*4BJ.^kfn>\(c?1; Qp]/?g?:.b8%J!N`31"B>Or&U"dHHk-+.`=27f%IT+#r:$0SfoeZ%p7[4RG!3GTUmO/JVKYm=5aC$WQ!X^@"9li>0s2qj)2G,AZ6%F&N#lHlaA (B5r5U5*TX\LJGd1!hOV`]YpD)_>[i*T,(*_oi*"b6a-am,)Y"nK:-T#Eq"-l>R9TSU310G8h?gm$[7DgXt4B2i7fB+IEWi`Hi<^lRseL7U>f@*+N_hAsH)J>f?[B$(-,E*58/44`l%/9]6c,Kj)TfCDaNAROGXnlUaN=WIt_!Y9b5@@?@57K-68'\0[`cnBG.YRD@coGCTG*&ChORm*2%a"<'0s7TKC!hp_`.A2Vi,d*)Zd Iq6QK^KSO9[n`[soc&tHMWD`s=K]1h ?BbJ?pH.n>A%CIWKNsboRXU"/p%Ma"iVi`mA V<a$IW"fdA-np8qbLV0P=UN(i]i2$cr&%&Ddb@`i:+jL.sb>'R)b^e^Ge!iqH`dqTs@a6\7EA(lWKVHW^,,2o5@moD^Q4\GN9YPjq7K4e`AadDQL-R]3/b89]MJ9jf7*2#m>=*RW&Dg1`A)dAjA`^&]^,<VUqairOTOY9@q%CA>OV.iYWHliSEStGtBgt71qQY86*KcG453U//XH5:'^R0<[=9h(IsMdA)$%L(R'mAW<m&07;O1 N6a8EqE'ZCXkqF9@DoE^UMGOkP);plN"H[aqVD0cg+J S00;(DcDDnA1VZ9;484q$C5%<ddiR+TK>5BRHkMP\5dm.:"J?0]+<SJ>b+=Eta#\XFb9VpAZU6;?1S^VLWq$\!V=./:2\+lnHEC=.oMSj/E72a7[kE1ZM$ h;m1=lfT#32?VZoCDnK?EAA*MJ<fMq-j9?f6i]dP-YM$I=jjPD,L4@GU0P &JV7^5sb=Dqjch%B<^/]`n!W9s,*1ctH.o>%MGB_l\<&/P:3%Me(rQ3N&'@,b@>KGk`4D^h)Fl$DmI>L:J>^1"MYHK.o^!_]!qj(Z[=!'3S._MeE1GdPrI#i^#jGq4"h\DZ,?i?jdXMcI\MtsT1:(PG8#?DM?Zse)Gi1OC1W:d`69"akP7!>:PcQc64aGhpcnC/A+d/nn@fQsG3D@9+i"GP?G9[f.3W!RACAqVrdn!k/VWJSfoh;f5C!H:a,M@n\7_]oT *>nDGh*r@N?2;9"6cV0-.e4](:FDEas<%>X fle&l=!C,q;6dNIU`;?C#[p9*d+'L<On \ A@egA;=o)["A"0,O0$C46I,i^LPTlqM_k-FYq/R*WBTBAt96f8:+if2VjTP#dh 
BimMR0sgI^+ik^71Y;h,d:EUQL?Er^O"\<(gOoi!]%`3+T^FtsK4U(\B7kps $_k_6g,VrYU%V?A@Q/&h]64f[L,l,!)YJ(;45?d/i#Xf`2Z`1-OoXjlU:2?e:L<E/*$r++T)cX=Mf*r`rl3c@i-NA6CC'HjgHaIPaqLG!)V-:;&Kca.21_[@Z4??N-4p.e!b%B3A TaB_*'27 Fh%&e g-9;"Yh:as$\*Q)@gsV2Ne9Kce'ANN--E<O`o3R<nDi\R4(<Iqp7$qVtVH+M7JHtZ9KR=YGeDh7U#[Wf27Fb9sh7Vr(GA/d;kF'A'!5t4VpN\kOc.gT$D\c-4[XIY4*jgX[HZHcAA$g\k*8%c@:=lfo;#<mCjVrLE<k&BN??K[hLrWn1$1Nl+1#*C8ZGl<bB*C@CfF<abJ^PQ5@ D H)MF;2Z\-6Fd9f*KUSpj>N_aX;a.e2PGRkcVPFIk3X*q qM`"jRY?>#E]"-`YLK[C:QTa/,8p=1J7_rZl7XX-\rlM0;$HGfZSt;_Pd/DPlGJ+E*aaO!X['htANedI)<'j+s!KKh@q #VO!HT3m\SF$Bt36o@0tc-ZYFs?VnN2)D,?Nf^['h4m#0;OLSfOA/dcHiX#L8b4dnDs-nV_btDU6gJ+[C%Q!&9kL^.Xh?JrO7jiCTc^3d@;\$,t,+JQOjjf^FUj+*m+*A4]??RgFBWlA]bs/CcX49ejPb08lN]h)>=g_C9`9sY'g+e3;A7At.1j@>4gM^Tk0S]E=;r1Q*p)8ira/O:I+9/_QiRc.\R<,PDD_lGKK4ff*<AEG9rt%s*]Bll_M[1B@DV\`VXtD% !+-2E6^M]e\URpb1>*n2A@HK=&?4IUAk2-S-[E E`Jepsj<LR3&st._hb6BfP)*QNLVW&3B,-:9kn>P*B-$@`KaMlrHo]6i-PBP]%i]$/FKR=$>-Nj9,m.RmT8H&h.Y5o,Y4)8.MPWLmnRC$A.D0pd>3C_sB(-p%7orENVmP6Q2kd*SPg?M+GB-RBH0'JU%NVt-qg%>-A-Xk'!V2sEXF*"<2[:htt#e L*[AMYK3B$ .lH-Pk85-mqpcW.2o$jlf9>%UCd%E2(O6)#kR1,1K6OPDV1[2$&)bJeFc/R@(2Vr5Ko==WmK80+s(4>Il.0SE XX= )-<g6\FfL;-8;+hA;sLS,mYNX#]=b't]\/3mj0lY&5gfCE&)F]e+SJLD$.]Zd7Fsr2qbNMJ7#)E3Ah>[W"=@BNT//C4EGCWVSGo5/iZKJ@$A2:5Z:02L1eR-1Fdh[ljahX"+-XaD&c2`=eod1g"_l_mIK,kVP!.1^7J3\YR%G0Qjr9N/]hgAdC3@bE$hb`$[A:f+Bp1rYU6TH0!KcnRN4( r9 ][nqNnS[brTD(`CEkeoLoKAZ-Bn227d-k/%$i?aU2kD`I!Cjg& 'H-/o"E'7bdaY1gL6bpAefnAqJabJZfnQ"^G@%SV)S.4 t>>rrdNp7f5?Rb@EmA6_q@(%OCd^_]SDsbgf+C488AFd%Kn@dmQ9D\8P9dW\^/+g-;_?fDr) qb#AY+T"R^]JkF-n8IP7o:;Q7W=4s&A8?IKrn0U9URPPf0Mj7^q'AL^<(D0:UsbP>HO>&N6@J1`s,&^j/Gi@^#i+YjnEf>G&!)dohal(\&Y^d\Z0_+LC$c6OFV>iPn9WCd`?eM:-Ahk[#"daffELAi^sAqbgKAf9q-+=^V#4:WJa#fI5rFrU'BM:a?b-&_-j&[T;,0KH,!:poUkAORn6L-U9VsCX9r<25r9g\hltAV79W%b'k2WC"'E6H*:EA A!V1`PnSba\kVrr'AVShRN[''U60+%2)kll<+f8+MefEDtmsXgB^;_O5A6cG5j8GpWVs#bGp(;;[oNB9tD,/D3[pcJ.@+@=n\F*l!CHa%KW@(&@0>=(W%mOj+"?og>4b;3Yl]ocT0Nr'C_o< P':8(\3.Lcs9NB8*WPk,3.[_29,Wpt 
`+h%:0\]?>$5s56aeiS;7;tHIgZ`5j_WONE,e`gJPR^88K+b VMQ jWI%]mc)3]1tOt5\/?;TM`=$D# d"#pfLjUn"Ns=:RX?gObQa2a[i2g&<^(jG.ZQ]>rJcqWpPjlA#EYif^<&.pAl\3=H05lr.=K'@dWb@AWZD .""MQ7dsXcE^g0t9(:TYUlHjY>DB6DgGP>'9[$](VBh&a 0t&@Uf1B9Bq4ef$]sPI'S'"=ef&b\/j<JP$9RfTA&s_o@</WN[sF=k\**29HY"(2ok6bIC0]Vcqp:ERXt7'+m:ASj/s4'EHTHV]NAcTk"4R2D3>'HAM"s1tPkh-^'GV4j"f5sAl%WCp[Y)9HJWC]@VNTf:o+'5_L5 =R[Y50##nJhCEX 4(!G#<9A?oi$pq`Pl&`lPWVAMrBDYaVam"L70VbSbACEAi-;]WP )>Rp4ep.3Ap&QF2Q[C!LI6WM4a*f)62!>29VZ6NG]7F)M'%H@Mjoq#H^+.XZtnV'Fp4H#Cp?055[?AK3LIt#_if_"9msn[+Z V##)Vq\/_>kM%I.,h=WA4[f+Ds:SVRAoHdn!o:CS;ejT"?@k-Qj*2!#eXpm>9RN&JS$Vn6kq%iOQ-g%Cof=`\A']IGVV%0O O7"qqeBtJ)Ula4Mm*86#QQ\TmkR/!J2G'?6e`)6RVq<DCIf""2bATAYWG.?PrG2ls+VR_b611R>5%*ItWq)P:5pC#X!:K^# 3 E-"eF@s5X9=$AZgJ>>YfZ.iq_C1t^JgZ.4D0]^88G[gsRo+=^/8d3e/(lr ?n4_4EL&%VX Sr$$S]@@d3SfX:\!>FphR;%P$R;TX<bL`VU3FNLfc<0osA, ri$JX-IF0(:A)PWV&sfiFpe8h RWBEeEagm+Vf^pnktf:V&nB;XarEd6A=6VqK<X#4L_3=5_+>>\$q=%mafk\@Z(C]/UZnI9R"P%@0U^_pf`opkp8.>?ZE/c&FVqlGBdYsj*S!Dq^8!K1?%mR:g:PCNqagL8hS"T(l[ ^<\J&9ZN%Q!A<P`C:*^c+RW1#c\_?*bnIY?5-%I;+aG:<Q3Nl)KB/H-YX4;;FR>tVKS;DDkbboAKT'nkBqa!Q90#LJR;..-te=aabVt@40KFBr8h/T`Zr"A-P\1`84Xq\fCl:USe'(eR>dblk.#l^YIH,*rbK4(Ws,:?Gp@d#W0_+.,2aiK5trRDn`KO(+4kemO.lJ?g^bOsWG%D^P5NP*$nf7kFG@%0W]MlP*3.$TO^l 9;i'p#OMAKdl/rKO+jU3F!JBFdg!qXAl,&sR<'o]*p7V[A\k[YNQDr@"oct>!h8eIH.QU5'6L]Vpe*-0XUhJ,5aAkK:6]UlBLMtm5--at  3PA/YT@^D9!E;%cUR(ec5*-SHVfBA]86_b$]gT@ FOG!kf!mUmpI5A\lH,^sN- \%?.W&ReT]TN*jJ;3!Z7GWRW'[\]=Sf%A\GK$6,s#'Hr_ME8DKc,HiE*H@0f*_TC(8^'qCZrbh#.`50hC]9A!oK-Ie?]f^gZD)'sC\JEmKPK]1&6U:A<A=HOlI)9?$ErSj8$\)6.14Xla@eN,lj5+Y;g@A,=V\l7$h&BI];RMK6(__^n\[Z6rLKo`oO\q-p/$q%AqSEB-X"A=:AlDR1lKCHVl.Cgn`4?5C2iS>&g)d"GQDg4PJ,1.ADoP36IMsso$*m!@4"Geh>QM@e]Xd-I"C)D=`:$5An$4sle" +\s/k".M#dG 3T/,,LipcSZYU%c_T&O_/HrO+U,IA.$a+=U GBt4BgpPB`!Fj6< &#:5M<??Qr .T[`"[Ag"O^2!9 
DWIQp_/7B.t6+aA/W9DZ($JcYDAf+Ne@iLs\;P`/>C'e0o,0](Clf:Dfg@-F"`ig:*Fr%>HIQ#V/?,t5H4B<Qs^A[l%88+d<$'aMa5Cm.fD9:A=LAHbpA3^M%5=rcnMAA$pCANJ0qYil1jG,8IPA/4.,b/JX#nhUbM%Ynn6S1]*=`pO^q_dlCh7ledKj1HfCU8$"sA=<\K=qKAfEX5+*?liR$rUBh"W`p8m(**5--fCj?,18QS"?h5^iaMZZI'V_qC#JPCr.`&0Q=n8LmNemp1Z32eBO;EACRQWaIaj0(]8dalOb@!NV2!tkfsC?+)\'f783/-1Yn4lV7c*A2Al9*8:;(^JSd#W69%p$jqQKJs8L,fAaCLsOPT5=;+2Sq%VmEA<1h#6J-KpU$0A-aA: $a'NV]oQkk^(f?eAnQV&tVO&a9p)A2Vf.ata,FqU]$KsTf']49JId2t'R"j7jO^$A;?9:0HiEi<scJnpnG] #>i(OrTP'Wc_G#2W[Z^L2AieEf%=k&&-a0f..E8?2gh7=S^-JY'KkHm;ABpb1&)@lZ!eQ*!<JE'>QXs0.qnTHkkO/Vi?pGje3%^0`EVcan>G`Xi XZ>kheR`oFNGZ'#pRA4:^2)Y).tsO%g\YYK#n/sA2gO*?MZLNpFXVcY@R;S:X3;9hHgF"N_JDmYCE,=lPY+"&qkIgr@t!!d?I5bMd^Jg2 IVIA=FRRat.b16dEc7l-V\7^Om3 LQd0I,tsa@fG\sOMF%P-K56+\82XDJ445em!")a.*49s@>W6`j)>]E$[5h+b`-ji*Q! gCZ01k%VIBlCt']*a&1 MUSJ4H+AH(BYE&B$7JT/TLbqtY9%K,r)Bt>XRsk[l]^9G?4p&N2.5J)4#k''Mq(1/FnAeCZrKqWASh/Te^'+p>)O\4rL[hY)F-Y+X,,@"#J2m:a@\Wp-]VbfRRl^$tA\!)('E-KC*[K27;KbY#<ErKh#`RYX>$*l")_lb'NtU:GX%n=[BX`.$6RN5[["/$pA^,H5/*/;WF/H.D'As+N+/6.YYhfjX-a1Cq&?6I48OVR*"tDZ=#p\&cDnbnP6Qe\tN6C_Wt2f@ATsR8]9[0#s*IUI>gA^be#nKI`4fl-]%0qXQ_VM1Q=bY1hn*,3UIBIMOF 3@QLV' )F>H>lfHZs[P[B#bRK_f&d-5_d)Lj\*Z\Q+aI#U)]D9TN'&!pG=<8KScpJ'WjU?^5&-EaU$aU5AK)E5eCIO[TKt[&WPomg8`m=S5j9lUKB$`i G<H;BqtakAhJbX"TK-41AnYEj2\$ID&PhN(sk5e1)Pqmp]!G5nD4h3P<Glm!Z^^!Tji.VZ aj]=1/>K5+qcW/>$FKQ:_rTG6K,#%PWP qM4f'E^ r;<_\Ja4/2Bo%a:f>)Z`X?)]0.V-<-=fkPIe$tRoPdI1M;;d0&1W5T$^dWVn!CX&]b8@:_AbB1c_V+,_:NtkthMM6m#Lh/PR?VEqb&lrCXFC`O!6+`Ce^5Z$s83MeK2`Ho_.jo\25P7ghe3XQb.3bfF&+?#sGeBp,?"*KNh\1XUqdn>j?V*Jm87r%U+ZCAgeF>\(9SPU@GI<<,C_j$QjT<fn]FFBGFPo-WqS"a@&T3!;Nj1qjCkfIa>>J3k +9`RcbX bL!&*U5Il$PjL&^W>:$=FcVZ'qg-8VGg17-o1"B:B@iI$$'h:F$5#We1rS,^onIE?S/@%_gc2oC_ AG-MnMZ b[i ,-#BG_Z/jh0;ebCkqCQef8UO"$M1cW O<tP-WA /US8VJNV<'+/3Q#nWt>"Li;97/9?Z1bH8MaXEi;3aAC4-2dLtfU@_=>R:[Plg8NkJF<4,dXk\WOq)tSP=T^)b#G+18Emjt.eHFst,G`]Xa(GBJ=5;"/>;/=&* +'/3`Ag 
kI-[i*Z/T2#ZK'>7PaVO,kW[20(nU+(BqGB@AaZ<4:YdG6jUAXB0CVteK!7Tn0Ai&rq_J4>O>siC:?#FqO6n,NY\_;SElXKtLD7WQ+*/J_Q19*]D"H(nP4,IA)p(M\X`or6MdT?RU5d-Mg8ZU)FIn4Jn4A-AF[mQ`f.a`;j[AgZL>3$<:jcnWk/!+Jajgf<F!&Ap:Hl \PE$KX,Jd,bnNr<$>:;p3reF6!Y!$f`&68$COR&#9<["a*O2dkf]hWRNjKkl2K/Ts+m=@TjE2%^/R8ST4?+*cSY`M-+`dNhd+Yn3d9GV_lTP/3#^LKM!h-j#0A-QJ98d8(DL]Zjm?',U=+O5e_$8E)ikIY\Fbc[PLVco8:NYq/dIE`bYm)0@7Y+[DtHrlTd[B?.TYG02neN7s=fF#HrcdSe#B'R\a(X%hKTUN>-Gl]VnX-i)cVVI4'TRSP-X3Y<7,K[KAqoOIWbhWD18$\*^h P]LL4s7*KdbJcX5HSOLjEJ:DY&SA,nZ]\\b&^r41nPIoQ(83k<:K^]oW940AVfHYIb`GW2,C.G9X*tPI;G+RrZ^c65Ar0lp3Ah8Pc2CAgZr=9DBe8X8JmT;7*%Hkb9tWg;Za)Lj(VUX.>9/XbC>GrRHqtJMLA,,Sc7X:<,SPAVF]iC?=7CX3(;Vq es8I8Yjb[(mkYc%"+j!hTkP_c+_K9"KLjHo_2)$^;:;G;B@`!$Q6(W0G Ajb%tsAI'YWIFM;gE2;05=$db5GT#s4i9)bp//Q*GT?EknHAdeND;ABONA]*fV&3'-.@ae oWH0dCS[?5DK!gqA-<H:Y\-sC^D0@'p/J1".ZBB@*(e=/@]1BN"8V:FmRc[pWIb$*]R9\:GYip?cI\]jofDFVk7PI+3DniX.[?m<Srr"[F/00.cCMM2GkTq AA<.FT5I.m<BT\s"I?0eA_G:9,$QsXDANFVD(0r/7!pgpp.qgqe83ZmnT74/%A@b'RWDD0hB]jo"8B&iH!Y\8R.'-AB`d4p-r3k$MoL)XHgsW/SA$0Wp?-fRfc[Lg"Z[Ql_XXdd757DXnED><cSga-r`sHH))obA3YIcNFIKq:<>b)I7!6-&ND<'@i ``I/YqMqY-6d$QCGOR-IiDk93Mbrcrl4K9/mK*5MP>AQZ/m6_EEti,m"A3X [fY0f"(g'ZQgE+@-OBg]=U CN"=?1m5\=&h0C2G3t-a.Zs5i-PaF<qZ?g?C.BjVX tVUNXj;q6;.&'M?hVCfN-G?462IPJtK2DIM4Q6YJ<I(LG8!q8#cM&c]YP9D! 
2TSA:ABUnM3[A>1;QnAAZHeio-9sq9\UJ6Gj =W^(rnNgok]mS>Fl3Nb##)6#&DVWKP=+U3C<YC3@p <Kt:N@q@KJhWGdM,acAF0<,nGNAY1jm=XGaDBMcB$RM9E5'd5Z&;KKADHAW*#a.MQ>WfV ;?<?<+cZSpd-+3V6o"!j]p53_b3KQUP0G;72`'5_ipWF59>/k\S*EjnOe,Ep=WhR9ABBiWS-%?F.6NQq>n#6k<*H`8U^B88+:-<(^=T,5qAp.OW%i5ncXA'[#2@72_tArB3s?'6=^faQ[ak/XQ*bqmJQ`C6SLM'E8i^Cl`bp!c^f,_5=t)o;\G*0(U/"m:)1qC+JUdt02caN)oS(WtK$=pB/A%qC6:Y`Y4E[UK5KTG47mePiqFRacGX9,ps\M1sK%@r@S,9fV((ond[)ZL8@.PR_o+2U"J[IADF 3!5sF9>UJIpQn830RX2VfpQ@Nomt_h]R0#pa?lXDjhQ/ZqWjnC2T-5bNF 0E'L#-.t+bNE!:U_W/4Vr?Up<\Kj&/K.p@Zh%PAtdXJNA@F Yp4D55m\p]-[/G2PUo5>haQq/^^Yhl*Tn?Fad=jp,=Xrd_eN&0?8G`n%8X_Eq98n8O$hY=\M,[@n$b32JEBA(9=-AcX'H3BX3\Xs0U'9m\E<a" N>Vs[(4)&$9AkXc:(]HX.dL"Kq_mb^2O"nJ_A/EbFNXT9"pqsZ!FCDZ5sg%jm2fIf2A:_A23WTjS^?P5ei0iq)a/#:jV8C7-r;SP>M\c_coqI`W3ripm6No[M0W+jtcAZsPS+)BdFVh>#lZ7De 'ora7XVAS@c<ZCB2gnd$%''3h05S/#%$'9V_Jm/Y46X^e:boLHs+2kIeF_(s#Q'<DcSm+_reds>AC?CCs35JjCXb9'!AJ-D#2U;MO](q] ?jhTskl#V9O-gZL6!ad&b95Qs-UG5hNWd%a%C5B;Sq;;"A'P^0mJ]F`SfkP2aQ_-@BpC@TBV#DdfO7asSY<qk)1\FHJaZ%A##mkbL(CR AC.0Q!QcHL]E<c\'XY4.94r_YQg`TXt%\[sR+GAAgHF69[2<lW08PrW=g=[b>'!De! ]IT$lSo-c;qS"Vle^I:k(Oc+Y4DD jQrFXATEUlm-3&3Za]Fg<05T&^!YLcL%<))1A&>1N.YT/D+):#!go-,+31P"MC;k0Y'p.<;CoJGo+a@4("!pX=?Y"$0E1Y"%UAZo2_]&KiNjH@*PKmTY@7I$_a)$1cP;OVU>I/$tG3RJ02Hq`X^c\_E<!_i2Agjqr\9?K.O$Jj!_W8A+cgJ$Ln2rB)8J%(Bs@A@iQksIQD<GH,tJ/=ekf]!mOtoW. 
>MCX8Z4#q,'HF/B&KiY"IQ&p+$@:i )1)3/D"*m=2MHX\CX;mQ+]((C]`%$A*c.L+3<kJE.,V&l<9hT#c,8ArOqM"&6GG3o%?PoJFSA, XdL >7:A!@A\66Fih9VRW5dQD?1'"t;fq3.lrYB@R>7Mc>mpp>g_DZ$Y P@VE)Kr.WAjScRpf:d+af_S39#79BfE00ct?DraP:=\l3hNQ9H9>3_Aq!lD^-ta3GZ\W;Aa7l&s`q1?"F\JOpAR&XW!QR\(AB[ X(t34Rf;1X6_%d4-20)=!4!TO8I.&#nl/#.P\CAYeHJr'c$b5EJio^a:e%m4tqZ$3H?%5r)E$$5q98rtU2-QbqXsLmFA`q7AjqOIK&X19N#beR,]q1>^?(Y*8rMhthFT1qsjsTCGFJ6BPpJCVZlWC>hpKt ljt#+1DDZGV@eci,19LnK]j1sA!g>f9G>&rDQVA7m#Y8a6;1q8f:3Gk(VAc8gKXh"f.YXb0(H4mc%#?sCOh#jX!/Qa^c]5@*NsC0.;WIMJ2l W/h2FWr'Vq),Jqd:Zi$:!eDaC'k;qG$1>?Fiq_hSJd."BEJFU!QIQ4TMd%A"5rD-*WAmR_BF;866s-s&Ns'D)fQKWrp%9Vq27(fZrjKA;3"NWDl_]WD+_Ji?lY"D%LRZD*7m+:(0UqS`3WA:;AG=rKUii]qJH[$+Cm2c;?te5#YeIjm@V@(E$U0XQ^-Tq/*V*@hniBiK$M&"3T1]hV3""q@-^7d+D! Ai5D;=k_t]Coa&&4UesRq(3!S5XQB"j`0B`VIhBt&O9q+YUUn1f'1GdoKk"H$&[A:bst\+V%FX1,,]m>k%b!ZY!]mk6Aq0aKQfmbA?`-"@&_si2ig^KnJA2i4)5RR.m[1C?IlAj6X cQl5)OGp`SE>4&=%)ofQ;O9.0r=.pcm)HY*AMC86%"g`2_Ac2n\?AbA^p<c8MX*#*d[SL5N9XSpP]O)JFk,1@P[d3EV44.@RF*^Z;8c3KFKg`iYA-G_K^a*PcbN`GT4O8"n46?A51lACJRdmbsNY4Pae1tV5+3L^ZH!,*4<9Dre!AhUb)T:f/aT_'hI9!S7ZE(->]!,HKj>g5scI[8cL<f/$?!+5iQVL'_^D6WMXg6fE\ImXTjY2pN.aH-Q0A=/SNO'$F6%#B/;AgcWY%cP?14a`l=2BoFZk<`2<q'+$Pq=OfF?s'e'<*@C$g7rm/0PjrW4<e6N;..#W\X*@s>R# 9snT;^9-*N3BG"rq q#R;K<`"eb+`<"ie:@sA:89MVQ8N4>2 9\UKt':KMRV7!>j*Hs++aWcJOOSg5'QcFV%E7"_I`k^G:Rj"rH.42PQ1fF00L`f#pJsroLXFr._M2]-J] ]gPaAi#BA7M8iB1e5P-FSl4j!GTk'*U3PnPZ)lQrQ\Z:EUS:9i3G87PB. 
GMM?=jr]=D4BWeWWtd.Zp$1AbeSIAr-+-GJDZr)g: k4EOKl8g?EMYqAmK0U&N7ogmHC!A,]kGG<>!qThP`+3n7Htdn9$#*WKPDe"Z@7?MH$6AiiS[RMsq+$2f-3:U7N"X"(<OHg':MM <BWW%-pZA6 pn^8?Ao?Z`OO`/XM/f1OJ#VRP0Ke[>1I?\="i]1(-JmY%Z/4%)^AMdk/n%=`%Nom@Nd13#(YD`(k0FJrFiI:a.F4#N]U2WcD@rkT!G:^k3t4DC'Rm8C]L`ND$q]WYe[5$m[4H(%CBe7.;,t+ArTW!=lTKmjn!i=_4#BdX(ds1#1Gok3GC;DcO5WF:7@BT7[Cqgo> B04p'1LQYEYR50PSsAE@1hi(*fjocJ@L),e>[3AC:_ )0G))l6p?05M\n178XlA#h^lpG)>\]DS^_D&mULnqgJJ:2^$AVa_lIFh^dkAPo[gt?C'B+H>,1`_gboi-M* 5 30GeG$5B*s:l:O(ZBeUt!.o#'^86\%Vjdb!0tQj5Y,IJ"(qUD(Apj-s3[2I]fM41Wj/#c@?HrT+J=/WlpU\>GUj]so=jIqEV5BA13AD"d#8hl[#^2siAb'[&!F_(fRH&%h@NAm3,>XE4&O4>f6IP\6%nTrb De@EYF,/Wbs<dB-r=](81:nZT6%EFam7fP pQ<\U]-neKhi2HQntoU_IHXWbAZD0EI%&`i4Yg,C'd67Oke%iooo[b1,Li30efA6r6sR9GM*r#06@81lGe(&+ R]@Mes05A?]SL2W9o_QDO,.nBjHri+(CC?ILngU?%t"/RdCUZ2<1RroLqSKk4E)+&1IXeCTl]H%mhW#EI5)c+Wc.GHI]cHh8Kkt5!mUpV9$to'0&m95/c>3%@p@E$YHQE&Y`Y_D)g2@'/$4]p/Q#N)$XnatM:<:HrPg6&F7doN,Nen+bHtE4lJN&QT9fl^<G5&h47X';`9=/<)(\s,-p,3tjf(=37aR<n#R+!*(e>P>W3j&Y&Q!kra_M9[192YW>R`!U^!AS^62KE;=[AVrPK:i^_9] A)mU%SMngF]GZ D-Y+5S@HbHp'X^@*/V@QcU*>iA1dgK e<""`R^YY^):rZ!#9#=ApYQo.EkJ4.*EXcjqmS*:kQD6gYdi$.HBnsCni^5rq+SY\0hC=:t:UH,Ns9+V&k,Wt$DpSF!3HfAM^fI+-A*"9!=G^+hL9R@4XRG3S21VGfZ5a  c!(:r+c7iel3<l.?HpmOQnKSC*]5j4H2g$%qk>(ZAIbQR>L1BsVnn3L :/Blg))[-&h`&IR6YGFCXBKV&G0)@AGTW7QY'$XC0/U<(W&*<nT9P_1/bWVDS)q@^?p\T&rQ:.WW-2VB'ms2(0L(dYMXM7N*nt?^EXqdZi;Wne9_t2`&Un&_'N)%n]\`Y6?PM^]GjL.^+1q&?+Dcq>1R'&Ynk*A8Gnl]3j20ReM1';6T_BK*9h@bB k83;"nq%n]FqqBQp'?QD4>6oSb[4dm.:+@YRTFMABYW3eI.@<D@,'o##hVAP?B*kMl@cY+AshCHMM4UClWn#O4tpgtU1a*"<OgO\Ora^o2oU`0k38E\V,Rf 8b&lg,TA%>5 cJ_Zt8E9640;C9AkBabe.0nZ%VYBYF8;kl`9^^`aF7Hg''q#]bcXKV#_@pj:)Pla@+ 
+!6A*no]^?$V8($RlJc]jXWj6]=9M61r*f;btbBT*(N5Fl<[ON+'4#X'hsGeGWpML5&6XAIGWjsRs]B]8'IU#-m$7i-RQ2HA)Mq<dd>NU?/b_KnsYSD7=8>[d.)*SQs^K/"+n1Qe2B`R/+oK/3Yb@C.1O,>kG9d%=D8A2'aG6Ln-P7nrtoBE_;t$aA#OSj/#dJc=[X9D#^-#i1:\rjR3A*5aZC"2"bQ$otgkEkES\MoU%*H'/i;T:`f(n8:\;"NGRgBQA<.Z@,7a3R=;gZA9MAV6!KOH:=tKWJn)fdoSC=ttJ*N,B5E'_L1?Wdl-M]bGpf@J6'[gRU,Y#edf=T[($O:22\<QSt[,8TOA6Hk4qB>4Mo(Zn1E85:6lg4+"%K3`PtR9*R$tlCJ7FJ,bcbd^T_N/nA!7*QO\0t7JPSrlf:-Tn=q`S7WXG1;7d!N7a5<_6Ul`M_eTE0eZbfb0%E2])7@P3hIDkf\S>#i$Z*[p5>s@ZV9\q=n_O/Y)G;*^S>?/NXM"t$M@t'am*M92] \VEX]^G_XTfDKL4\tA\Ls)J;%:1E%A@8d>&[Am,:Ke\'8kZ$=(+K[s<STch@rHkqO7h.PE[s!nf_Yp% s$7RK4Jan:"Gr3f?nSAD%^LXS7]RIO;VA%o8KAftsO4HE(/nZcZ69k#n`F49Oap(K/C=tP<9o%./Y=S?ENV9N"?Uk50BNnNUA_=]FHA4&-n34/o,B]%G6s$h[0X$ f-/1N[@=P.#PiQ5hb"* T(\LZ\R*+9.3OEi,E$M7_s.#h$UjV)Z[?PPHT4Cs^>)R]rj2X'!ID"#VO.>Zh?L"0o-=pY^!=BRMA5g#ZK8-/:ab3:mo/F6tL-jb-N)At;%VA["R. _o/Wb#;5C_(NbF1F"G<a4IINL+I3p>4N(oh`^$18bA8aL`8[>38W#c6t/dcEkQUoZ0+S;LS;G]a\>"QKCsKT4pG6dTVg@kPTZ6F$&*/p[c+n,XeFd6YiaE 'a'n;C@t".Bt*Z?Tb8A`2sErW0CXD:rQ&B?]Ys)+`St-2ikC#D<I!;ah@a=t7E H/^pR!=D4%K&l;nZ_+`g)s%?K;C+^NTL6@F(5Kb)=B(>D5$!LORR*jHj^^eWiQV9GF*EYnr.:RT<+^oe^,DR XQbpF\JalQlP2%"%Q9U(?XX&ER:`cSLl9-%78O7Ol3BJ")P![X?J?))$2T+395?;<kl$ponP-U+QoLVARdA$?eQ7T)l 5(-A$p(V7BE\3Od/+&U0<N8[3FZmH)E]:N3L]Yh/h]maKhD@7=-C!f1%rY"An@X"V(='5_:7M/\sD,+saTiZ?Z's`+Xf[MkM'eQ\A^,RJOh??:.g+m3lj1*Cj'U3\@X=o,mN)]8m_Bj&75@&ZeRtR?DoV2;Vh($W9'_Gg1h_Q)(/=<p_2rJ&C)^g,oq(20sh.-`i_MZm-Wt` p0$P*'9Ti%l#oNnI\YrtB$?/3.ke#Fht-$m;`phQcSNMAAj+h\fYbMF]*AWAMl.A?),A`,+nk'6^]+P9,d>[A5/<4kqm<(tKhPGf*D.O2$-J4t2p%5OFiQ8Y^1#b,5gNYWsb"e/t[`JqnF(*D+5aD2!_"bGEcQe1V+>BARj*J:t*MR3.\WiSNg8BP53,knAVNm3:pD)pAh.JmTnC$A1pZ_$9VOs^[n26PgFb5g2 WWr1^#5TJ)F,2=R%KSp2J!)?Q_b$s9/?lNTAr4kb:/Zq'/%o%K>Kt1$=L5Nroj9_[3;.;+ld=j/a0kq.LE!Nfn?[E<UZ?3f)(1DA/>N!1@:GsOElhN0=qVR=ik0I<"r/m^ZVk]3.Ee_3;aACWCV<;0thiq]hCj\^3/eC*<Bj:\.*EAXs^C&V26/WA"b(`m!Q!^fJb`Y[sT_;EY6VJjN0jFR^?KGXWYE&.PNiAo(l4Yj3Cp6QbAl 
&S1frIP3lPta1'^bM$t_j$H(c3br&C)>8Y^$RKh7^lBMQR4<C^MQ5RdO`djRIcbnccbY7a4IU@2%3!M9[6CtHIQV?&c2fZNkCnF0=dlqQ.l8i(c0p.1J?q>0A/7P8Ap>4P).gUKPe2XD,f7pAV:<goLf[JFeEED/H,<Zs%VRV,O..7=<SS[/1gZE*3 U=t"1KNt'%W7]n12+C_Eb*2"]'oOiU3q::&AUbZA[B#>aL^d,Un`am8iJ"+o;p7<(VIN"&q#g's^eQ1lj<G?q597IW]g4! F=<7I5A[,H@5GlLT"4dslHt[^Bl=0fT\3sY Bs",^K.Tdcj1H#Mfb\@cb33pkjI<'\dAk(se592g-ja-6Qj^(PgBUL"iSGhm/-rs1eaG2meF%8i7K0besQKi_?L@54>k&LhTHR!F<92ZQDZg0"GKS&9^eE]$7-fad<V.48j)\=Vn[pl66',-7VMi2!_ "!3ED)fsrd_KD-RebP^>7aLC=2]h.4IT'`XR(_s&#NEptr_h5^>5[2Ff@O[gNZ?\Pe5k/AO?MlfOmHL&jk`ei$r9hArAXg."tEV1+1F^e6WeTS`MjTM4;/SpUt,0&O+V'41Mr%%<nX+!H!Dr=<GB12^olgI4BdbG-lj5#+;qV!TA(7dnFO@?#fd',Dmc9g_U: UcKOXfsJfU[_Mq I\W&,1j'GSsJADDAeSAAn&-ct&YE_Fh'GFl4tl#SE)#qf"#\dW<o/6s`XWfl;0*0j[?T7'rdhGh>ZI^t#gk]<]Q[Epr$VD^00[rP5lA'OdjbU;R1FO&*#)e _"N/8KCEOi)QAWFl<fl EAiOc7]:JKSgSACs5aZY48\Qn)BIT#^^W4E:ls#%X4bT<_`<Zt+p]%aTf?(k,MdoMaKH!rV0"/e5=_]rAMhC)fL&VkPNc8Me`;Mh@,(;.Gc(?P9$LqWrWq0@n8IY/$A]mVG#;\)Aq';\?8-@W*i-D_U)*<Pr!IWri;2J%k&UAL"k1qQ< F!NbLt?PE4/.k@A'$qV7nrh:;+G3b6T>;47_k<[$PA0RR c)HAB52JL<.+TLp,!2sJ;,UGVk&L#ii!4V\mibWR3Kr6hp5;e0<h^W36s,-UmJ)#a5pGJ&*dSNkNW]h(7E]2btEqDX,Qc@9Q*9,d)c#kIZO!X-hH[>>]d.X<d2C'GkH'$?7X+K7)SHl=^],S?dbX@PA)SFVoI'jG8sM@O^\@c;F6\3Bht)lm&s#A]9'6$tjXNNWM-9a,2.p)O4Q65;2ddWA]?V'D\"4qgnmA9-4\-fQOI8g -<.]QL7kiAtUf GBU2K)jG!e3Pb3G-m1hFt%![VD:K]g:WU#qAWAD93O/6H[&8i'gt/sRIrq6%aCBTc!:WK;bm>HrKMg2*#IWT@6#f$JR>W6U#BnqeJC.iTePg9A?T<B(&T]916Aotfn/B$XBV3_D:a-KMhL#(BGq4&r^ZDnh2EtNcH>E>cfs1Ip_N#L$s`X9aJN\Y^,2CTllW%8#r@MdsB^@7&MEfCMpFW0Ze#2!:S0Xm< +S]6lCPk9L%AFM$<.l:V5hF+eXAt]i>%)C8:VA&QY]q]Wg[;@G21#mO*5"3ACRjDO24Iq6pf8e,L_JWU )8_'+&O[Li'IJ#<&(IUnHnhP=7q)kf`2lX4m;fFq0!>)p9k+]a2 c*:'^,>BWlUD^W-63A_O&[X]Zkp\da)r !WRk#[j2H)N^4Ss>k3lVpgiLM&iSe\L4Tt"oO>s=Np)UqfAY/>LPQ[r0L=?5lMWFS03H,XM2J, VN,%O5&2@msR\m<(%sDJDPAT,`BL%sk@a9.sq7*]A;n5iHY^?&ail;J?0**s.^'C].*sAKEn4[D\Qg.gSg>jXFfX!%6AhFBt@mM<^8jOr#Ni6#U:icok"'d!r>k(=E*5sQkX?^<^-4R)X:!a%RCGU83k"P*:eNCCBfLjtA],k&+;oKc 
D+`slC\c.m8?LVsKJ*2Jc@%FC9Ql1*o%>!2hFqnKD0cUh/Ap(_4a!AYA(gf6?;L0"H0R#AM(Lr6%q@A#qfDo8CaZppKbk:H,UJepP'L" V2pLY@eNG`;m!OQIE!^?RABSLA_pb.FbkNoA%m_'F+;^&AfA@m]<*J?*!$NPKr-=Ze%kRU\gkn^gDsB0_?mk`QFsHcpJt3n;A`.0.5)5Y8NttXQ;]LW G7 b_3,XhdYA0Ak1Go[1j0K!-rC7X+1o;;U<UF+c]GFkWifp/XqsRTQI5PeL_1`b2]?(gs4b2PIBj8H$Yf?mq1fBcB3W8^#d90a2jCkkW&].9Dpt*)&3`=L_`i0UsGc8JEBe[HKqWJ?da9A)*hm%Mg!ajr[tm_V\/PK"A)4?>*9iOWE,C*tkLVZ^U-olaeOWM?hkQ2mFLlOfV5Zqt16o&,C;<p\4sI.a3-1n%"O%;]?@&&Kkb'do] r,Who/hQU<`Jt ^N0?d)pHl2h+,,m3o7\4k/3]4f78Y(=N"JRm0V1U``6Y&rBfgfp?'FG!bIFZ"_GefA#O.b>(LlZbfmbX N0q/c[@WlhTsEsWG&2CO=@@EFWdL0sAsk99o## L;[f4-*IE>7;%X+:A@F(=-U6o>nQ<#./LV7FPb1q!6aROsa[[\@I_F:/<2(7^TAL&8@Zo0ZcR.YO<+'cniABAE'>O:"iq<j/-P-VV8+7JALk;f<Qj:r9cA<"8IC?X"hrkV75ca$;EaA<_?`LFUiT(U2U)'iFY.2F2<'^;rcU'^[1MjdIs8pKcW\fo!<=8rj538'K.6"UAsU)IZHEs/G19N&;U!(ir@,0BK`O3@7\h !X%])j;R!h_A(NhhZQ4g%<"L=8A?6A'_Z!I= WDY4;>>^=5NB-'3375YdeL hRCPe7UQXLT*bcA[13a"@.TWjViKS^rVT.L.`-o iS$!Mq?+K,Q1Y %%94)m#8g*i QMdVkn5X*1=`$&8jNs?/isc=b,*SAmYT/^MWA]"@SM(*Je7G;7QtkMfCo416#Pl:.*NkfSc]T5J;)!ff0m&q]`7,nC`<t,>bMbpUn\)o7=LTfP-c%mTPhm:?*![U*"5((V?8,FC"$3JL]7n`.>D$AjCl?Y/E%%C,+<A[/;.9XpXCUb5G=n@Y(BjcjpQXUO^XYMqKb\#W-C$I]#n>s7\Ee&Ch]^ m-tmoYADoQX6Vj>df&lXVCc?cSBbdCXEVaI-2;72PVG b$RB*iBT]\O!)tOH&fTbp&LMY bAB$3,m$SO;@Y>&>"SqE=%gLQKq@9A<%'_f6r-9V]`eC,9`!"6cEn:@e*W%tI Oh?Y[j9Hj#"t@.#K5tC<rGR=lk;8)rk]BpIS(8MCRU1c<=oq%&ra-'kPVc(PdFK5,ES"@(_I*)8\"M7q^EeWflUAVCDN\M'MJsTZi+kXPX)43T$.aW)-s<:#o8*Hq0MD1=8B23Sj:>J]<f"bYDo]":.r;'L/[WUbel>qVVVN #A0/A#CMrgb2'slW>-"Wr4Dog[$-j]eo"P=Q-cEVCE.4_/'QR8rT+8=8T2h,A@'N/\NL8[.2K0T\;OS A?(AS&o2:S_TK+b`G:bm:e$N)[G=# ,@;\KF:R$&`D];41=E9aA)c b[Ks+K9l,3r>lZ%LBs`(AciUpF;A*$"^Nt-i,F6d22+Qmrk9@s 37'LPk3-dHad;Efkq$nJnleeLA$YIASX_A2)bq15M'1l@W@=7Y&`bqfWO,.6p/pj2^8/=C._bId'"Td*Slpmo9JY\@Fj7L"(YN!jAON2CAtF`:rDtL9b3JQ7\ZmWZ1LnD9bi,liT0(e 
HY);_0Re%r1=X(>fl\9`[Pb_.O)3TeSDF`+h@nQfA#3SZa@__C#65Gc]$6J2R(sB<T(r=cUZVSQ'1&W_r;I:-G8Ero67>FAj)A+VL/"LGY5Par[F^bo!m=sbTY-&aa^;LeRCD=*JNrF0H[p0U1*U@TL=2]O.?iK6,$T<`@/;Gme^Wd$\<S:W0S=//-Q@mim^i:JJ[7X$ '6Q5=jnGC/53rr[[mTgo<^:;Q)%PTYcli lDS_&F&"GRFhV?B<:&/\T5>nN_/b8Ra]pZFeq#g,t?D-V]M5$II3(^b.17rU:(%_I!:.j3M1oooLBP2SW#(V 3I$9CVNU4mdl1JpP&oq>,`mJJiIf5e@\H8i@IcP@8hk3]O,\;43*k4?'q=12)$A6>0DB:7B_el#XZ!U.5C ;/?r:qM]QbO3I&gA%#.RncpfG@UGTU#M) /SH+"r;Z(mJ+M?i]*CcU#dkn"J<4510Z.$YcW=Gs#0N:7"?0X9(C$7,ZITf$-1<(YV)i/8it@m;42Hs1b1JZnMC[t!4YHU] @*!q^3l;bhQ<F&4^(,TTgn]IP:Oo9.=?>sQGolboCh0L3oO\a%,eHENt Qc&R24M$j0Q;C?b^=I5=A.ZA>_j^GY]Ds/D"E(jRf@p%dkE13&O8AY;^Or,!?IYUH%F5k<BGsdi8df)7&IeBj@Dl+:`fgYdo(4A_A184r9q8M3&$Pf/S6RiOK<ThnJ<KN[CT@MLDJsJHN`D(+P1lMaDc=igP)Q"^dESaAE9I9>_oCpY#A"FUWrD*R9Gb8iJ<p0:p27%XHpYNSL7b#?KfJW5 $STpEPQ_9N!_P!3GT#l!rlA2o$D/U V;H3sQQAb&0c4\A902=W8ZYX*_a3hqL-5PiKYPp-)R^`2I!'\]_jgBKF!PeprB]:,EAd>Oi'HAKZ/2:RYhToX!qIIp+`N\Y2K@ e,_3i;9f.LMmEO.Pg\6b$tjF5[sQkd,3XHK&HqVJJZ'tUcnK\gikacet-hQlH&pPAD5TM[ehBh;)LL1[#-O.e@-*DWq?7aeI2;LG%UF;_QL*osn[`TKs^EqZ8oaE'#:p23P7h>HBHd`#R;9&tS(& qKeFHh<$4'tG!TdG[L*-';>+RKU J$aNR2O=E3I'B__9B_\BFolMh"6O\4rPq$f[mXa5'3Tj4I!5t!D1L4dF'rbiUdtFj!(*fhh`L27$# PA-YsF*:&o+7%AoK!"T^<,Jrp"WIr8BAB\mEBeg,&GUT"$k!0(Etj:T=J8W%G"1YGqYD9]P&nkg2-887t@d^XhEJGPa3RAeiAi9,Pqobp+Xi<0)h)BgrOI&YW=R(J^;;_HiX_f]aY_=8@ZQ4-&AKh<q!1-tspf-U6At:!pAGO"_$b_c3coN49QQ0+ZA.lcM\PqW!$GY-/XB9a$S<P&FXjFFdOY!d)10".lDKA1[\,5rP)h!Am?f^9iTiKBZmA`2kEUVrPFdK@]DspH-X7?0P#%RrH6O+E57JR:-t7iN!A?'7'L7ZelsZ=QA^WGa&&kC/;D9U,"9fGW1kf5@,FW:[b*QiZotGbY5(92X-mp7<+;ES&(::;M]$q4D4q=gWH"`80(f;MVO%hi/p%kl1< MM)BmWa0-BKtVt2\&Ec'KNqd5BnhZQ1Va,5Kn3Wneb79Hl^fd5/4)k?&7I\ods`$"A6d]cK-(,mo/<)5Ljii&jOH,AUKI0GHfkTp"Y`.?th.`eCEA<,@RLmsD1!I!oApIl?+W,G87jU\\qN:;Q,T"e&An;mI@Gt!4hMEEF&N1rIQh.<MKb!3r\CtGT,4J)E`P&gp8;#abj,gCc$H@2]j3=H5W!/li:UZ/7B\IeIH^9a^J*9tM@rgce,HO7%>*RjZB`rL"'rMnZf';Lr+M/E='=$VCmB)O@o6T/I\!Ut]RVCEM>o99`ZHXp]81FIsqa_9ab:+Z5-MK2ob:"c16C 
[p.IfG",\GPUBAA1JROjW.c,UpK,Dn`'@co6AQfr*G?TN6kgCX^dI]>Tb/H!IUWA`nU@Neb05D0$HQS_I`'Y4!dTl-&:p,+CX^MMj\W@1TC05 icp/=Ce*YVqS+ZHb)Xr*U[2(oN.=CVI[H"!;diAT_Fi!M)L&G1p_lR0NFYj.aB@@_X`sB YX*J9.9'r-`6C%ijb `F]Z6nAL1X/nUas1A%b<C]75JGGe,q",DDncIIX/AM2=,rC6YCZ>M@Ah3Bj]d@ ;"8DcC%orqsAT7)h9\QA=i4A+Ka)6sG,SY6X6qh)h$&*kLL^#&;ei!;()!dVO;>19A\iZajHd;Ihk)I?$orOYU:scq3Tr@<+&$E53g#k@;5WISMGOicsOOq_6n1D_Op4F:7l#&EhbpQO(r6r8aJlJL' d**Nrr/\bfhpI`Q_>q/-3M)!8b<seDU $<4 []6";L]3Og $D5SeY[d/Fam*c1e9g]qA:Yd<>+ZZSF[Y?: d6ri::(sr%,.L,8L;9O'A5cq`KI!$9b0O7A"\:Nc@3LYA"eK3UgBJ/$\j%OI_/D9<;2m.DUr`q4#eleY"&UL!AI:=8'g__)"r#:VN3D1#<CbE+^0Zb%8LF>Y82mEA+Ubb-\g,AaIN&>?POXk#(94`fIfA%eQG!(A(;Z2`mFGs]c\qmY:nXY6#k5"<7bO]>5A?(m?^3PQ^C<>Yd;WUFRs8@D\C9l+ GS9RC/+R2VWe!Q3*X5=_sj!3PDC=A-?"SM_`CG#^.j-E'R:7:MVMW(TL>lO?l=M*4rgI>:052*0b%m[dOD"f AI.3n.MjLTn8K:aDTH@mTqP'qQ.HaB=?6L?b!ma5:"A=3tY5C)VRF'GIdIEa3JCfKWnQQB&^e7BUcZ>Q7?WIP.2dP)Q#T^gd0&AS-<EX<:^Q?j0(W2#REkN2PO^hGcsBpR4jkrnC0,r6(XTZ b>0L<&@4\LB6pcW "VsjB@0LPLMr 7qm]ga1--#P3;Wf4FVM;l]I)[Vk_^:IiW+KG>(,SZr8_%Apa0>SUBh<AOT:YAio-E:q1Qr70gp%.i<*i=k poXAGj8qNT>AW>VY^3\JA\$$3;<KZ$4.saZo8V[QCi=rn.`hE?$?5]lO$h+`0]tBa,dW40!'s-!U+<A1"VH`UJ'_7A=$9*$QNo3.im_qr[5e[W3A62TIJZF%6>RfIph*9P\MbY9llP22N1%o^Y3oU2D#k)*W DCN&1$Y"@TSp+`4c7 ]t0mh(IG"/AXZ,D?i?_-%l.I#g(^#Z3%g'RmB4Us-T`G]:Xo'rE5ZDGl5@</<6WW$M%$DTW>,]3crC8_:^HQF6h>'d&!%3['k9O&gFO)r7SEe=_]=6Oe;DaK:XZ%VNm$o%'>;M>q5qH8@DFFXFU^t@O8DQP-J*'"Fj+N4Ur5[?7h6).#+C[:J6I39>Di2CF+] Po"7*"Ih";gA/nhPJ>EP_9"?e*m3*UUAT##9i7eS%T[/m$%Ab, S^!^X+DI!tf?cN9@!t*L[2NWOZPRfc=Ao E$AH=\("8?oe15>dT!B]T<ro+:pAF1`'aH;XMr*$U;)p%9P 'Hkj?04I]Sl;j&$(,S3IC8,\($%XND-+i IPY]DU7<HN>s`@PBo?d-bfZ")mOi#$b2J.A3A]Pk<c;`eYeeN7\kR4>RtQRo Fb6G4YJcKB2-i^Nn^t7)dTOdC:/WlkR1IoW)GF^*:=YFA>tV94KRfkYnFApkbb^qLqkc`7 K5m4"$[\RIIYJ-Ys&Qd;P&RmL;fs#2=V1LQtJ[K10n+DYg&ig <Z;* 
d48"QsJ>eYsEbt4EjPP@FsaUjs+^[<i4)Kh$2fe(`<n>b%GqZr_+lgQ>idEMS+7'V)&=nn_m1%=G6ZLP8XTP'rNo>%[Qb4/4seF[&XSB\EVi%!S^SrtiD:ARnB?3As,%L@_OFhm"@:'/=N*A*OMESS2@M1VpaXTd_ISUSkUO@ERQE`SaJm%V0BX$#WAd9Q>_#-]SV'[:N_,_W4/BeP(<,PA+A#lQc+RF![]Xs#_A1Y3Dl_A3,kXp@OXT(i'M>m: +#b9+IMkJ?!94+p`q0Gb<M341AoM\MbiCE6La`EcY'[j'= =k2>rae#sVoTXtlbmSFt(^3P,?jpp#&H/f#9T8m]UJKYT"N?,36RDGgl(5i#j]% # XE>6l$5^?0&kKs&9h#P-G?$q1NMUXc"Y[Re</N>1,K%2RI3D q8?7J6lbG(>jT??)E/F8%MmcChF3Ie1M*d21#"p0K<UMZ/)o[8A5qd=Xeb`3feWnq@2P0a3JZjdIIJ[;D7DLYK-a1KF&Nkas?7JI_GYrK#aiX`P/sm%a6@#+2 BpJ:&D4D)GI.75Hb^MA #"HRlLD7em,F[bXDP.qXNo,`.+%YMNl PKmbRE=Cs5btD(M^QOP#Ka+r<t]Sb%Pd2So',(=Kh@\_(s@T4H'7(X2Ss$N9g7(CjD)nd4f8)^RS#:J7L=ONW'=1(EDk#t\-*fYQL#S$BE*M%g:XWfFKmX9.c=d2p7A/m:g/9]\SqUb:p`p(TX(Uqq22R`1AQA$571UcW7EJ2G&,%2Bm;K=bb<Jd`3YHV-A3h^dK&F$D[[.'W>n0:Hp?8/XRF! $T%-<0f8c&+DV[ZCV#+;a\c'<+^"e`A8+9!<03(<FU"Ih]^.m8SAAI4gP#Cd4!onbCf]W)678X$(TY"C0=VNoZn_4%\4 #J-j`+:M[!LF)feC)R_1QWW4G'0`C?X9TAB%A2h@P<NhDD`%R&*ak3-(D:%-UdZtdCBE9C1ba]'A$AZI05+N>F">XTr0,hl-+pZlKA(9a\pCQt:QtMjYYman]4nI]-kD@`kO<D#+'42ij=b"N1]!?*-XAEZh0;3"n,.'E,-2RDKfs*$*pVVY=AmkCl-6)GJZ1,O@lBL W6Fm&"J>VZX3o_`Jp#@\q+hJ58LT_8El&d8%.%=o(,!f&"Kca$Q),D<F>BJ)_FNob(/BD5Jp36H;.R-.(fE(^HXHeUbJkN>X(Rb-<o!=P5l,68d'GSP.OGs3O$VL5?a)]XS.5"j!CX'=A2*O.K(3Oe^AU3Q?9Ft0Llm1Q;%"4HM; i,@h':b0Xo/icY,ZV1"W B .c2gXih(2%D(Lapp>V!k1Lth&XdW-Afg$@0'@[bcA52+=at?gA;4W#354d(Un]l`=+^_'P+$pA/4&STiO&>t5#`HJs4V%DTF@j<D]H>/.6Z&U<Kjt"Zc:n`0l^#]OBhO1]jInmJC:+nktQ$d,bK'NBbgX9`)A<ZrZ 9c_#f@*MA$DMjD;qL@(^.:$U+!EkPWp+j=jAMjAFn_+t!V!d_ Fa%Aj"6eX p28I4Qf$FkAfr=X3s3*C`Pr-^re=d%qRsqE;rn4fk[m0e4Q`+kX)&I._QTN\^pV(0:TZ&i0-d;(m,_=:kYtm$^[#g$06k(l+()tKN19A#!QTZ>>'*[da%>TkjrnN3\L.PS>_g#)Gk&fXrqGUJBo+7"oS?U5UeE<n6NGTUJJ3`1O:JKBdYL+oC _`^>'@A;Jo`;r9Tc#)Lep\scM8$--A2C(K?SZlXU0E1M00AJY+?XnhSE7WU?L l!S98_XBA3$ddc"!rtUcTDOe9i;hk1R+`k(?a@K1lmZk5:eSAo*Q&n>Xq`<`I/sg*Lg"=Q_VO0=pgqX1+C^rO-.V& :>rMD^#Mc@-!f--t:KR+ 
3mC%[4&nS5!sC`E_W,]A?,5CDSA]i)k0pj`_ne@Fi9GJt6q-d]jJ-jh4PhR(KbmDgL9p:KakJZ8C"?iso4%Ar\5[!Um,i)lgXB6Xl-*U8V.Nm<%7Esb_`?*7tTMqhjqTGh0.3[%6+@dC8W+h(WlR2;27:a6_O$5k3gQ_o(bh*IE0H#E<PskG]_#m1?XG_+L10:\U#Ra7,63fKDN#k'pfYc*/[F*UE;dV j*UkX#`(jJ2@n</rWokA5L'ldIqt/[eAC80j*eV8!2,.]>).4b>9bm<%NEI?N#&GDtP6&N>cV3.C"_:O.=!23?!?3LHd5m> NRA)lO3c/d+\1A[QB`C\2U+57OpQEqSc&]@0AX!d,\l: H sm<"j5'(d"[gHX0Y"O#`$hU0mOkh01NWt90&rAen9)NA.io5lq#do>peBfp<>A1BeiI])fe!\R)]1\"OC?0Fc0k 409tn!#qQe&N@*E[FLpPR3HoY,PS,%8A9_iD@Znc! Kp*gIhA*nI"`>;Tsr;g[R`1cmt6"Fj/Pd#gLr!=8XR\:Amg \")eFXG <]JL +Jg@`MOD7a9Xe)C0oZPBE4ikP.QT;AD=3#d^t3sn1il3qM)!2>\c$\12;!jkUftXecD"IYgtr#2tR*Apj$rRT:*-nUY5]T6o*fFNMq#I :.G\ah3sq#`<QX_A()=YA$;/ 2,C43YL:Gn()AlA_5L(Ap0WpptgOlcVD6B%\Ja,4jCg?5\PsO\m?%bNtAnK&!$dR$&kS]3$S34A]Gc!qFE q o1BM_IsZWE875i"VIQES:tND# Vf<hG%TCMBWfmhtI35$A^]q49nrbnaNaomsDpZ<E@jX&so_PKS*_)UbsFT\4A\@NNkbN"_KA7Qt7LU-TEte!XE?iAL_ft_;,rT<cJ+)U<j^TL%&!Qac,@ET$i%>7mT[;2tW;U(Ps(ohs8?SStj13fULZHM2&-[9#/q V\fifI8dq+Bj:NCjEpYa\g<1ef3>cBJ\%8aklFb4V<-A3CW/+L#C(9r+XoaMar ,C:t>6r[KYT3XC#5Sr1oVW=q'/@58a@qD!/X)A<dg@Vp7"<>A_LrB?Z^V?j=WD)1%eD7=o9)Wti:`O3"]r+>KjIK/K+3+ki11&ciJ4+p>`lA#]p[int1`QTg1/K"d:CmhYG,h&@F3#>h2dZ$kYPIT#Bmq6m4f,seO5R)Xf1Fiq$Dk[kHP74lN>^mapAnWrCiSb4HsUYAYr++\**e-%UVjtfP1S7$K)Y6 0 &;OPXKY]L6Npc>SRHd`?n#A4O3[:tCR4*!;&DQ5c0`lL!])^HsDUJ #<t@+c+TaG6ScmA="V/,X%g?gTs'`acLUe.`kJ9!*$=!YSjjk2<=@ZDLlP4sEJd<T?dhb\%!B->&ZO1bXbe1$:q;/Cf:bH57oUE]H%iJ6H8^em60DEeAX6WjXhV0_MiJ]tdAaH<oh,]j+P"R^e0]sJ$AWa?XnK]hGCsYgk,ht]_7])nX2*,+?3eSeA0L^.OUq6&F^[IdF%I#ofm;/J^W#N6N,7(>o-k8\6FSN(a K(6?T($;dRT0Po5\MJOXqg^pVOI[.W<8\3Nb(s9g_-P](("A_3$Ra;b_tSYLb W3RKP!_JV9],P.q!O9>?Z0d^53"Fi+J=bf/0RFnPa]'f"iY-g\QK+ir!K?H&MK6K'Tm:[>+j7P?/>mZ[s9(/$=sU,q787+2bY U#h8<,qnUc6tt3nJ[d372 (WA&*936B1%+9m_]j8m2-Z4OOHppbR-GXQq!s?HhI:JB/4,AO1+b+CtmAAS.In[)=AkLOMbmcD ZP%sqqHrj]F*BeAaso=.5F#BQm7[`h'3l\3Y6&!S2j+pX"]Ma8M; X``jhK(5<%Xm_(DbU^I@1m6qGk0`2hXH5T4Yaa/#m`+cI4UiGc#^Sdf 
.">A+&!A:gW_MVb-TC/S"c^r;9bbHc6TEtWemNqNn8bbDs9=pGi7A(SoT2d&=Fmo6W!o'haZL>GG`9+DebsocV9q_T_g03?Kl-&'WqE%>sn`4>cA?_@-`p64Z.rVFfH>t:)d#Pe7B>oK&Z1]D^A"A%2`kh#>_[&o/HXjhH2LOC$fW$I1(.?r@VIH2sgDa4o4XM(1VTm4^!Ra6`AD6T;U9d@$;tNR:]4k^ 5l359gf'b"AA&9)^.r!>hA?)]]bW5F>U&Si$ss'7_3,6[`]Acck9AC8Ca\OL3bb@fdQ=VS(G=ieci2BBb_jFq)9L8>o0J)DAL4<]VG'E\a!lI1aEaG-$(3`N 2VrDrQm+!>t_O-X`Ze2 $mI9WQ2T+U;\1P;aAgAHl<i"40L_(Tn.U/97^@]3!EP9.!cG-'J57-Ri?Ms0')8A-_4WV18KJdI%.6W%nf!0I' #\A\$lKr Xsn#T.rE)1"1MW1pT2QcY.Yj 5YW_?41a`<H&MZc!9V.LW)J-8R)D:i#B:;dHF,O9#H_m"U?3D(Ml"BY$M1;FK(#k!8bk3[PBiB?Fq;4Ka]FkWHOOL*kcE\-<on1HJ'1<>^Jn.;-@TX2C.T2J\R9A*--2kjc_X&sk8r;^'$AoDON5=bbk6?1,0rpa#+R&?pGAl8f@_])G@lF#N=_ 7cLY[8JYO'k%d[XS$ LdF@lE4Sk%A_AZbP^7lZb62k3p0?XR$q74j`Pgc\Bkdt!3CODA5Et&8@!:A&Z7B4d3q2'ZH.EhAGt;rWYYo_e">WQ#417A<P=Q.$/)#Z;W.->Tk%'Ro,pS5;`5'&E'<PHZ$\62j3+>\XK>2o&B??IHl,4Uf>95a)qR5F9;Z _AS e-cWA!;?Qc,UiT?iHdY)5rQgD*:"-:* /l./Q%&^b)_`h@37IGR*P)V0Wj0<r[]AKN-d>.RIS+t _UMJW)d^S[snLfAocTL(GIB6ts4tr3%Sr6^S&P8:npYaAlVC>n*D``6F)`7-pdG+L:e_e7MnrfSJntJ GQISBRm996%%X?c<l?G1T'A@'Ih!1hp9U*[Rm,Y<@l.:'.i#`S0,nW6hKGn'\):IB#Na@$,5JA,VP5DnErnN_\>tKSm0);?L&`D0E`[;\Rc-9c\ihT/;*9\j'(V"-T@#>lgS$0]^Aj04nKC$<9/D(2K\od'XbO(J5F9PR6=BGFp#3l[p"n30&NEQUe(OkEiJ8%)BAPiK]L"6eM7Q)VO)FE\bYk;*,R %,K$,^#O2eX7#dEKk>]5(Z_p\B_Al`f1HMokqnGU)L8&AWEbl"?495+.`>IO;o0$d32g=b%W=n[>9Bn$iaB=Lr[gP3ZU6;htm_J^EpNtWp;;9cXO56Rro-'<r!L?`2D:K(/b9_eS&f$AAB0t  P bh0 %^X:fA#W=$$qOZ$W_O_(DVXBMrUPA^_iidVn"5CH%9`bmGMC+"C>13)&C?tq_CX[g'QtQ8fHeQiarjm3ha^q>L@Fd`A'Vc$i!FiSXKDh=*Z4(d6,`.+K>Kq`Np%`.4TZ*d-BAI#/q[Ns+(g(]0s:X,A08)o7ALtHHCOOL&@NqhmikX 8,>=;]!MCea7RC'0sHWnW'L,7E7'+o$A%t2Ff$$a(pZoc5_76#cs76&,b$n,XO;@M"5VRUld*"q7D+s6.kBY]VbTIQAB3PePkO9gIKTSVnZB&0m,AA^?_QRkk*\;:;UiS'6`N:;2.H85:h2ptSR`V24@ H g&F)&@T]A!Z3EdA\L$rnd &D9TN"hCrMls<r^MSD"7SKAN0D#NPe1n<<;e/%fhZHOrV"gLe$1j8Cckl=Ei0\#tpYJ?*pIXc7W,j7r-25<+K`7qh$,Q?DGdpNA7A+Ph<NfHb+jC)UIUt4GMo2rJU@BF5F4RBK>UiJ'Y9Lnad?8r^;^;t6Did,l4\5%jH[;fPN(0.ReEQfRcidVVY^lX]F:71LYqL;A`m$M*t<g]c`hKg 
UrJt>5(]c5d'A8%"7%!r'mtFDUj.+g&S].*+?7^DXAL[s+/;f-'#&sk&Z*"b\pcchVBdi*7k-Xf\+"-#A-AlbL,NfbcUf6">]^*PmcEOGU4SKjp,1&(]?/.rqk%Jr:UQl3H!X iHpMOr4)0ZB"-0jMqA!>21F/h@%_X$&)Ze_R?_AE$YP6o`,\6&L@tnB#Ja$n,YOF1;e2AZ6;>r9+&IWK5N<3Y'5 GrJcp)ZK>r`TXr%8QWs_m80GnWE[L%geqW7P_6XIq!-'(imGMnKg,ZOUf='#d0[b#&L<TB]iRhVkR>F%'kK@`;/edX)=pGqsjOnGG:467Vl&r4] SJ<NVU8GJ9IL[)HAh6a=H:C>%sI%/VQX#K363QfJ,*YGK(1hHf@G4'lTTTL9>>[T%'I3>rAG-:r=GnFE41HgIo=&PT[^BWH]M-!;W=)Ad!.MUs@p#[:>lmd9&^8 ,T_iP9"Tj1ItmHC;-Ib'[r,l?RRV2$V-j[#;h:HK*P8?l+h,$#=t$D,%-Xo)t2#)RZr:>T[i(jlBc)E+>iPIP+6h<Xe\\5FDAX`8@]aGHjX[Uq2k`tb<DZQl3Za%Q39bpPI'RT?O[M(/UAdRsOEqF6A=6#&_G?aeNHYItHQ"Sat2R+jpA`,RB#H9'WVA(45m#,FAR9&*7TK-I$+UgGmA+R9 PC%^hcZpYmO4f%aQe^"a)DC 3@cb"p,>k74K<ht$pKfm #fBm\3J6"!RUb<=g)Ob9@YV c7;ZZ$3=eDM&`HY,"J'7Q=`AhG6m97g(n=P2#e>WWML4<r38Jt-n@Y+)@h%'XYElcDEFe.dBU^J2/*[$3'I7>*3Lq/<L-5;cdQ22>#6[bn&_GsnLPMI-+cl>[mNdb"nl-$W']\'KaLPNA ml]t^WNOt\W3aS\7]"D5./8lS'5g+Q+<?pNBpNe&M-^g/fL<r!@@43)&t_PR52XG,qil%WH=^&Dqa/^p#Xq;aiNgpGH@@k%'VIOgF=V(fbAWg67@ i+)6W^*;nB!sfa@,"e()fk=""j0&Mp5<`KUNmF*bmWZMT"QY(^3'1L`g&c+,-1JUL#^_4:S8a1c^h#_8!K@hUa>hlZ&mm;OK;f"@%s]qkT1n$T6AHtX0FM^en7eIdsKZ<60;(:X<hIGAP4WN<APS"*D5E`]0n[/nB^)AJPf>88gEg1W@9f&[Y#j!;R_\7K">Y"35C0IXdi0O/$/jm(!l+nmON8=2T8fe&Tpf@YYJi;BrB9Oq&5gDl'j@9F4>/"5J0/Wg;ApWb::fBpp[B'MCUCsKnn#7Uhtp7BqAtS!9f(@Tc\>pYD& Cl&ZY*:5mWKZ3jDVXPZ2( U5KbP]\)#qAK-^NjVd.b#'qk>#F.Gt?ZK@rVt9FgG3mQ;iHIe&m57(h5G9i/8n`:G&HpfXLD:\g:RXg`P)X\k\V-`Bp8Q/[UP0<?RBLNT5Y(!"2fq<(-/q?>OBCCMHY$i"X61b>q.Y^-7K8ENOXTF%A92nmd>72e FEo(JD?ZNYM80>_iD+1.R^H;pK=AAqLFXnDD>'@oV5,tZ6rpcpAh_F-$A(ljbg-bS>N#i5 j%dT6I_[?_$`a-*)tAjP Pf$e]ak1";1#G/\k)%+c[/3P7\[qQ:\cAE+4M>^;lACMfV$_d hcesHeX`,E7a$_?sIZho7V&49_)a<JT* s9EW8"P_\!"QD)_0\bjUYH!80P)l6:'isq!t4c(Fi6Q>>X9]mhD""gYjf[9K1caC:Va$SPl_KLHR1jo?Snk[R-l$`'aWrA@_Iio^_b!2ZsP^Vg_6Pr`8@>BdRtLp2_G\#iSoJiI^jAj3bm[=T"<h?:%J)4^:NG3-SQ\'hOC/=s7mo=CGC[=V""af2R'=)]:f^0gf:e]"Za^>@Si FfG?o&r(?"gb>% =(jheYkR(BWC_H^Ki=1q-aZ=g*h:Ls! 
=EMTN[-n2 [MC9*d$s2tn'@0fr%KBG"6k784fZ Dg%OTX0PpR7Kb O5..q`kZ]CmirOg6$*Es#<5VXV,Sd1X*$*oo.&ak3N+g80Uje[H0$l+\1I]*lRi)6=h?T9FOHk+>)JT#a))fl Gd#/"!24O6G:6@PDQm/?4k_n7%[!Ei2Ap@VXmP_Agm9?=F:7WeW $NF\3C0%aI>G.ZNDQLQASrn(hjfH,.Z(kp@:<e0sjtMrSpU8WS<PS:Dng?D,8mWo._BL^XoIMU0UA"D-TYo=&pVOLi"tk2X?4r[GV6j*I"G?@4'&Xolk'<.ORsC79L^DjIj:!'C7q3'E`CQm>&jI^.?T2rLol<;8<ngkr4&1']A^?U1^<j,.oAsdf-9%3"P:%f:)koC-FMa/<se*FN+Y31l;(18;>o[P5tS#U9%6Z0UJ!dqs?(d7Lnm<'K%_)m6n^,IY;-j'OTCZS`A*j_^b-G'56kcZ'IGa^kNJF6#.=X4Y/VFIG[19(A`"`KjoTcQHfL9Q^0jkRNZK5I>'&YR#g"6.(A3nU?OIcnd+QL[&jN!<!g'`LOUoTWPO3/W^R^`#a]'En5)=N,?D;TmBitXdG0OG!JD=5;HkkOY>AVN];igf.:E*>tRBO:S[*mYjhPCJC)8Qg2$s+0AKeBd4(s1eZ3WD#_bVo'6m.)\&c-?V+f9=&A_j4L?9ZQ:-c'pW74U%r.m#DEH8f6F7H1BLDqa+eIq_cj&T]dr DLjep+oNt93$T10 GXAaL(Dsb(hT&(:FTX,OtVO\9I4#2%boc,0Gh5p(f]'bU$L1[0ZBkF#?ogBjUm2oQH]pTinSN":cD_FaEkA7_M3\*:Q)^WUREFZde1APMp`McO8pe^:-^Y&H9QR,#+n_JO&0Np+9re11>)YM]fYAIkCACaA\3-7fKIFfY7Xp[s\oM)f&*+p)pFXkpjGUalhQsV6iCn2iOBU"?19Ns7.m-_kh<-qI8OlU3m<\r;tM=]]b,/.]'%Lm*Qg1%jGlE4AfWh5TR,:nCU3ob!)XMF@^2%s<JY"@\>Jdi,@/Am2Pqgsa,nZm;*'%^XK3e9R!96eVbt'2GkO+s,%\F1Vcg^W"s`NR8eM$?&'MA?]^Kj<@nS75,jA-V,`;<J;Lj#K>=V:=p*@L>_Th2>&7TI=,.m;XE3142Z#0$m$8&(!7EC%J<>+$t#-;-"1EVJAW,`AkjOtXb:;IaS#q)lDk6J_DZaJ-P-7?]MO1XrqU4ad)G.lD%aH4m[:`_Tg`5^=Dj_nNSfQ446ffRC<t@?6VaNR/1fk*@-$qb%t#j]aW]n)EG5XJ5:K*=M.R=)eAgsTL@OQ?m$#6t@e-eJ'_J'1BRLF=gWN6;3S*CHM_iBie]VGUeUGI!.0*DR-\-[9L\mWgN8ebjtP\$S0?_cR??[`HX',O=>c=9#Y.j%AtJ4l:+H2oK`*5;d9mAIl_J5LX$S99sY#TA-=nB*G^m3/#T^"sLOb4Lg6(nGa_'L '7$i;bdDq'j@j5ME_s1BPYWDn!j2Z+MtB8*Q_V#ofZ7Z$?K\)g2Nrm@-6S@C=^tSl)`T$bm2"p3jj8a^UlME$JD1#.XNRk/$,A^g-\nj5@>&!-mhJ*XYNKHps?' 
jG[*S"<piI0HQ]<)ob]%A$>lgP$H"CGQT m"hkHk@U)N?"dVp!I(]]0$0;kc:)Fr2DsMXGZLWUFT.s&U@fk4$L5F*j':N&X(DQc.P]p*0ai)'5=^6r4,)%Wi]`O9,L`mPj,pJH)_S*IoN%X\[X'A+RoetZ*q4b<^;2I85HAbh%l5kgAJ8)+,bA1nL!H:8&F';1!V(6D'O;KDE&PhM8oRQViRNV4%Vb@Ab]qpP:Q^i6iqW%/'m&kBK:dd=j58ITNR+CRgI(g1.69cq'B+'+0Y[5[ti=]$0Pe9=7H>s$oZ,6,^iNoNJtg8,SkCjpYOPlSoZ3oP^KF-s1ME@)n?U*KtaWCpCSAFK?C*CGmr4;hCMd3]bLI2%]m*4'aPg%K5He #TLa@#%"_!KItS"4:;9lZEoTOSI6['*(_I^@qFf0+Kc=f3E.`(0hiK&C!jq/-@HQrk&8T*Y4\S!%q"-1H(_:-\nR?O=jKMFfFin0LqO9#prAm#%HHt[4jXK.XFEH?G;""Q0O\5>;jWL;5@c6E;`61Ne?G'T+Xp"-n7pF=#N=@@ri<G^jf oU\UK05 3*>kG,/Ze-3hB==Zs@:OY\AVX=_Y.UotK-X8H;Hk'\U3+dg?Y.W$_Cb=KUbm=j(AYc\CZ%82=JWY53$D2p_qITtj/.1M+:SGC/b'RJF67h7X=gEREg2[Z[9LRM$fRZ2RL4"r!`AYn+jVpPT kPZBHSJUq?AL!fBo`\ ToH1XVtBL0(r50hV['=UCt7Fr%@&e2K^5md;r30nn^'BL?Ra^ZWf&Bo#Xa@_FFUics@m[;tt)P!43p`hR\@O:eYqdAjmY-K<D6ZP'U,:A&jRGH/a7-LA(_J$-Bbnp%Qd#WEtd1HEq@6=lLghDO-Q%CtCfn-O:W*L1T-QK^ptb':`S)`]l7o95VbO&^CR`teXGj\:PAG!p;&3$_a<:a]JCgK5qI]TE$ipgnNJPV'VY`i93SbUqEL.t$O``NT<+rBjN V$-G5Dq%mO^AtlXC5b;UpaOPgUEgp*r"QGce/3?8q>7';gQ6Fqp/rKqYn]BsMO.-UaA$)#=-b:di#ZnVaSG:)*]AF]]q3L/0n<!AD6bAM1NNR9J"WjH4-^lnbT]Jo>GU"]2E]MLh-gp4s'QhQ0;oq3WOtNCi:ZQBf,_,"3Q;T&] XFgE'rg*;?MD3`*=N-7$#%nj9sUC%XN%,h#CA71ZAk>q%mKB0mO2n.I]h-1I#+!Y)n,R:rdpBbZ[BHlAV!q2cs+@PSb""Yskj.A81 A.rqFgiB6gJpSc.V3N\Nb&jJYI<(+%QZa7dA<m2MS/'R0\Bn!8Zp^0j-hYAjA?+iLbi)*rl,.XN57Jr0FZ7j1*o,#PD":h9O=tD'REC!#CpBP&Yk]MWm`>TpLH'H:F^3FS;YdGY25_U PeoQbr<>J<+Sh(`$P=kcM"0nV\O<OZ>#%lEcGnrN_=X<pG_^5Rfk@p2UPaY%&Q4>n?XBT13k=qq).!E4BsZ2"pqrGU\;l1<Vlb',Si]XHN</7X'l=SfIUMG.RaBe,`$fsp@Ug+H,f<)3F`ge1sNk^*Z9,cAS!T1@RJes"64iq'p;T2+J<'Z59S.-6<,[q?gAL&kC`I*?:RmKgCOsIAW+M5)C8-POb8BA"-JYggeM[-QX5I<8F#!g2H:8LDTcR4b*K_!@e#!!SXX-@='t5BG7o,QJ26>r:re`W>b'.nrZL2I_&S3A4OnVW6V\E%14qSG]FQHi?&2);SqGFPZ)q%bn!O<A' [7m*@$Y@IpG-3^p6Z3lBq'<rUH%^CUir,Nt=`ikL*it]gsAn,Cgd_SBW\s?=l=%D!%^R]Y:]_FBf2\,Y?@Ssfb6FKNrT`=]lhPXN/D] 8kTXTJ0Y`WR!jSp!M"(9TK,$s%=e9QT=?rQ0:q-; 6K0#@pIIYSfO3P_&Q7-,OaT?EjZ 
6i.5lY8Zt7-0?0%D'MD4\*XE9!2<6@qsp-E,VkmX<RUUnTG>s(*`5C^(*>0`8f5.D*9CFf8,C3mMpmdn=5E!X+9Kp8'HHc  [%Llr?t-,*RX59hS4_I7gl;;k;%qG4bKscD+RO#go605<^JCZM5pBcdA,oJd'75+Olr=aUe`l>[rL!36J" Boio2$`)Kl-=Mgge*+@Cssq1n:7KE(pZ,2$dY.AK[ D9qAKrjo; ZeU:Xd+q/-KAI&1Wa<VO*Xc:Y_QU:N?sXKCd]R*FW0\0L!O"k,?C9*r:$5f=oO'g]-o>UhsJA4i)R17&_1LViJ!EDpiILQn^sp*1"$s(^+^&]57\5A9G,rI_$&qFSKBff/)WBe!+6ka[k*YaKYG<0RLEA,t:_"g.Y5A5LdTWl"/_UisPLQ:O'Y8-_`.4&gHYIj!1#PfbL7M]g:=hT@M>6<i;+gpeZj&DM:n.3gP/,>%1-t,!2*nXtltI8#X#<)T.L857G*F7JjXq3:n$5q(Ad(<V=+VjV0A<<0thM?eA0a!=8rrO'D,tpXh+GbQJJR`Y&g(:"D%`V<_q%Y+s;R3d('E*]+d`T.ALoYAJ]U5nsXl#W[F+[\n>8>7J#H=4si+]U5\AAE2PP/h*!cEG?JM0:8_5aC1o1.cI,h'd8B%6BH:E<2"]rrfbOEZ+=q4t$@iU2hF1[AmRg@>t=r);Z 3+Hbd4PcHV*i@Nk_EMD3?Ct0P?d<#A:5W_!F+MG"&kK]4j]-Xd Q31oql;?O7Qa9t@bNSe6A%)hRO12E@VNcsSf407#@?#92KeEU2=b<SY4L[?2mL5*80pj.=3P)9b5/Ri0[:`lmE_LkT5]gLKa`\V> oes)XZnaDg[eh)3PG(*G>DB/:^GsR^-)WK<&BA1K*2i8gd6L"nn2 ,8Z4O5[B22Q0p"r=T .]Mr6AeC[qY?5W)<t:nIU#1d[^3i)?U[nd[psX>d,72#0:\d]T4LM-2d1)>79[4qAVkE/Z^aSB%hq7@UL)iN!Q1=(-U+2WIBqO_RU\L$rBJ(q9oH53n9kBB5lr[D1*/fie8Q?LB\M^0dIih3Z*^J?)Z11k@,sa:p59]G""sB#2Q;LX@?Y%NBt?t8?p5FZWl_S(g[!U`oD,=A^_]3?8n'FeY(>,kqnsFAN)9nE>;YpIm+#\0lD/:mU0jDZ0+Q8BD+G'oAMtV%EC>FNl6Tn,714bS2_f`X!lJK0JikF#nqYP8DSi&d0+,%+CVq+V4pOd>Se5meYchh2-f4Uksj'3R[m43qh!'/.Z^3j]B$#CJZqjWb+XLUM&-a`^0)b>g2lFt3Ml%Z$g"D(i#H]4FbD E9G+BtjS?8[1ojO:";nU1s';M]F[8DTpiNP7iaPnr(/&1t,EW!f Ige-AD?U8EUET:3YB= &,E@JT;%7^rdl"-1A-@YLU%?sXNo?r8m,A[-H/2nS6#jEPYnV%?8s[ZEKI#;th?Gg`=[r!O=(PIh,TBX(g_14V[jS*oaF=I$]1sbg[.L^U^;gh(YWfco8K*Yo_1m:Y<9XcMf`cd>4B5_)2tP$jja#:\4!Q&4b7GY`NoO5)l:-NSdZ!odhsA<0YX:T`=U,f\%TD&t:"m`> M!V92(Df#!`WYAoBl/A#NLH"XGVXhdmrjc+[)dJ*dXK,\IGLEm)lgX-\MejV&!G=Y:j-ck52qU]f:?rkh:i7<OD;/I$4a:cAfF3^[WHA5&SP$$p',\i+GN.q@!-$`L#&XN\^sT-BrlDh:PdqS!e7`WBg9t# Y6rq:T@PT>?2;1Q-l21cr[\o+JQb/1L2)t5^A(K(9BMK8ZALi4=<=tND"j]nXAgtA(i`T%!"+;A)nFJFVQU)$Mi`Z:1]_AA[aVB@XD>;h9_6WE(l2Gt>I:nBrLcj(nb@GM"7=g=S T\M 6Sqi_o/_)cOr]O:QDt@qk`:jA4tO^+:j#MPV<1kV^;k:)$;)_?,mGqe*AY8#Ago.k. 
#OY5g:>?g`$qSg+7\>3k:ak*qmj; gdGUiig&5U,BYiB?$QXWArV4Kj*DB,%W\1*9ES^lg^=HB>8qa4%Pol\nnS8*!r`%4Nsqr15g[*Sp[--kpIAq&^N<OKL/OB'(rOcp.)'AAAgj*FZ^flc#UCg?%BjJ+>b91"\Tp_iT^&m='FeOUT&X*ni*rD\0P%LKUetBDK3A0^^EX08qjg+O2Wrc-@U"`M"3R291mG[$7G ;>pnbijT<Q]2lJnBc@@ra"WBtAUh"+Go 6U6,<Ns]kheJses]+A>)6F(,$e?88$sX>=(P;!]7e(ht0^MgQ``%7KjDdCmVbV1SLTeWg7PE0g[:e0Q+,4VPfHg+k78Hm^0UO:Ea$>AV\hB0c\#Y3NZiA$i(hPh>PN@s<X+e4SOTp\_o?B_7C9t=LAaM>_bKW!W_El%bNg^sA<)b*p66]'!R*Q9(K4'&YfSUHY>`J$0X.Q.m'j _%.LSMKF2 hcael/5(B=mNh&#@7pb'ZLR[41U60[S2L\Fn<pn]6AZrM]ip)-?T?A7B[dk`7?e.CI-9a$ejs%(&:bs,HV$@C?.=2)X39dM5i!`;h#@hn3AO/ pmFMOpIGA2t*@>@eRM_2X+hGYM^-7[G-^h.^4m_d=(m]5DYm[g 9)kF0/=]Dog=hA`@.A!0&'kK'<dg]69t`mqqT!q]K347>#l.2,T^qBde&[Wq2F.bl@[(-?d-,l>[][34(NmM8*/l,m/fjif[+b'7gn#A!8+H)tiOIfY-VqEbFZ_*d+(Gi3n^cWGP9"I$gpO)Tn\@m848(OA=mL'MLD+#aS^eo.mA0cDK@Ug5O`ig%s&b(t*&?T"O3RE;'g33#cEE.Q>B7Dl"&`\[DF;(RL8R9@%l'e4s$(;7Q3(Wo@Dg 1?GW>,B#9f(48d;E3nkY::p%B?E]4rUIIl/S>_Jm)dkbsPX]I1iedL@/lhDr+!)O_):A4=cK\S.U:*t%V<cAeeA1*5[q>?A3hk!"i-Cgl=KA6Xo:/DJ##(3"Nm(Dh+j>tg;ORJ.t'W;1=k.C5W1sm^T?g%slq,RG37+!"TtcTI$U3UA+B+=O8'F`?mh^5=beG\9,e^69I]@(&%moD-N!12*d]<.7Glqa!*ddYLqWLO5j#$QbW0oJP7XXJCRG,iXOM9&\=[HQ"L_I)^,EpN$[A@0&VdqbX`E[`UiCN%PZABVibPN)U&/l+h_E%h'%!t^k?NKmL9?1i^nB\OT?-_+,`*X4'OH8h%\#],cmg1Mr?M3elTGrA&po0n.XNh9Fk*>h$[(Ol cr-K S,)2mYCtm$HqMh,d(3$Q-Er+=6S\YWS'2tE(6">0]g3D.9*7t O!rO1C-g:JTq5Fa/>MoASLgTj9FA%b<sBjKpDQPe8HaMe4(J70%NpK+A&_>*%I\Cj1.jgkX?3n:,#WmA;@EUAEa&bJ$?=5(D.[*4F2ReSk8pq#oOWCW-k$1ks=<o'B"1DB_32:OW)fXK+hR-BpGCdDA5iXgdIa9s.Od6n-^L$A+J,\X4;E(h$sJYXmDel0FK_.ng-;-bKZlY)a-=iG>hN q& (PUA[]iq(elgSlXbZS"I%$iOcin8,_h<*!@@N\raX+JL 0Ln1GTjgIj)d,8ZU#A\<84AiW'@s%#FZ">/ #JaU0hgBgGB0!Qc_RR&rQQh*7 UR3Z9@k-(lDP)XnsEoXt"An-;LA:*4KrF*13/-n;Q&[\U&X'&e97jkU',IY7AoppX5nK<Z3k/^J)A_I`KVGU+X%&) ALWm11a.&;#V&7LYm?]h^"?$IblT7cE'ioPMr7APS+?iaY5:SnJdN?i$;hYAooWQT-oT>Z#s2JS_na@R6ZF2m6pE+%-"$6mt.t5\/:rcnO1B%KC@ r4sr`p0'RoCQ=lfNam6;^?dLp<SJ"di_"pHp\=@P!2`)Um;t(gRmFcqVUc[FA"*9)AbM,-!Z/7Ge@Ad8e<WP=aP#SS, BfpOcB-Sf1NH# 
#_,DkoqZk8?ADO34bJWaG*mdBVcPt@N@0nH&!!=3:2'["U_[cA4VYtFt6tLQs?l<ibB_:_fi[Us\%@^p<AU8iI<7<YbCKP^ghShis6,Ob;I ^>.'3SX;-pZJ?b,kdd;V 1#KB8WX4`Yd?ta$5CGBdo<U=Tjd;KI?tN-6hah Jj&_FdV'K]M$] r-]3t(++pSR&Pg$f8R<`Al6=RrCR#3K 59r07'5;";`;_c6&0HR [,RSAZ3%<1G1YdP@="),VT:Yeir44h=n!-Br-"a,C,+E@WQ$*%]/Tn`?ZX)=,3#$LNQboQA<!n6%H"YP\]mB5.;NX1pe'?`\@$^BNj<tQ/8-PpUdj_Ck"=&FO8EV0 ehmnK(Hk;n:fq2;G=_Yg(Xo8sq!*VSd5qfW!q6M7@nd$5ASh^2mm/[7rC/k4]5^>1" P+tR< ^o<i@X5f)\1:5JUnj+ CCbeK%jo%Ne2bDfVj;i(;*)7J$6g)BtY9<i!_PI@HYZ^ %[kkK[hbds?-tl%f_:_lgeXGnA5*JtQ&0t<'f>q-F=0-.J7Kj6)nmq9H>5(BWBc)jG%qIn*INl`TI/np<d'%:lf__o"[jOsqiWUa"1hK+"bA7`"/*+/6%?F$n,^rGfc?.&c\iFICkTtQeBgiR?_9g/G9:N$lKMdnpil^^p==U5)5?7BRiM0l?b78K"H]c%)GF^dR<4\tM"qfhWcKHcJHPH[i7a!>hUP<4<@19*b(^]?:KTZ+P?\/GE`[QMki%nfAV?A6l+(g(t$O`\-2pXCkcU 4p]AY;-+khk6PoZ>lUDbnGJm2``G<h@p$t+39jh;9h+?^4%B=VJ<Y3<</0$i9Fn,H>7^'I(_B0^ok/>K!iIe:rE"5UGl1c-;O@Z)ctM#Lft\65Yh<@c#js.!]J"c)jQn\GBaetQ[0WIGn$P(Y6;&:*QW38ffW4DA.Ah?;#-mJ)gi1YT@!fA?7`]Cq>[87HL)rgZ0hYj\<Nm$;h7A)M#PS?$Iq!oSjVCqpaS#WIi(J5=A^c*mX)/#S=%UI-LG!pl -(!pWW2#XiqffEASLdN,(Y*]FR36OZ[EIcY<q%S\?dC-]"I.,bf+rdB)RmB%9I7q8>/q+_t$%+.E7=XB`.PGg7hs7NiZm2`p)PM'D]*e"Y]@-a/7+)DAH[KJ%1?rGgE-G^B1EKj&'CE>-?`mdXs#"\b,P3LmFqOP^d7C:d,4OT2ZpAfBt*^]h"+Q/:8e.3AeW_9;4.fDW@]!!^!tIss`*le='i?_4.F=/L'N3<Kijr1"d' AO7(I#0NIjW]*O-P(R25<&`.7+qm^kf4tpUIJ[rIn F?k"]9mF`""k)%Nn2-+#Xb**5@om2 cV$KVc((.,&N(Sq081q$cI]"U23aRXeOOa7/KcCs3;IXj)1jYF?AG1\YEL=Det;,8Hi\AmQn`c1O&acs=]*0'DMfdA&s4DUl&qt$a6G*/B?'-_]%Jc%!gW-.@be4R?k[M\ji] 8$srTOm'U!jP;W<$pXL81bm5Vfme26?m3as_4?M/l6>(B[d$aQkA%4%+SF4Jr!AHRTWkLNlr+)_H',e:6sEWnF1YqAiDGt?(5Kgo?&aP/t"ZkJgf6CJ)&ER&^44_dnjZ"> @`1L<3f,*t5Us(Pb>bKLBA[*n:H5MA ;V='WQ@[iBiPFaWcj"C\=/RnlUoJ-HCTgEt>F,l.*f92ArUma2 3m5oT4%N^BS$=$_KW/pP;bL6E1lDUPM6$F8OOcI7I?.d>*E35",<tIg/JU%C/T!sWa4:;ohiZ0,mXUjpC9@b?TJpJO>9\>4R4>^8i=/H@%_*iXi!@Rf\akh(dl+#rsRJG]H)B5"+QEC[1ci,I8f`(QY+>Ws..DmH+/BDV@9PTTA5!?nKhVgAF?['JprQQEC4^@16rgRqELddHHoSl $:X3HCrr Sgi!AOG#TNg&*?r^o*Ms@< 
Ooi]j28T)^C=II-tdag0=:oQn&>)E-d;+mVrWM*mH1d%m@Jk<*'F$I@KJ>.:25Yh73gOQ!2?<AM\d+TY?PA26qtE$5<8lpqreQIpk1R2EAq=?+?P@?2qkfTPq^J[cJ+S],:AU*kW6A.IY?s8sQjTo2^_7m7llAi&:rrl+M,bKVM#J%RDlo;I^MZ^Ab[HS&)'JTm]U71fW=;!e<W%9P=#/#_]NRY@C)&W%s^4t,*rFtop.4\I&$/NK96VXVJ?aFY'A$!<$f>:O?2a=/(G#)\X#_rg35abm8rBj8K&OF7$QKH0YCp[D*ROI8F^$\"e[:rFN2J;W0a"-Zqa?;(Z1CU5;OdaHIULE$`^O^m=A5t &Q'\"0U#i\(EL,j)oGOKmU^b>\Td=.mn=9\EcLH`N9&jWAcsOE2!HEkag7:LefbK;MkAA_!<e:7:N!-T?ORM)mY!*^$!)t'(_,Hr$AIgU0@3EJ-3%hrNtq]Wf`p544k*WPMq#XB[N7] V)4ej[Bip(X(gJ?=`ArpfY&OK<?(6?UlD3jR ;,-M ?a*#>h?iAWBN^Di_6R[>0n@[ FHg<_A_-&l:HNP!A_j $<r<JL[H9]&IC>"]s`Q'*7dRl'd%F\B<N*3c%8>&P.%Q?L>q)\]B+ce )qT\KRgNNHQ#o!-e"X/S=(I2bDE3)FV&4:fKY:?&9;nS!RALA>q^\FBY'-4[>lQ,?]#3/8F3.8;J/FG>$0,mJ3dJ&hi$,n]iE5P?Il*"mr3FQ Po-=<)id_<r5!A@AgD3NF$+sHqgUoHj55Gfr2rGhVSA\Q9[P)2f]911O_5)*cYcY\KNM]V_YACfZ1$!tRADrAWc>X)e/ZF!)qCA:#0<M]`/ h33#GTP%#3X4l@jE\SX>Hlh9)R0L$P1O3s(O5XjCK +$QRLM&Lf#\D[tJhQ0g2%.Et.1EhW-I$@8PA7ElQ<D`F]FTQ^0lC^&)_qn$R2tCd_-pY#7B#0d-h?2mB*WV ?pD%.">FTX4 @Z+$[0ER'AX.T0A&m[J%F=lqa,sP]_2,KG6J;AN,EpcbjNpAM`HP,F2W%@,X(/9s7A1.haE4Z#FV(Ut ;DHrtNPr;p_V(@9<`YlTF2\`U9Q_82c#!(]T;r0&= kat71d";K$:+LnaWq))=ON$KV:rIda\(@Anf_Z/I:>&-k:i$jn-^O4!j#NfEsGh[#"Vq%5 qn3s3+)b(^.Xd07Qd$dL61bR(%V$D!s<dqo86T"f66 c'q^/lJ[1/"o:$pLILA*rA$G9R'#gY[OJB=(3\HJlA1hMdPS-[(h!9)dbDp/MOl,_:ebkl+V@NbsX\D%6-_dp3-Fn+k-,P+SG "kA-h3C;<Z\/ae2k1Rm1?g/$NDZ`Of/3(V].(.^j6V>Acrh"JG'FK&r,d%[thtZ1Q&MgV 5Aa2n6jV+Mp"?& "i8h+dmKbo@<'k.<mG16:-qL]OlOU7ICRT)""d$TBd*&4jfgFUV";gGKRK,dNB_GL-JneUoW`*#EpEVi5."M\2O_<OLAMV@9+C&VDX0Yh!*niX1@0Sfdf4;oIL<HYP#!#"?$mn4P>trdH3P('@$-kJJrI7^.*$"Xl'Z/C#eQ_7@8dEmsQ\L<(8lqhf8OlRSni=T$@'FgGqBD=Xo"m^O8bJANBdn2$78Kd)Tmn.Gm.]f'S4-XCO >b i@K#>bL!a gme-G(e%-kbhB*SGXF(&bkF4M5[B 2ksLC[]j=BIm_C^n5oMk7>mXr><^?YRqJ-88)nH.lYeBA(TlT.Xsf%bM&i[iAqhD2nk#JkbTS.Bm tnE:?h, HONo<Z.<h2K^2a)Q>E?+(+t'q29[5Gg7D*0"EgODHeaD'[O#SDbpNbO%h#Z6&7*0AW3*9%gZMk8C,_#4R*V5Z!5Aa``/PAcpnIc7)q3NakD44nF6.r\P(rB<cbE0XDEKF=G;ok@o@'+]4q 
+-[s%"?!t9p`A(/YO'["]$NZ9R6kOZZ0r>!`cQ#$>PE6rn,Q'jq2Z$.t@clQH/q)K@IjIV?Aa%+.qQ^o`6+`NT-Ac./4;LQ!"HJ&@)]4kS-dVEthjcMWCM[?Uj2l5`!(la4LYpAt120S[F,$+i]MY0q[j!bOcpABT<*A"Ge#%>>">l>J!E=#_#pd73b\J&81(+-=b0;ofqW#DAeENO26b=Bte<=Q:LbC/W4L'NOJi*]kC(<1A<.ne;_k2K'(KA!CN5IYW'_f>XmZ3J5E+YHf><U.Ie0TJ'A'9gpT\k79EXXKF8b0ZVM CHlQ%oK2^ZtaOQUMLc&hKAA8A3']1)ZTEOZ<o=W$t1q14YPAlP7>66pe(+W%MeTFtGO2C^kjd;c[td7:f"JErc^WWW9P<GpOJ+8?4&i,+SfH/_G4gqie$fM06`*M;nVL`BQOO=mGet9)$`o/]'eRnbqEgs+ee;b'JNQ1'rRh9F]?,`.?M]s7TbZ.-7X=gI^Jq[V;aDnAa+XZS%+X?sXr BK,.G8`7d<p28WBnU2:ph>8X"EGNb`\P!3@kc`]ThN AYGM_6jlqXqj8RaH`BpnJFO2G;^ _A$Q0Ol#6 m4hSdOQJMfg_"j>o5Z$7gnSf%i:A=s5P`M/6*>oZ72@;0\\O\NI!bo<H>0IG`/1l0FW/<@lnX6#\BVl'nb\s.l4/KP60orniFN!MnRO^2`l'KZHHne*0:lA(Ha<*Lq3,M'>&fo5';oqXZl.tbHU-`O(sJrsD\3KA^,LhM!:=YmKqDRCN^&^:L_#H2SUS+n0[(\SE?qGOYE\CV[7UEIk<i6-*F` :&3X4r!O5&F1#Lb(A ,iOq,bM;= KZm*4\8A+^g,KTNo7LM!.<5FW/IbH/+%m'bq;5@GB""IAA97Jt_VT/K;9EA^t4rt[;0^.88>jR)bPsA=$%P:0jo%B67\S>F'D*Y(onLJi''3pJpDW^$Pd4\e='d[lCfj5m"ncOQ;FIGS+"*#kB#*.@ tW\Z&!rZ/116]lhp23o,2?=qpc"]3#8>$/QXtslaYR2e^A%NGOS6?^h"1-9AS%H[/tUH9NF)0cKjLAl?WT85G:_\%28?)cM#J)@i0ajWF8h#f!%9d68&O2;LHS`G5&8hUra(Edco^\Af_fNcgaoXaLH)b Y9DI[]_8B`=F*8"H&*)hD;]5O<XF'lZRd:;TKHWGK<QEmb fq?t;J#0M32#G:5/%Eq1hD97-%5JN>!DI?]VPR+`VE/.L[QFc._d!kJD;5.;pG^`7\?O(tsh$nn62%X>5h2j B1*\can[?.t5>:OElXG]o]BE2ni.m4GkL,#AM5(=5h'o5P9n sZJ))QU/]T!3ngkVGW2:Y\\=A2A]VQ'$-g]taXA8jY6@8c"He4j4ai [b*5G^>bRXBCo!^f!8c54&.I R!a`b C(<=.`Lhssq:nHF4m8`5BU>Wf(Fq/AnO:BgHbAXd4S@TKWFscj3@(8DA fCt3Ap7Af=]JYGcIC$k[#,%/jE+X8 f7<$!kZpZ'sT53W@@oTiKOi*9Rsr1Y-JI;>BOIqY]S34A:*s-hhkQaL3UCsK7eGVcodIqGB<O*0@*(fgmF@A<)Z2fDN,t82a7aMX ^,o//oZd$Z&ZW9A;r^5`ZVRbd:#t[G@>`P6\(M$9>q$OYtD-_`,lF84SFOAWdEcsW8p1fn6\UUoi(3iXNt[M-7'ZDh*ij_rhO3dr_V*E/3[iGGl:G%9 DdO%+[L):QVF%E9oo6_+j_o=@hg9c;nGt;7pWCA5OSEb)beNGrl7Go*:Ab9/La (Vc,CHTp -ed\QeI-L%^$'Vp]YT"Ce[HsM3"7?T#e,8Gj". e06qYPms`_'mp:AIgealhh1Z7$o5sD[o+^XjmC5-kaX]kGZC,0Lo3S!dg56eJ$*G#U+k=. 
<3Y149a=1@ _Q8R!DPMU^/Z-WKoDM0@!E5@Ja+&)`I]-H$3-@*_tMdI7;A!Do'78BOZ#9RXOGj0Hhq[#^m4&75kF tr&mo6OD,#G (Db4"\e0hJfAj!I#aecb4UEfl>.a9tA'T&O5X]j]I=ia3HB0158N'.%h"6j6Ip/g,nke$",.('-%UQYjCQ'etA(]lGP!Cr0aFO1GBX\F2;Y<n_-XTO:(bsl$bs2]eT5QZ0k3:cQ0HF.0c.mG0-$L4B6[SQDGaO0n$ed$_9c_"7%E#qms2S3=%BP+AI3rA=Ugt!&%=BlB!Q8rl4(p.W;^ABkk.(+%3Bf!AF_P L1iF<&gFYhbO-0H@Kt$S8KQs(YLVFF]Dpe%ljBO'#oY%Lt<M(0m??4F99Ekc"s.tc=OtYXP"Y+0e381+5=h=e9QG,J(QV2?1`h/>n?j ?DK"]kVYLb%\1XZab0n:7\mt1sHdf.pUOS_)#B+lat?3L:.T."\JbMJ,EPN^2 6d+31P,TJ:\TMRa6)*]lT6iG=B/W9l1)MmQP+oBH<O"KFZeR,sK"gFfCD9&Ae_S>&&"Qcq4Ak+f$jphpbGUc\f[UZ@RXh7[G?O0eGJ AC=[ot6X5ls!ST!+^:`01dKna?8=_V0s#Lro4'bgM52UR1)!ACO4Vf>4'R:r=K-=6Y^j"RFjjL_R ij?@tpa(EQ%n5.B&bCY]];Qr4P;LHZSm2b)M?:0rb!f/C457tRmAq1Sg\=GtkG;saf9Ag[AS*qF:FYp7$rm;;ksPf3Re9egW>eLT_V4L(a<\LAGjmkD1<FX&5DEpmtU59b]1M32#J(:fCoWI&!AfUgghsnTb2m_.G.C68?*,PF#cd#^hO)nA@Wj>(so:)mCI\4QYXoXN9d_8R>+@& tGbj*f,FC>+F`'\Ep`J1Wcj*3N8#nm;c1=qIi<N<!(`%AsA)Y'-NAOhgnpKXB W-3d'RB?\/T?B8Oe :73r&Ac.h g!s8f!7H@)KAN'.@cs01Ac;r#,;l.%t6s@gW!&miGDTWS;OA3GF67]!7!4,;cpZN("8(`[GR*GQTOC(_)tr=j)Adh-W%\'4\LMU\e(W&5$4c'IM<- MUj\;1O=iHcA\9D:Xtd9^a#Ag:[m`deAaHbQ=QdLhAY;tK/J&LW Ld8W+GST@TGhh9c;0,&ks $TObV`+osdDbg_c<>9*#HMbtdk<A*<mEeoV=rb2Q#PtA,ANTN,=:#= Mo5M\;Eq:9lHi>J!q4Q+k?VIQ(NW&nU]DEA&8pMTVq@6Orq;NQ[h6q=)?AEoHTEc<DsXBMqqARD&9O!9EY<7qST[bGk]F@A0&-Z*KWTI4ip@cRr.SW/)A8/'3=K^ZU:EJ1q@.O+p_JCQ+7s\eYra8^CCFs5Ebd+Jas^Ga0H*S"X.:2@XT)8jHT2qP5Q>QN?DYD%F8N=7F75UsA=^DL(* Y:odI+^'3"c`OK.;G`c[8;?c2[9bUmA37!/EEfNIQB*A"1#<sQJ*[a0GBV=FH9VN'tD5?>Y"3_._Y/'Ug&C_`&5V'')6[t.4n\<b+X-a[rH8Ci^:E5a3b XkAjD]&HWWVS-8/.pNM/n+-AMj6=1P>+>[qn #=A66c3'"a?_'R\QjPd\-f?c 1ga%M`^3;c+4RUe+8J]Qc^c,Ea6qR:1N_@#i+BgPD^QC4$@[7'<8Xi8^F\gEH7_,mHW?ef.$QcN6:C/?-W:]`RDCKN`d8GV@@R*k=Eh Pj1@94h%n4D#m<QdD?a5a@ 
,Ws#A8G\6AAY4.Asj(l/\<b<NaQ[%F-/%InEjS6E]gVdTLjBEAS:mb?H"$l1XTJ<(d1?L&64B[RAA`>A8Rt0O<(l7;R4(sg)EZH?!/(iS@WX`.fro5+$Bg07]NU^+S7&3Jik'IOK_ZfeGma;rK#$ZF-iPa5+d2LMWJR504_EW'/!&[&so,'GJ]NJLb2kEp'S87"_VCmX<USO8M4!6AQj>sKc8&pDg399c8Zcqo+(^9VV\-4QE'79ir/;2eI#HTb3i8nXX9po&?=i+75Cg$d8QYB"qAB0RUGUdZjq)EC)99>ednIT+rG2P5>/"l<$^#62@X.OP0<\l&t<i/aQAn2-K*bJ"JP1SB\44d,77@<FL85Nt_B8$\B#DQEni@^c_nX^jGZL]4W*WHcacrkEsZ3<0jWE9!JWMA:@4/B+sE_Mj O>K2fqSko[-G5+WXG/4i,lt3m7j*gp-Fqg,2;DQAA17Qs\*:UZgH@MD$7.Yg+jAnA,_E$[ZrA(SbGGRn$P[ Pn[$Ak]Kf1tm&e"7\3ZgYWh+Dp4r<`'ha;]^34 ^PTd8$Fd[_r#L*tL2S(O/TG34Te\7Kh%WSk`CocYt@2tI<ZU^"0 [J`;l!g;B9"jAtnG6gqjSh89#2Y^!DA8(lgp+EboS8!Q%)UHU)BW+!XniJb_gjI7jZnqj>&0YIVoa@#:@iq#5"gL79n$[Y7Esp?b-3Z+QjrVJ1i<LG]*UenM8`='sdi9B!)+^Zkbb,(R.+!H31&=XQ[/X[)oF2?GrLV\hl!`?Ack@P0fp=kL3o[g]__F-]<8Mp(2'f&YpA#9S3\ibKX"9W.,&RM@'k53k/I[M;?T1<^2E\K4tkf.:LE=*,0>B&m5p[nZO&j\7BHo@k(,;`>b@A]b)Y:*C*%g/ab=IG/^&W`7eG?"X>\"D':(p2`D>QANkSke_V6MeJ3X4&gB,fes+mGORj3NApg34f0\+L:%JPm\PA=;Gbh^D\;[RM9rV/!pB$qNAA9sno2"bCM\QRA.MZc4&Q`?^OVQ 9'DLP-S,'U3la.pjBK1\g5Z)i>)S33M& T52iAlVo0`A76jFEimi&aafI7>pOU8]_SroKVeW. 
,lM]!(:Zf1=V2%@/"ZPF:XF8TXDtAC'&rc<GitAK!RO@EM33MoBTP3G%7U9Mab@8.ZF@1/9(&S,!8q1at=5*C2&57I?phjEH scF`K/(=2raBP`AK,!0%BlY1d0L$W)+mc+6,OtL?(t&qr)pLS9b]9ECFm!KZS(V*oJo:g8_X-rhI[p.A$OT$#D]\PC6G<44 +5<rdIKBI(.d_M8/`;3,m0.!V*,q+WqU8pBL,>%9T':;oLIa<&P(D$MF8KDsKMAGig]UmRaIbOn<YbsU=Di9XooN02J'B#YW/BU[J9%WD]X#8R"a#%Ffon0$9<a)QBid@:t0_Pd.EON7:i"'/%pEf#AKsrZa"F[De,F[#fSZacHt3BZj]))[jm%coClL*)lTOGrD)@>P?9.Yid]s6::3`e4[H*l0Eg)\At:J#o38<4!>Jb2/QQN<H2`Fgh65@1n<E5=?[e(P4"qIhU)C!JS$a(1qN9q2TYBkh(a2B[)15mFb)Y#dN^B[.^tZ(+XIBiCqOaf"k^hfCdt*[%O$HU+9*l+mG@a]TDBHWVZIb.HE%:KCUkgf;0,#WH+-E2ljPK85OPnd!<25`AK?5[p>E*^9An:I%-bm,)Ar'i?0,So/k-G)S_(M)sh></2S(%ra7cA!&,Y6_n-N:56nsW#Hq5V[9*BXWGM2qr3QR;W>TqJH:_2T9n^P\A>g)[Q:`L 9X4[k.#X424]_oHPg.faGtb,"!s8\ PqGBae+V9R42d9qQ$#GC?>fTUd8KG!VIAiV$MWr$aMAf>,e<2jfgkB=i@\;CD$Oro^2B=FN M^o+ n8@07?E!`ebB^@BWN1?IR];O_f(sPdLnd]6n;5"7?n>2I[N2L?69OH.>.iZq6&`;^iTaCK !?Ai@S.1%n(A2\$;/fJ!6P0[YL8XT0@:Q:i]C*9E[-S,t=Jqchq:UW[AKFq%A6I1R(F0,'DT3UZe5PbG*?FHaRoBd[MNja0sN@4@X2_T_Sd.^> 8Z]A(dD>(5K54HM5@h"CK:`F/4='/0+bgQKc >Vq]Ff.HK*Xm`Lg+`L7mk%1"32so5Nq2j6W8d%.QnNZP`t5eV, U3Jln$]5^5]^O4lT8@SgbpS"F)7'p?SYAjBSR Q`opb$BV`:"3bt>WM%_CT'(Ap3P>i_ C$rbf47bdK760,a3\&EdC_]d?:\Na$)_5XF;B:_ENnF&Ltd"XEF[&0JfAsID)k!?*21!rI&:Cp?/tkUh>:&:C6KAFqCRQe3h;nNtD$kAqj>4kJ#s@L'YCp-mRRZ[R^p^Z,BUq=6=`9+!)&5AoJEg*rWK]nnb@)'"/S)CsYO-<Aj-pAC+LR>X@5?dq.MF[PWEU5pt5F[]W[M#rZ\Isa%#;KeQ=4t[r%K)mh6b1o04L]N8jlEB-*F8gn5o$TcJ>T#%;1Q',oiXOQIi0Z_lm,Dh]&0e6+I3Qeb9O[YEX)VB*K\_1_t'N;nNc*$I_7'0l-_if0pYIRimXPt?ZGZIs<\>Vl4t]F!X4E&(rEEV<I'1!+9: S$Mg:/*p^'SN,W4_'-mm+F9b[pEa$Di1&jU/,n&I:H`\6"=!Fit"f+W^Z%s:b3-2D5,]aeD5%[a.=F50nZb^Rn$>o6Wp[Ei^=+r*q)(lWCFrUk 40p(TP#ATs//KMZLGDmD3G9nF]A>_E+*bfHq<nB]hO$KUAYS6%B-I:_]CN1PPN );>A.A_-X')92n<S]d^QJk4Wp7 0?YWF%382[T5>DI?cj^nTCXE+J]Jo\"$=Q`l-,.blk58Q3N<$X?RQdA5B6=B8pEDR6:KH,ppA=jJa+K@n6^bn$'$O/)Y>J?c$$I-d,#3j^]j80Wt41MWs\?WJE,;COD?4ms,oD.sIRQ9a\!i8t961pg^r4kF)6VXh%&Hl9b)q6K;`hc#H8^n2 
)AkTmRhAMi3I"RWbVkaFcN=+%5FYH[*>XUn0/b8XsF!+f^5/0ZfClKWRFbo)M5tjDPecUD2m=''7eSmM/[mLm!r[f?JmZcrkg!Lh[1dUfN?dA@)F.J-9M)3$.Y20n8Zg`3!(UGd<GFWZ,OLk3V2A1'We#bKF&(.#]1H@&LS8c/[_#&paml4E7bisULq`>(MW(!#5ZTk2.M"2cSaNAOAUktY' nW2%P$'I>U\FeZ [N"$%gr1Uh=[6AGTHBICP3:\*l#_Y0k"Z+i)W#8i_:O a9$IBpl>:i(VYHQb=`r"6 _fW:gmJ[^6*`E9<gJ-2_6B#Zp_p.DK]LC%0K3col2g3_FWKZ9/R ge&+EMk </5p>21NAIAZO^#7'9Vb?oI&.\S7c%#^#opo9IXehk=\0$T0,G%e9#82k=g$S&G%6N#^S@rDA!+rrf1"bRe%AL (FOT^tDX"( `1?C:Ip4sIW(W%[>5Jh\UWaBn>X<mJt+@8EgG_lIbM+)O@NoiR)FB\fX=XC3_G FPq3:ECsU# 9on?r'd59J'^/>."Kn[=A.c7#a"C)<:kfN" 9JLLN_S@aRHtdj<[DVF"f*9?)do<>pD;1kP>]0Vi..`S`,5.>9fq^pL]A0`k3Vs6,]857=RYbq@!UHV7#9\,F6f[O8Rh5,X8b!*h[.&6o?BmZ#9/Z)=[Ff"EK@J`p.i7?7s%BA+n6$I\-VL0agqa7[42#t)+a<lkcQ,']@$RIUdcnnW%"l1oU3<4gNki&^=).I@U@3[aWLTn`'QA<_4$3VV\?9Za5kO_/U_>`/-F(Rs,.6$+6As/EQAETU`7_`*NQJaYlW38t<[FGQ,\S2*q!7mZ]E3a2[gBgS.p=<s2JStK"Aro<,?UZqPR$-1EOp_b.,kgN9:0qI$RfC)f]rJr]lPr`WZ,/IZYSCM,gTQIJHE8GX+[UUMLf_@FA=HEQ5W&>`(O20#3ZfCMTjW);J;1_>6$@ZdU &m[HUg>Q<tgQ_!)k_QE`[%.ZUB9]k>^^db"&rlbj?KDiifhg/ZZ_APqgXga]<I?-DH!WC33EhBEm:UqNh_a 9 3YZR`nlt3FRqXKC@5e-T8E)NmP/H)gSL%bdT^Ar6Wdlqbm,1;"d]hL]>(?jl%0\"OeGm?l*& ",ptA&]q;N[%KIrr&p_YC/BpJt>aANfsB" <=<80jfO3+pc`%iQ[G?YO6eP+]>d?nq"5,^/b+ZacXQa#(/!&"bCEbSqpD'3WPdblq$F-AJi5)2(B3>5`jF[9LDU@hh6=q_?L`R9H3&:TSj,hfhNA*J&%d2[eWol4`ZW^)Y0^!M8UM)Fcq<;610>oIQ6/7GKALd+o.:$:3kqk)Mot^Ne0654ciIqn%h+O@\$V9D+tt.B pB!1fVL;MUGE`s;(<\*ZK3'.?WPO;jH9K[/G&>P$8Z#-&E_i1Q(^(>e+;[-g0PD&.AIlFo?\RFs!T*Z?#W<d2h]oN@:2`R>7aB/.+4oOgQd@].*bW:=T.qJ3\j#%5^!N^a `[!tV*dojYTeUId.kI>%?8Q&5Gp#IL'^Tsct/5E\imT>p*oJL KO#5Dr>1.1mRS2eKJ_rZ2_cS+c>rs;kE3a<[K3[T*-%R^`@*W1e<i?XfV#qp'#7 <5n1(M8Fhh>L9&_&l#=3NW`+Y^]KVcg]ZqYG,l2Z,j.7,[02J^J]!oeoG-5:U23"5Od,da430\ZNXIHd.rJ*;jUt9d>(t*L^GPkX]ce Ya#NC7c#MGa)!7cN)j5LF%&X0?D@Jn%*/A"NAV$G#mI-+mj)#/^ecY`^!WKfRk[)Krjj1ARmUip]/P#qG4I9H!<]*fVBJ)k*dr3,gI*\'M_bb4C8`ip<-G_<?3qSr00rZ`%W`fA*7Nqg6m`". 
t] g2mdFdW]WAc9)4=#U/P>ngYq,YmqEj,$oYIi\#NF(>ZsEZ"JWrfdhTGDVP6tSH?`Z";]:gMo_QQYXA7I4Qg?Z#tE>?_t)*t](SE;r;J8cS3EP7.@3p'VJW<A)kt` t5"(Kq(5$9cM9(?7cZIbID&b=F%"\"=?E5&%5$i)AKY1&3IY3cQ@:\!:'BsHm)f`=W]7A(F+^glU afRa5_Q<KWr153#1Q[&B9g`J;O'7"?MJd-7]`)M/8$QBh\;e237ma/4!D9aN:FJL;SKAWAkDIt2IN!5'TFKVm?C_SA="(>gD(9.).N'LTH5eA(sfaM8)K'8QK2Y8*pNK9KN!%"6'@ho>PL,fHsD%eim[CThlG&SH>eTd-BUo]MC?D=gptat:;":Q(X;YG*5rU7Z<K*kAUA=/oY/qE%?ft`LdHnI-j// =(aqk\[K^ohBsC`$%LEs==T$M `F\]U[5WQVMA3oGd[p8l]AJ`m?-IoSR&JU\c\!&?,##nrQB:>HN&[9[!)eRGqnV#8A44nOnh(0Q#+G))c-nQl1Wm/]t4bfpO&A(p)H_8@!A_!rP+0#(8NNa\53QCFI<tk+AQG-.=cJ-#5RcTP>!Irc\#NmmKX;AX:haa'Xl)haG'N+(A@hWfd'QR(Xql,mX!#OA.-?k&Zl)HK5Lmf6&Kp!!d<Y\8W<][Q 2.K&b(7`4nmjl5ELj#PiZja&G-`!fXQ2q<VKb97D44;7i1bTe^JcPJ#;Yn[E>S[fh:rabYhCSl#c#B'F4ZZ-UZ\$5XJDf_X74K]nr>T+"jem!+*`s[Pq'$C$oc'/r=h,'6!O*=3.1m3.SPF4D]jS(X0#&@ZCo)[GG_?`oddGD 7s/7[bL2 fN#KQ:7i:B5(2q$;s=n#^1"c$,5G^6$`V!H?!I%/M.t5n;;?%rph(p0YP/[heg5E,:o3toP@=.OPOPUSnd%RH=7k[O&Pl6c&kYdi^%Yj?<BgZIA@34tffA;lIZ"1(r#Jo4`KG<nIp#A@^/9j)c7(?:@BOJNeI!&E=>"FEl-A>?0isP3KJ*=k$--I5Rr:EOJWo>1bA:n#($pV_7VoIs)GR2[p>C2C\p5A0X]o J?QjNBb$qIeSQL=@62^#QhfA*aYA%f'$XRl5l9gh_G,LRb7a7O1Zg_;r&A$tA\PK&B8sASCYObeOA=(FACKH3!`[DHfpf1\JQnEb_aOk@UbO%?'3-WfSV?b)DIcVD^I;eAU#9`ZH(rp!i.rf0tKF[aO9<,A8#or;^g50XfIG5;t:3<)i"gG9)7CQ5;7MLf>)nr9EY@kSD!Uis*c0/^Y[aIYBlKoX#sQTP4p2+]$ZA1;ZmF%b.aUTSi<"*+@A?G.\6d:EX2%=1PfD'oeF!UtZ^KJ"3JnSh]"s^8M4* ARL^S+oi.Z5-$j`6]%?H"gr;YNAr)-rNT!E6MgGkVN>8`9e#$ c#/`"FR02ME,e2)ber_C8^$#USH:EaV )pi;h=oT aW";R&&9B Q]P@K:!1(A&"G?]L4_c-j3lFVCCqRJo*<$3$!R56l<^*C;)1GBl.">LC2QnRi"AU.,%2o;E9>F.TW4rCOfo/.iH_\Fk'T9b3bR=T06("Y=C>A%7JNc(`.G":j!GCto[<hfC]9E!NOA@<GN1\fWZb[-o5W#!>aZnDR2LTk1C/,A$%Qna7/lFEjJ-dHFf6c3`2OQkYBi"9m"YZf+<l"qPc8%X0=#8ZIOkEN_EA-t\Z$)3bCh,A;eqX%i&0[iWm$n6Gpi&?]XPXZrMl14%H3+R"i#4Jea]3_dQ3 B1jsIVR(AR"r_r*T%l.b?%++-G4O5B%oGDg6 T/Vij GYeR,'#q3OgANWVsVdXRo'2Vqq!"(T3U>,Y%.&o*;H<*=r0- UT^ gp^c8J,5cgsUKleXTH^l?kDPC1 =*l,qpe@n>0enhaI!4A?OR6jb.ja:/ 
qrO(T;_9iTMME!F/]rOD0`%V.]BpcQfk]Po:)0kN;)d%.W>GCY/gjb,#Ce7'mCOW2FKb3Ea4ZJei%:a@^T`p>D9Q(;I?U;F7(Q!eV.$(O>l_q#olXJ!=B3m_N;0QB4^<nBB%N']-0si5Y[<U5:nKi8/BmThY,q/2!FE3hAA&_F^'101\sILD*ogY]CoKW]N*8j52&!F9'QEg@h[ d^\r*(rOm%+];!#)Y996'*PW3tq[A.:%=r705f?6a(Uj<prOQtaSNINUIK,7(;nXIp9c$g@(ID,N9THl6A[Sf!QXI6nF2A"ol3?5Z-X-8j/3gcm<mbPr3Q0Y&_R;j`XdNtL1Fj2l5mlcS6rf(IV=/dS?\\(fgoEfL&(9Y#*<F $K>pjrmRp-4.#=#)Xm382Yi&8jA[-a!(S2Mi6VZ+>Q lFe ^h6P)_N+0nsbb9Yk7R+FI(WZE8\nN-.eT;105'(^QCe<b_"eSJ^es-BDt@O!S"QR#o"NPm^&nM<: SpI.=ksN300qBAUQCV!Yqbcs8AJ%C\(dN\YEI8,h0OX` ,#:T#flVIA';T.I+cCrOb7IN8;LHLkH`)ri:t7WR>_E$J4e\/8ThAI:CU`R'&>HPL/pt(n"Qd\A.5G3r_>th%lS_^sk6H].jfmTNpsJ=",8$"TStbUY#&8Gc-q,Xh<gse$deq<a]F6FMYG2g6W`_oD9-%Ne;0VnpJaM\f-/obn`34jB1&Rd5sbl`6<:D[.A0[DStLA=[F9.;+ 7,48i!56#br/X'eOE3^)cJp$0#qWI36pjUq\Rm+Lq,P_H*[4Fg7lb_(8VYlaIn!Sf$/VBg(WFALPf=#,PpMT3< i2af/k]$=%[/W1#(dh+/Oj'PQjAU!4H*:KO1_Z\?;QL;'N4 "k-Ee>4]h.p4YZZ@>hm8^_tZ[@AK_h$Xn>47//PMT@Y!EO=m49=?CO_;(2+nS`3AgXf&][c_"$N&BUpnj+SFhTQ1a<dhW Y_NaA+CH ZWoE%e-%'3Vl!rZ"#_g>CXsZt&WFpNl66A&2V-Qt(&&""=IfljF@"'&]Peg$ik5L%TKo)_^7m=H37bb2(pOX-APf_IlRmiAE#c8*Z4MI.bJ1:``C@pSH):W8Cdo<o\co+>SE#mbo9G5c5"b!VC9IZ\6GqgGtAAlS#S#U6"kE@+$`P3YC$n>J52S5t5P\*Vt=Woj.->>WOg`d8:iidhR\F`Th[l?;m<,OAag'DY_DmXi-asq^VA6RW=lpHj*"3;J_dN:(AZ>CB<a*WrK"bUc$=EE@^/7k+o+L+.'jffPTD4B5e"^+Nb#tr:\Uj?a4(S&?O=l/lK$`%Hq`7cNh*7Pf;g]5.78]P_MN`PfVk'KWUL%HSgp@(Z^@#;nlJMXpSD56I^c0'V!+Mdi,pmK]L'D[orM<s,bTB.aEia>gWC&2tqrf]/*jl%:Y&MtK@?>\Saf.>:cmp(-\n9K>#5DCA$!_+k4kJN'Th]3Qq!dU2Wd!=QlT&n@^eBd#$`/rgc1`IYFjP1KAh7!RG2-%I\t"ECc`n6t5`sCgjHk/rMA5])U\7hMGh<(UAc0RP1M*VaqH9A;2XY.s^s8-KeAH#ipk+5@U*DF4`#+h"*,>3`6[cWB?cGR-TehldI1#E^88TCKd\m7&IQ=?!Tb;DM_$RI/&8;Sc%m: V4o;3[48$>5d0R\Wem;+US0E)+`G<2=]Zb*L$`W0[^dN^N1jQAWEcA$)L1sM 4YIaGB>9<s6n'rUAE=kM?OEof]"ng9Dt7m-b9LbR;Wj*a=^-Z]Z2OB\Uq)qS0%AUeJ,(?aK6i1$"RdFh61VO5*ZX/0,Js(jo'&ITRgP7nl/6>,e+4m4C1_[g5[s-S_5#L2f-ZkCFE@J"l?s1(&*":A-X'Ki@B*Z?AFdY5jlk5OaT,I^aTQd^V]iH2P9GY":ph=P4oLi-)3P&[LS3"cA.85a Vfbl5VA`#[(UDKh[fn8+MkV&B?]`6D3o#JZLa9Sfh0kp%Z6>$MMN.!ARk$Q3og 
AXd"CNCM)&)669T76A'*lB&A".N#Y0gsJ A3sc #/cL"&a[dC;S9$.C8R\[Tg`Rk)nmt @AD6>sk5+6-8V=]F"Y5`Z-^L>(2Q0_ pcig6GKqho0/5dZG>6.U__ S6,!L) sp8eYZ;<Dd#SYY\mmYt1BHO7-B=q>F`K7GK-]ViPKqkIrhM.AT%3H%eH 'mqa<lgTX Xii$IC"#_@t*KGro(4j;9"V.`tPX%3"#gt/73ta]@>4bh>\.3P)Cis(/;W<%LVXeQlQ5lO'31IB_&Ue0`<AdoC&s<bhdA4$^;-4WD8.=RCBq%OQ%p[d64UT^^9`9.ZYg Ak.\8oR6Sba!rLVn3'C@/AHHE90#GsphJ9Prh$8.%21jTjhN&l\#8G[!.3C4 ]d4J^PTVJMm4bpl*3PCkskAktVO1(8&!h>#KA;A*1oGdKf268<lLEFP"9K<!CA\qo 50!FH[;MPjtiG^N>licsVtcpU,5<#jnr*O`Yl_fTA<gX+?SqA\1<^"&9mb;q?$fn/tS#@<9nPp#b3;=]OGb=Set9!&A/,e1?C7&XNr4Od,OAV_;mRNEAgcjr(00X]p*n19:@inqWO3Z:3Oa#_+l0I@`J$br@?6E^]m'!$Fm^BF=CDlnc>aMrlPDU ;d JH4g1]Hqan Y*"qWsiA[0&h+6EY .D]4&<d$ t5-T:-@/Y6!A]7;@_#k-%]A`VO`h1gDR A!i>e#eCRCSsC8Yg?gRP9rhdg:@g.H!X2Ab RNKBsQ=>mde.t3)7&m#DbLG]qkm1Gr_tAc["br-7-,3#r];%W$ 3m>>K%JgT;,!@t`IH?]R9]CEq*k)At7Q9<@nKbX[8m!:OB)BWP#<_AAO89sON[g=LMBk3](F]A8\RYt[VmMs#,iQ[)eC6JA8r""")gW?Jq#5!AJf7nAa)R,3e2Q?cA`nFp"M^)kO[))qS6lq#%<8%%,N7A`#Lk= >kAaE``=S,r"EQN\3JMMR)bF/8!Wf.O%A'MqIg>EC$s`6%: >3]pHd[`hZ ,,j#dH1>OWPkL6'^HNb).YhY16B5&s:!k_,"&%A:!ZRpNfHVTO3q<\QNAt](F%K,B03a,MYh`k<ba]7R.,pOCE-_j.Y].?Sg%L#m_q3*(AC\rU$!1:CoYgAcF96VFGlK$:Vpn^mC0'Fj\/'HY6-@,8Z[c=p$dsqd6l2\Rj;3cbM$5][jhgrTQ,e90d_)l"EA[`5r1P#AXjeanD\%\&he1e!J&iii?Eh8'FoZ6[djVAi&P+c_4RZN"'SoH6&kb>^jreYh&?NCqF_H8\99 tWpjJJZQVqS?p.!-:NrIC7*[7g`dqa.M[W6i<c,VaV)Q 8'[>#8`I80ie6GGY+FWfAp6Oep^\=#=B#.rA8:X/=d` rP)t43i,s^bKkPBS4G?_Z]Vhb=HA)+7E0M ^V,&^`c'M&kS(r6b#ft(F*@rYg5Zb%4.gih[+,r '_V)d07/8(d7Algo)GEhW>.g#jhJ\`C\Z,Hn7k(41`o:. 
`9 pHBRGA0q/%<_+43<Q*U?Dj$:tMX)Y3SJ-U^\X- R[^_0H2hIR/T;-%@2PtM]c\??A1D^F&#`H'&*00MjMX>_2)lB9gjFH+OLjVa&:GrqR"$_'m0/GYKA`FPio<Z-#+1qf1/2YLSUW.:L<+ti7k^j(tVY131(Yf !Vm.&^rl"=OR-`hA-jlogkW]M.k?U<d_I@IXeR[c>D-2KlH`fk"Zh,@sOG30,e]#k\4a)47S:%)lhNd2-Olmb,r)hWDW^pq6t3](2('jj8V0oDRe)-r/[mfAHol)'>"R/5U"5;5lW>1RDc5TB7`gR<(%jmgVCl+.dLpmsb-t  I^&@.lA(99NA=eL'j[ZS5iQ7(27&-JjCTlHt&<4r;OorA+Qr/1-3=jc'g.*$`Ap>3)O'_M"h6Q2?ghgsPrJXRbA8T._&_)04!0k`ls3/]AH)Iq>s&aGK7fU55GEQLD$J9'W3'.`i2=]F7q)8e<UP`RDGKG>tEl!B+M5_Yo1bN-B6$79"0KM]CsnY6;cqeiHNdO.9J"BI[;MCo>R!rDX7T;*9[_ \s&=*p7e7>4L'L>knbW(n9K 45(T_<mYo`7FDa&cSJ.:\o<&%^t@OtRqnY^lKUEE\_]BSZQL_4;Gb^#C>G[$_4k3Y^8Yof)550FJ9,^6 #AXiQ/tekn]OA3Q_IG\;nbP^-@!EQP6n!h]`-AV*6Z8kUQ!<fBR-T"csCmN,b;8O/mpeIN-l.AaQ[Tc9X^.i;$If4_FV.) ``9,lNI\Mj omK.YB#j D2SjfaaW`?TKi+CY0dOpn^##5oW3O=YN')QI-cBXb-5EO+Y.^,;9nr2n+;or lepUg0'-Io+N*$1^[saO$<<n]Yo13^k`j+/UZ_gorfHm!tAmWiO(<]OcSQ1.$:F`bF&A8;4@MFKJe>g&dL"\@C&EP cN/>O<ElY c`Uk Zj4G]5b?+KZZr*m* Zl.pd= MS3JH!rOC%E(JG1DS$U&'YWbGn!^CL$SAd51KF(kL'ojFXsM%Ck=AGIA5J5>B#A<J7=\Oob*n38>dfA["=OW]s::[WLCfiRqa(9;ldB[REY^Ep#//28eMZ4<W%Df*q*D5AM*-*Bc"9?fN67E<P"en76$U,,!fhW;<-;?UV"$Z(H?1m-n]Q\?dB61'\+lKA]TCZ9H:5pQ-6=Q$G4729/#!n[s<!oUB@0Tcra$e.Db@dAFh2Sg$3'Zlbmr_oC"o?]>Q<L6[m#o[[HDXS>T%'6ZfDBlE[KP.^V3ljM3A%=?,>+/-P83Mik2hsGoB.a 4dne AeO7i-mqXo g"LQb)4!G_ [e">4Rg[Yi('P*Km3K\_=1(4IinfR:<Y6adAG eir?6UOAd'Be_bM6""?H;QAN]Y6leN`6TX(I3f$::F/ra@fAjpA63Yc %2\3rJS8b;qsC@]c)UT'd!A:Ak!'\i*P1 25WDDb'D)b^:so`E4l=?8F"lpb2nAp+\eDT21''$m\&l#h\g+%"<dO'#.aVE>R0lr8lfk=94PLDH.IoSBf,;]#nK"3kb?Z6RY\s],LZi_"Sd@NAI_$GHNBECT-grSAOcL@WX1,'FDeT2XL8OK;d-,L)lP,3\ngKVG[Riea`tK2rist,D&OfRZFD#p9]8CqpYIPq*n)pUEMW- FEpiW;_(< <6To3Jb?RdV(@1j?r.$#2^>U)#AT]+t?=FcB/aO1h',8peaN!_qE'CM;aLTPF(<!_[8;'07l_`OI1?g,^]fb=;<k6KBS0f(.6e)<l3aR>pF"b'`kk[*G@%bJNKU<a]Ij([HC'lI<h\JcMPARB* JC>)=]#?FpeV#28^Ffb,1+mY6<8J4+=$M'!7 
=_"1PP12";)Zt*MT!V#B7coB0lRHT]rLXEY+3t"-oq%`gV)1IEs7F?Y,67h%8@6,[H"ihY9+5:sHTNc/qo-esI"_nrmHUZ7Y$!Gi*k[.TFrKaagJ#.ZYc`IZDb@S;$R`T([%\otr(7'-!-8_P/?#1qX4e<^==3L3codji\,sAA\)"NR^]KX38CDA_ZRk9g/*Rgd'dIU'T:UREr*9C]bR^"e)&Y6nC(fr]"=0?QX:6ldPb8)\o\Hm7b`:k_PNBYO(itiV"o./S.s\Q0g<I;qQ>(!@%qKX:32LDk^S(t:MBCXA<j Pn'4tpQbgBeOX(V5?'WgC,7#Thr-n K1VU<VKjE,23!,G2tG0'?.c"#],,K'-9AgQ>6NN1<q%ViZMW\OAk;aF.egS6Y+(g1at!Z[?N.IlpD6"BG.!J'7?;l3>AsNU7_-)@(He/,ltg[<=IUe6,KP$1APhQDl*O&lpe@2At-@SYUb0%WYt6-,Hg!)>DV3BDO:9I()T/t. dja7JL;Zl3@ )K7@*esQl?.:8Mq"YW@DisKkf@T:jA+<\AaFYJm>1d%;m:('"A0aP`C?>D7 eN>JIfTEIAS1Q@B4kRn<Bc9h?'+#jYqrEWN+n[.?f0%cVj<WmFD?\!EC#[;6$  $h)H0WGIS=/j8r,LP1tY<2ns' GtS=GOG)8mBnXFbFbVn>\/`4JId3a&'^,\n0XGE*Zonfp$1?rO3LT3Xol:O`46kib+<\F,ilohHRS1?<X")J'<IhYe'ZH:\e;C]m`Mpn65`r8Xs=tj@;<b9a$c!0c]<^_MQfZi''P,M=50C:Ac*#s=5p3mga^t=nK':YT=rAacMX50lD2S*56F;[: _agqTtBLO?&D AH0/]S4O%E,jjpMKkObY.82n:PqAD[9+1UUkmA\;HZ>ElmM(#eNE(!'Ip;L3O"t'lr[VhZQkcDO\5*,0d]1PD/( <I#:X8K:;9^m_[jU_EBFF$;<lCN@PCl*S\Kll06=FajDN0PgBT6MDqLoHQ_SC^:A(VAK6;*EQ)5^8oHk\A!0%K3SmDf_##tRS^c"t.3J-nV.&]lcC/=Icq,7&/p52G]idg2/s@[O9##!X'F^dmJ^Y@DF5>mf bcoCVi2chgQ^I%J1Qa3I+[#k&b;Rd?PUo$Nj_:*[`pPNtob%3Xt!Vm],\`G<V`]QiS<!l_a%AQ?"5D9UaVZ><"[AAYl0n$k>dQ[o-SNbQ<qTEND[<a%4O[?J/G_!^0[VPXW 5Xt.QDD/UG=NOsr?`q[?4.)QT9K'J^ipt=ek!g-9>'ZU,<-T"<B])n:GE4I$!4rRGAm8V<.XQUsiHK $aYTl0@1W@5<a% !pa> Y8Zp15qL@=Ao[NY2M;rAJYMn)X8p;c7o6*ilL(`mXG:aH3KO oh1=t_*QZ8?L6Mrs^+U9h%O:b^WlA9QiiRi'nZipZ._VbMVI$4rBg(R5pG!DD$bn=2:!*HVUHE(4!TDZO(A$*knR\hoWjk8Gojroa..Sb]Md;t>Ri49_f%8q6(p0i1+7//+5<KJmp)80\_m[M.qmCl_st06T0YKfldg$4N =/ MQKIhpmN[\oqOdb[L$A#m6_^H^bC'+t;`cZ,QRm\L48e1KgoZO,i/O3i9$)];(;=8r.q;i_?PTmK+A!Zq 
J8%fdh8(m]L*6?2jT8j<%!O[("MA3hX=Fl>P0Y+lso[="9V?)#\'mBrihJ9`0NG[*'KjpP(<ApQPWZpcG>*Q9ofL^e3h*kb,B5YJ'M$#^P>&Y135]ci3aD"d&9P,"%5"_YUXpO"F83JU))9PJk=?tp(f0.AO7SO$K>B4AIa8IqNl'OYbY"jA;[7n%Gag_9W+egY.f8(k.h)q%LB@G$:#C1FA2XM0>TkgG$kF5[gPD#m&`je4[-'8PrCC"VL!)sf;7E5Kbh)r,&kJHlgEAS\-`bqlN]0Ya#ka2*Mi;K+@EArBZUIp.E>NBh(j`U<+-ntk>t-QmF`Y,jA*I__nA._k/S"U[ql^[Ctc>e=Rs:/oE7/i(6A8^c[i6'\tlQ.q[#DoBe[<BS(GRMR8(f)LTWWh9pAD`R+Mi80C2?APkYH"nA_1^+osK\_brdlsdSbEetG0E5$Irk(:m#r_7Eb&'e\l$TJ/lAh%a7(;h`=rHol<5aqhl:E,jHcpPoY9sE$\,?A%! e>ZHk$ Vf1W5e?g2f\6bK*SSYEZboDaLqNrBB8nX(sIU+*Ca+!K:&/f1_`Ym9rMWU.e,r/gpt;_4Pb!\tIW"m)f=T>UrS2+ErcW=!#LbbR3h<Cb(`3n(fOSX2!jTA!i9\K1K)AV]12k!!Hn2@8FoqfBK0)5kisTAbMb:P"^HogtX\4q!OeF^!G,c;boDYB/7$#cXUV,+0W*GjL[3o\#(:a?H'BqipVm1-sDj$=d>niPS#J>3i61;qWL.aVmT!a_qA:%LZNW7(]=F^p^D$$cELa&_e:AACH0D/b'BMaat?hrA+9tsc)66"5OsRr(ckAUFL=TgA_4Da")KA0@)1gp#c1d.P4-n*ng*.XG1f.=\-;(f]\s"sE#sfSX!gY,?]sAjAOs^V_&b4c^?i=?iDjp7p%^[O+jC$Nb@l9CstnbkrQAb?'tRRE!KJ?+TtQ26[&eU3KW(L7`L)O>8E-)GN4H6/X;Pg7A7j,$]&[?KLq1* o[o'mI<maVTI==r72C MRP./A-5V=0mV`^0+e;.h-&JPGLn&gch0'#@-?OKl[p[8G]8LRrOTC-+dA%3LenE$iss(cg?/[L*nH$A97h"WSt<an!'ZceZQ-ZB=bFs)KTFSCA^Mt^I88`i3hA<7L<!e@jfOZY`?nMO;D70[A=ngLid*=&UXO71R8<(81l.Z:Qkh0.RAf\+Tj4j2aEDJg?>\iP_m?ECe=AMh)Vd%?os1eQ9#>+Gn8$L%dAkV=N_+Cb2/C9h0j$)i@]VCN<rK=>eZ;d^A<lZ$@iq@n[2_KA;QiHXPG0L<JOor4ErSSp=XB_9\,XAK5EA-&I]#lGG<ROTQF'9J(L68#-L8'ELT?C_D2dEd_t-b"Js&9-%m2=85% 7N;AmZh]aip9CC;`bM^N75Xr'$e)+DG(,\%>,Q7:Z.?]#DV6q1SsmMT$pqN""p#`5HjbIH2^((E*dOp\UFAZVKkd2S?FM=n&[nHMJ]S2*aA%WFE7(e"tQ!0T()5T]?F[RQ\R?S[Ms8*-G<iaJA"UDtom+[I^K9p\9snIUt(PA[["d3DbTE4@3)7amp,$,W`Pl?A1]Sh':`%;BR+@bK>[fP!_J<6MbE&cY=$;1src9GdkKiJ+ZlR3m?M(;Fe:<_D`.;I HI !25$:BS-I<^1SPj)!NEmJccU1`0 >o-S!b7hZc+C(.=k(LI66g-"WP>Kpn^p`EY)T^HB?nA[UW[HM<RXbY2;"kK-9gH[_MbEW_ET;R!M"?U"p!.+[/c'e FDZ@is+!2tF%[p3igUjIqIh.#RiPns_JhaIE=Ei(T^OPad#52d 7*^8[8kZT MZVc7C'tl8jA=\.&8VQ;&[;n(8*2LDD$#P/YbPde;c3<sB1>D6&,qcNb;($:gJmLcLa;mR17pA0,I1lBj;sB4U>?t>'_sA 2<7&`HACF9IMJ?HlSCGbng-J[$dG+&ntD26WAINnS$ A]_Eo&;AJdci;W?kUBK 
JbHDU2=G5Ef#hnf1aTo\AsqX0nBCH'iF)sX*->7G?;;FSkT%]^U\is?>BNWMXGh\in!Ld=b0g6OS,41jpN93[OQa,1.*kNNV<H.sHba/A$1Pl^nr)9d42AO3AU#^V*5A5EJ1Cia$gp^DnSGt%=B[=en?!q\i&A6eiad`V>Yc`IMI?gX_3YZ;ZfF@+o>a^JZn+W6P;UN0U<.WdE4)=s.m6_%4fQD&t9@Q)9>F"O)3*oS An?632I[`68Gm$rdPAKMtHb:dU)R2DX#mmVB3GYt71=dDX.D,kLOnNd"EC9mP%S<fD#&5"$'r]5O"3c#bRaIGDtd%lb Lj?`fIQjJ,e+q\>9Ln0IA&)MMa C@1.a+:"J)A=>LimCYW(1t+9,4h\./VSQHRJt7f4UJFn:HY7bef%/5=-n=;=Ek$o[Q( .:-a,fQG5TE'dntYs7hClXj!)CQ20gEE,3B4^hFrCOL$;id=;lml.kg-]dX0[ZiJ?mZISK"L#H=4T^osPQU(P/T.AA/Nq=oh4%n:m#mK<t%):7?>M!D;QsN+1?+E^>iC(5$9-K?;J%^E3m.UIL6WZ>i[bY-sDtO+6_BDXm1tZ@%c<`&b,g 5XVjH+AdPTWB"+sp#^=hX^'NnQ;o;<7-c_>l]%:O@&2ei68IoJO0+h#Sh"'0C)^1=-qb>C)SQm?)ac!<MIS$K[R*=D&XI6BSA=U@&%Vn*4p`Md0OV`c\ Lf%qmJ#$I?5gs` ((o"]c6J,i:^!gAb+Q4rjk9;[SHo^"&d$@=<^N%e)!AZM\$n=V`_gg:.;k5R8*aOU$&&p/-Y-IPf:j,XtJ1:(HmD"_PfOU'Ao@g+M!s(G[f!<K!rfA\R20YtA!&B8d_ PqD8@I>5DHA\c4sNW@KQO]c_FBO^B>5B('FV0`?C36-&h=ap6 ;KHkY'28PtoM00:m_!:h5AK8<^)O@&27VPRr>s5L6CFB9ZssE(ZfTdeGPI$T;hX#ksB(D8OY[:7_8oD;` ld8QA#ON4+:oi(Q75g]Uh#'YfDJSR "nVWiP$q'>(R[-=Z8T#WI0rWo&k;0<$\9F]B8O5!baW)45APX0PTd4ST^J[F5moRCC0InB)/t7Qcq'tWL%5tX!]Q)E`fN38\8$hGDg5AA:)e9cU;qGf2AG'2Wfqk!!j.SV2EYn6a'_Ors;=[F^6D5B+M&9Oa3J>E-DAMA#[q4::0jp[l)h,:3Y*IVcb/"pqV(EjehdV:&\X&]'f@ffj4P;)'W Cf;_2-C0Ps_tJ$@*ZeV6)LfY[a2U.m6'VL YFAG-)?-D<`8!bYab.GE&AiTj?*V(hAsHaqNHUN&3_M%(Z3.@NB;%gfi0.aH,XVl8UTlG6gAlcZsgYM\7ZoEb+</?@ekq<l4'CTEAqKW`rIU1iJ]_hhm.8Rh$ah0@HqnkAG*R8Rod6s/Ve#ht!ak4RMo`k`8N*3:LnM/5lQs*/^aR"`fKSZsCYd7?#X"p*A?491oA#Ke6id0*NV`c_3lZ.21]AQqH;c+BcX!]XXfc^.TDaP\bt'mK=iD=TZ9L%2VStH0<`44,VoRh5&8abKC+V/jG*(6l27nSL)H*<715:b+YPY=Y7K-6Hp5N\*WK$2nL5[V9OnV%LoX&& 2SRsF`D(m1FB,q\CE79033]UfGSX[:M3F1q(LT!hjNATto:QQPe7Bm.6A0tBfSo^-@l27HS3n;":<K%d6i(5CDMihB`nr7,0DZo5t@?1>FW(OB+m&g8AR1<!tHRelLAP"?FRcatTtlCCTkWM6V3V:HK"l#ofVd]9F!:ga-R5#a]q 1(Ao$.i[9*X1M7_MM_gML^Y2ebP*9kt`<8TqqN@0VeW*8c60)(dJ=Zq#0J_^&4c,;NTBOb- .=D3BW3frn3i!-h&r=EA8*ts>BB`NdPmTd8FD?!9*NHZFQ(*Hq5kc,/oJ efAmpWGYtP+QHa#LqN 
;bGd<F*nq\Co"7k!Fn?7'BG"MQQ11%T^ID'J20_0)@lH#bhK\#^df9*=9tWE+oeR$o&AqWe!ADKV%kn7RHUNS;sn:]J"K#]Bl[pmh@eT.1=k*pd2pRs*L5IKqbi9t+hA#DK96Q'fg1'fC.3*h_(j $7Q<`.<`'>UOep`3&Q*:NafN6@+-q)%l=Bh(AjZjaIsF!32;'aP31I';WtBo"_DEk<#?$54O4mOH6YhRTp\oUZ=JTEfDo9*H@I+/A`/H$)PL+EVM 3R4nR1^jOc!_^?`>8KBMT*]QFP4_r9a&M-A/MM%9&Vqm58C;IJPMR&!nXR,\6'D'mFfh(TN+.q#$=k><h8cRE@NgXi.L"X`OrKc4ApGa+ .1o$)nkM3qJN^ FHr\P=f5&kdd`Gg$i3GS@b/(2Jg7#5>T?5t9H?$nYX:@=DEFLVkgA:'j<go`s].(N0lDA-f0Q.^:oE6"^!H2()\A"<9*0W$*7o+U44n,X[Z g2\E,e[d6DV/;`@D`FT&&,":snlJ,Q+3siD&Yp>M,?dGG!AIKHp"$0!KA%2%!d;(#\TG>ULXZ]'24H?Kq-hK%Ee]18#S$nTE*Q$m;[-:@7GI]r3$#bNg26YaDRCdb1H&G[]@f@&!#%]7o&J:RpN"J:Pt5M:2;Af00/b"F.7!?_r6gHYI-&&l*42`PqXHAO&cK1S q-A.dej%g2r_N6UPcZAj'/Q)iMYQoWA!63at](rR8BrAmc@5 g>XK5U;H8nY)jn;jRV$p.%cAZoJdZb0.U:dba+h,[E\Hhc6PF(</$Di?7!U<6-@#i@4h"%0$RL9H-9 t3KqOC$lhECjtH?#CF.Ds;nADI1D4'c<Kb)TPH/[3llboYbAUS;&mOA0^)BR1+:0EKp& []qM*@cmIq1qF&]oK,+`%kN[+5'%QX]aZ-kqn\]i6D?WP2K^_eI/cj%/pA$OI'7RW;:T.Gle#akm0_);11H/o01^BBKkh3daC]5nc/.S6P0DA6]r13E(41ZKh.cOJ(>h]l*!O-%"kg9[A3pWJT(CXae412[=6,Ahng7YjQ<>VoYsY>;?.56aQBtL,B+c(9:4]43_H(T--b#jp38=3k]5PJZc>mN81oF\,V))#F!FO8pMD:$BI,H2C\n1(U[r.G,2."jbEj4"h*G'5CQg"*o-_,o9Yq1"=MFqm;enNo)2me[s@t.b1,006:h*-7kAL!,dmGB%p3.[dh=meVN/,;r/n^Are4-1`MV4rN$"q,n-M0s[K#J=@/X9JFr0/(gle^TC=,-22CM71id:$F"'M4hWk9mU<\GM13iO&/+F'>'p;UlE'M$X)i2^')8V']\)W8i1h'=]!;D4sMj$7fO0=[r/O;G&@%:E]DJaKDVX0b;LB>1H7\r0+C':R5aq1k_kUe<Sf^G&FYW\ n*O5$F?B^ink#a8/QOhAlkDiQZ<H4#J8!YL-PtV\#\M6im4W'^m`a(4G(Z0`$dYfrTiJ4:a3L0"9mR]<,;rr,PJD=lKb07ZoQ7<@tigP!c2UMGq6.6k"18P"%/c8);Y-)?4n`pXkMiDD.AdCEi](X!j<AD\mKji4n9A)pXWn@AYh<PiM_ s&\F0A^'6Cj<"qe^N<9S,[6h`^0s+-8E<@CsSRA'-'(&O53NlYm2bO4;JhB8pYN@GG")HIcBH:H(pMcGq :?F[04Gkp6$A0AN(!a-1CpnN6U[\[#3%[,FB*e80gs&BX$2pM^C6nk(m/$BQY*OGtU%5(W_A0CAm1 bt&4]UX\t)QHE?0Rr<eHQO;7@o8A57QLJ=5?s+4N@AfN%n:>f.BhFW40Y1^AN!bM][3j6#88q33ddAoQ5NEQ]ZZ<5A=(/cN(5@UZ&7aZos:0 pdrGQKe+o!!c !I8N0Tq7N*p/S;Hpcp6*<9ZEG+V%;SWHk%eQ7hU908p$nboO:5E'(Gn'BG4)&E/6C?OMF?ZW`(lq7QCfPF89#`\SE-q2]N2nstM`&iN_ 
'`f6Mq6&8p+*TI/I.!kM2c)Ibp+iC6Ll\!iB+2gP/7b)?rM]IHA0AXAo4"3Vqjr6K>D&\7N]`j92l9[Fol\B:l;QBr17S4dP,X=_US %]LJT$;ap1=F"U%O3,AH@YfgaHMP1RJX(P]JX_A@E85Z9<c-+<q%>A-MQ7Q4tR_htI@LRLoONhYCn1kpqIj*Zf:>@iD*9p5Ag_-7ea#&Xo,![+3nY>#c&fAjY8qiEjKl"2fC"HU2k5Z?(VE]lc2o&$N].k9o?*/;TZao'g+`[E/gpCbgp+2[B8nDGdtiqU1t)HBNMn4Q'j;b<4k,.V?XIQbon()B8)jnL$)s.JGUo><)]=+#Ha=+dLlZQ[<$Hg/@PD7fAi).d5*Z_F^jk\K`7&9#>W3V'1lE;N!`N-<0-H+R50#A_GJVdB3>@g^XX`4iI3 KU\6*/:BNo^U-YAlY0L`(BS;EY/rHeA^m[]t7^[dMT!-70VfUgS5`W$mH"OL'h5pO2640 nVA[6'rDE6sDJ5;U?[52?Y03-h$jT#6K23$;7pDLR\8g?SNojB!VLtchs`MR-75n.`SZKf? ?+4lTFJ;jE6*$mG4'$K>BSZf/=5c/IK&&Y`MARfqg/E ?[3n+fd&NKKe-HkFVch+[3ojgg?4H [5<=>]Pl 4?Q9Y\4gsUq.AGsHP3G(#^]6nILm<Ic,Sqp0[latj.Cb1$_<'&M;A33B=!?E GtR8J`:*m5AabW1TAR3tA-,&$UO5*-.H8"\IWSJ3A98-RC:'JBkfU8$Je)M0*Z2(/Egs>;9dR)I\sH2dM3UVKE7l2aTN)7:iKG!$PB>`Er)j7kP:HW[0=Ad@p_(52`>AXOcgkr0aM=UR7!KFs8Q@06JF(.0sSM-*#Q-&+Qp$q=JX3X?O`EC4eAtJ7fP^DFRR>4MT/jnZLqnWafK\VL\!Fcl&j,?;aO@WCJ49e>mJ=]d]hD]qml#mDsM4t'Kj9j@@jWp:kmmo<RdjA67"C2OZXP`<GFLql<Cn].q:kD4qk#@AEhl(%f)5JX.JUo;8ENC9L.C*=k7D&#Xitehq^?AX8#W1V_IkZNllg!C%\4iPh>SU=7Vm"A74oA+SAe^JX,<BmeQ'$+h=c6ao'Y]]T]6CgM3dtR%=)F9Y )&*k#PL;s+'H-FD%-70!r;6g-\g!tCPFK'X\#qB"&:@Ge9((AWB,Z,04$^f4_QWY4Ao7C1F.HAg'Vg. 
$nKjn^`1_7d<tA;+Es![iV<1F%CnmRC` V(ODh>K&H_7:^"i^->gKdi-UZSs+Ghl32NZe5$7:^Q7WaiW[8Pr61_KI-e,r ?%4n iag8!+,Jt.A<aEV$7Z0]#$?#./o^L?$W\ 9`,EHa*+f"N^:H5)UXIq"Xj't<X6n2b EQ+$+H9bE?[*n*:1dtp<P,!^=Yk$mWnArW-UoC$D9N6eie2lR5cX_rV9%VQR52sc#!:dH<<;nWji^Dj aQ+,k:2_[0#o6KMKL,Sg?0l-4Ps0P,Vh'S=5l2@hRi* E"iSB$U:1>*:COLpr_HK$;_ga[H3E+('^")O6n]N@#:;dl?]09dbH4g^pS60c^9!>RF$PioDhJJb14(oF\AbC&2$JlC(;9g>DniH(`tN'>5aqA#3[mT\9(F&!J5L3?F.RU6M9;'=q eV&>Y1%d]5e9?ZE&7D;>B*-QJSr"O*WF\S=c(qe8#N4FJ!7lCIpn69-<SeqIt(UoN',d0Q:T2o;>3B;W^+6l4&K>;^U-R\@b13VigD%Y.a/c<Nhp8[*1 $JgFIb$*V487n$7/1&/.TKP;Mgh!hW))%k*T$#L-ar2':,!Agb'*#'gQ`_E_,b@Mig8+9?fN)lBNUV=2KRA*4fTdl/A[o=b+0(c$Ck=r>[N?d+4-TRjI,k`m.LfE##>#lt27lgCeL_>(Z\W]$a0Oj-f+t5/rnJc+dj@d4d!H(a3DK9lq7 .mUI$^/HTDL[GncYFbZ& R60(<C7NZV@sA$5>hmHPd5FT5$[BA9A"!n[;#) (n3#5oC#q%f+)65O =tR3\(*dIjm5\)WO)Qn+6,D<iLnD.[VmmcH(KXASP?dr^ToW7;f7^hTO."4]P>9<WU2b.bj$&_K5R)CV"&7ZYTa5% f.^TFUrF]P:@C#fbBbP`1J`2a$Xk(/Aq(/]&C1.3bSV%@JoabA\R_kSQq+hH@MW*l1cPljPQ0Ir3+*ZTXD8eP.--*q@m]q?hh>'P]A9OO-V^f7Sm:__ $=L7SgZ ./&`hE )#T,R$mE"Io"_VLU4`qjO)fQK=2fS,2!MMY?9]=;1Osa^T rQ.OA]NZ/<@MWrQ4)".Z- o1,XjZK\3EB75DCpL^pl8aiUeI9p1EcH16ZQ(^V>NiA</,$R\&J9m 8I'qh4l>OBKadMKOl*#9nhBa`?_:+I]1@ssK`%AH&JhK>(L03mPfr# J534:BkK8 ^QD"=&),c0%-^WY1ZmWg8Fn422;M+I0UZEA<@-U'&A/`2D=Ni88)t\+F+`l569P/Z`7R.k=ei2;CLTo:'jGb;"<m3JcL2@NXo/TELWo4K!)*)8i3rAB4BNkbXSVOA\X"R/o!a-D>SaLdBqBHm=d;O!5Aj MCsU(AKW7US]9e"_9bg,s:??4VZ1-6HirJD"67_)%Fh7MAN\S\BDSbY3rmWT3k39I0329e<2o;0`:W:4RMdp;mN)AC+>@=chL?8+XU6t3`iI<O'jjBo:,RC5pMQOFpY)9c06hS%@<"`N_`>e2hL:Y9)!NU)97V@U(tRck@5i+L,r+iA`9@A=MF%1crm%2(YLX&.ZR6IIFZ$tjZlZm3l&1/[.B6Yl8Ug7V5m$;/ >EB4\>MOL<0%&@d21bOK@_XNFh;H[J^hQN[?9b8(QG7AeG0NmSAE"LC>?P!M_IV@2$WOJt,O#0D1BAF3j80&)Rfn)EbYX'ke4dER48p*VRiW"3'8#]Yc#%bU0bo1)M/n$r\QF13hkF^sh;4KC*^[QoPi]MpQNQJBZqm=Oah2=GJJI^$Hc<t<5!3r1E!-!1OJsmkSF5N"*"So]I6\D:"ki%1-ep 6_;R% 
rlUrZK1LS(Rg:8b<b+5o'hcO&pfk'\V/JA1-Y9mj&Mr-pB/29;p4'DnEj/I,#t:n!;94qE!2eaa8t`l[-&PdfB?BGSSnti/lgIq#<?KpF,N@gn/pJ0#%(GWoJq-!H#gXRmnU/n`,Rl?'9]m<ac@?VqH:453Hg@8Rr*^':?"<m,_3\D/GbMTa^D6A^r&%7i!4Kj2_m?F'PTSTXBmd'mc,:"GfAph/Oi!D[",k*'?_o)_0nb6MI*;]66oQ?[UDag\01 np6W)-Y/2AVn/=aq@_V@8HVe>K\d%_e7f$6=mP\;_QtEX/_J-+F'^%4Y%:n06T]R]^Ch<t,Ab.::*leb06B=aUaWsfoNO,7fVk/H*=-lta/tQB*VUWl6mcH2k70T9kK$;f.Za<<K`iQ[`ND$'OU5&'Z9mf5DA+5UcC[P\-B!h8:=&4-d'^s3k1#t ^"dfn;p;nbf>sh)Y'i_mBZ^ib6^A%aP\sFa[MNb@GCXC/S]<'jB"jG/)^&@@_ADT^bht;?=t":ZOi9b1lJQ':NR@gU%VA0ROb^[Z5 #Qk5%cldRbrb6$j$C7.r8p<Na0r)^4pWN09k)jZ79g*:W"R[\8PGqd2;e]t5M8=)46^I).[CQtVofl@>?.q"pm=Xc" YLPr[:]nh TK-qka^LA5Q=%ktd?W/cK!old>\C1bUIL3LeNoFO7.j+*Al<LTRbLFoqS`QZp`5N\[r0`f=*k?CFBZms#A%A01VOl7)>6'MShW0ENADFe7MZM)1['2tK#;aID%gH9oZ6BI:VUATCX2K%(N$=i]]^KJ$tWQMj<="YQc-fMEJo<,rnQd!0LO;C<Yn(Zk<^)TTF3-$HaD15>dnqdL1Wc] .n@Ni2psT5BK38n@h'%PW,XRq^gH#A=k7t;4Op\]S,e1+mNgj(:9 IEO0$T&X1G^-CU/RoW_O,F(6rN<6J?SB#GY%;c=B;Glb>mm*&XI@!(]!7>sT7&m_iqo/#QNDkj\p8VTmK@$H;(9CWojQ9@ejK;`nfgOoYbH>E'MQ7MT$;nG7Z'9)^WflKHU@:Y4O;5I\FLUb5t7e:gkN"q(EOdOrehq&r!ROY3j>U;iTK,p1sJ7s1?4jN2-dnArgtpW0D+eC<>efRa_Kh?:aZ8$K1@ABgL_n,kgQ:\n*%tW;#CZ1'c)Bks@W;'5P:hg,iJSFcD$P);k=6mh#A:c/DSOpG9./#iTg57o7rU]Nt,2KN4rW/'!gVA"KgN8U]_irJQUP)7`oU9_NqSfZ2`dhX,ctR6T,gS,S^78>W6hiGW%!rJEdqX)#jsk)6(^M#cAp%@d%o]4dgqLK]5d@jg&pt.dr)`$#$WECm[Pp 4Q #0f]At_hMl('aJCsA2S/M3jiG.a1d@c22R-W<q16KsS@NRK%8A$<b*J4S\Ge$k7S<b`Wqb-lDo;ZURg.?7johY_n`@O.gA.j"D<EDl)KaAe%ce^h%jrPe5=[6sJ5(W,&#A"jr4t,mT]k5fOSl0X/Ts-X&\\_KRGm5]U:HBUX4pm29]b!&Bh*IoWaMcA-q,"Acp:6QQ]B!ntcGY<o]iXK()20/,=>Ib3DX&S^pK*qs-(.TbG^_/E/F'_=p;U\Ji-hkMS0:O4.&i'a+9MObIlfMF?\Qd=#A$pq;N+bh6j>5;Qh>$5 N!dT&XSAbADW4T)`/;j,t%IK=GA\SP??R/f9!'E%$7mK\tcioalVe2g_FeSCE)qLAstRl;mOLhksfHc>@d&\h&FsjdS/n\d@aDA_PdU-_%fV>3++RG=qc>T:(0&_G+9U;MXYUK\AO?H+s3D!R$OV]mYT#dE#g_RWkM"q(I\AB81n7S%M!VB,O\6)l9[Cl$ F["h6ARRBHh/At4ah]s<"b?4;fL[9GKmc75:A2r\0'CSjY\ 
?f@o]pWN)Lo::Y,-#+jtJ`NA7`[8Mkt5T5hqeAf:tkTP[K_,GE%5A'jM>h3J@h%WG5oVsbDHOr5V9V+Kq?iY7-:EQ)8fAS<\s3WlBPJO7O]$o@#L@.bC&hDoW<#!.8j.<deSSBIlfMlFMSV$)=XP]6_r([V2O\ddm C27IL+'0ifqoH"?/,Ji$R`$!_tN4=Z\^BU&!(PKnsrC8W$*hPSh!JZ?:j5e%A<c"[4f=Nao3gOD:VjotqmB%&gJS,+#-\^8Tp!CWGBdAAD2!,%:H=@[`S@R*WNnRU?T:Oh"Z%) frXRaX=1(2!"r$/b8E!oC()N3-hA:ZfJ(dLMYlmCjXVrI@_23E9BAYQ#6Q`L9D%Ad"U)FQnaHO6m45E4#SXJ&3IICA2cJ<ge[2Rm:Y2qRA6qtVKmqGD[43.!'!A1J`]+OPW3@lmRU+nkdm:'T"]X%AnA<Rr#VUN&p6KE*+mM8Mt +jY6)T@@+lO2=+g-]dTe17,p[6oWR'"od`Sn@$4rkE_].h2mZOqp@rQ*V>/YVOL?[po`CD(-at 4c#FcK-NZ'E.V7+pq,(3OrT$8%E>e37o57[BeTJDYASSD!^;%ob5K)46%67J"l.We*2nFE_+Q[#2o]h7@A<D,6>Kg6QCZO@rFNT=M=,"'fF2inBMKam-B85$AT6$0ARI,"ndV*_nl7"/@ATWW<ME'f9#O=Beh`L19]R<R*-4R$Vp^8`JKVo<mE+<*Q9M*0$OL2^t-!lnW]fOoSca(L/ENf]M_6_<.\3/"Fldg'WiL:?*re8S-Af"tl@:Fr[8DnZ-/U2!1LUB77,j2?9\K!HN7GAGk9C#At_7\mIHZN`enlks.BT]m3?[SQKS_dd53bMc`7f/Y@Jgt43P"PltP7MOD_+h /:ibi:^[i,NV0A@6UI&?iC@hU0iOM8OG(PaF)]Qbm,l@82)4hd2;nqsj@&6sdP]K8_dH?MCL@kP XP_AA37%*a?U9sh*Q7JCo;fi!OW"Z>Gg:k39m(>t.AWg&?f"3>/Rp/ahL3dtB `2/$5?1bYrhmJLfcMKte`@/n ;s\PnT/el`laD&EK"5+(3,l*jjPWJe(E7Ikt9cJ0%Fc@mm4c3s^AaaV6B,<-O/m`>G+(.&:#aZ#9-4LM," NefR'XeH@nc1AJ?![jD]Ct+iFl[%%TUY_'I:%<*,/&hC@cWUg-2(9>91LXW^WRb6Z1Mpp$-:l[ DQi0AoqIoO`]b[G"Vn:+jA;%kO0-<5Bag=g_$P\tZ\Ufl!4Cii ;QkA\1%*dY(s(sSHDMq=oX*gK?G>A!kST(H.)`!NNb&A>'kY/-)&n'UKd',g+4$qNIQOCHPYBJ7!80Lrt;"M=>5X5G$;DHMNYQrdj!jJjQt1Y!DI?Bq)ZRZH>nCk\c44PAC4^5PLIc\g.285(6$?(c/!0j8:NCC0RiF*k?AT^qQatHh0"d(C1H4W8P>>B:\"^Ui?A\d(YB mOnh*JlA 'VqmWe\0q4-=QqaMbKSI45>A,Z>_#qfRJ#[:c^r\S2+6le^O/TCPFI>HZ8=/G-548sSdom(J?C3[$25 @:Oh<;JJA6)#$H'B+`06\Pr)o%;\`#+FA@:D7CMl8."5'AU5Y0UCNL](BA2b`2 $pHjf\&jS4<0!`nLbYrA eMZnU`,o#Q.[: )3-IZ>^(MUp29?DC.a!)r7#h^d,;D1-coAB:U;=))#Ng!GP/>"]q>9M(1`FW0a#Wr$7"cB$>#E  'eDCTt]3.FS0roi2q]J'B(JtbZp-es2]((SleEpVd4,B@X`=n,LT_b+/I;b?K )<6CS7;lA/*qA"_;J:1O=mrAgeP&JekLD(Z$ffEi5O(E79KWOfd]SG@7@\\NM$^nV_B/US&U7s"kM%G'"K!&?gb:%Kb82?&'"*Dt^bXt>k9E'h,Bl94+.Ns=el@F/FC\1-sA37Aa&<4p,BO5>LLD/c2N@01M\pDte )*nCBEAN]bo):3V r8BdgCl 
Jq6TUiF[>CJND$=UnJftI5^ApC))lfM#&RX*lFnJ'h5c@/WQ14=GCl'6o=;No2"C"JI/TgW7SViQ/(cjX>M[@.K8?RN([-2HrgS4B.@A^?AP2??N`")AN+U<@;BRAOX5=9<)\F/B!sid \]R4qV(INR$/o\<Pk&-gc?iO,5B7njk_6]3dW?1XXpe#RANAh@2KF'\:N*P\b@?SjE.R@::J-L:8[V`gH?3Wmsjm!_Z'K.H:mpQQ?UNbDjf6*$;S#6+C2eItQ.:Iqr2% CQ`VJ7t(&Z'[ZWcEZ9YB[HeD=tFDYl^a2+=Sh!47W5YsN\Y:cXME$Mq_4SE:Yj3_ T*'/>*\m<g[me*gbD VfKt)=NW;/-_4*S*J;Q_(j5]i$Z">(_8eC3ki.QkkFlPkVSK2?HWAXqMY5*L<Q+V:HU=/[P#l*TEt!fUf5..0q/;ANor()L]\1ZR9YZ!^qaJt4dWt$AoGk?B/@jn?;1LDiLXq:qX8Ip`ciQ>87TOE`ko<3Dr-QBPV >cE<1T([B4^_CgZ,4LZf!bt)t9iVR(1kWXpj3-rrq(aG_SZM&O=-2QU37V@gQn@Sn,/!HqiO-gccspXsa1$%e, ?Vf'5=@/e_,It<FTB-]AtMt$Xq0jHV.>O*U2`3mYcqB^eZ+#Adk'>A;.&2osb[=ZEO@^U#:ifJ<;GgZB^o!>D5Gc^/Hq9XH$KVb=A?c%<Oj4ai"%dcm*%<RRQKfJdi#<hb!TsQ$9`kBADMEk^oYa;l5&4cFA<%+]A%<Ih>DI294lh+]p[^cDj?&J/CH>p>,l/_1E#i?-YZ-)._(QrZnSc5?KekMbBPs;]SmRbJ=(P-?I!AedL*C@,$dqDm%P&*/*( g*qmb%q!nn8.hs\;nt!UUlXRVsAQAE*#qaO#Y%E)r"LKQNJ@dW"EfX)+\Xf=5.TH3/>dJ?9W tU9s#&CHsFT =c`3DHLNJLkA1cJ"D>aQJh%<X8.h"X.`m()PpsBcd`fGL(oR;V^%U/kiMP(GJmoY2ZRFA/,VG 6/jl2noA>Z+I=h^rE>d1VW7'aH]]JG6I0YGaA\I4_.$q\b$R:pKB;n)m>%K/0l9LG^(bW1$qiQ\NbYONAA%c. 
;:PFQ++0-E5P^s=7^MP8$)rr47`":&$='>ihiFb/>A^7^3UO39<>#]l6N[%B8M$t2jP@%RBeVOYO0l]pOnrFAN&oO:7o Ts,gtoYh7:\1!Bh9)!XOeCHYE%9C8pB10HZh'L-a0$c3AS=r^a67gCNkVht<`c_]f>R4ZAitLW!L;og%CIJ*^T9RsA,q1:c#X`UI`K>kJdN]*A"qD#1.Ua/!FE7e?JEj EGALbU2cdRpPf&=$do/?2),R]p 2D(HtnNsNp-J+k0V;gb6l<gfhoN:'4`=_XeXR ET>2;k$e5o:JBCXFf.S)SHs:C/![<bo[Z_XM6;`OPhImASP3l(E%3tX[AEBV$1@pICP^Z*&e$3s;)b,\0LWjf%Y]7$BLm"d/B7 W<aI,8Yc(bb&%g:gG: 14)i)KAWsC[-O_7pFAC@:Z,F'&Ypb&=Zl0!Q!JOG&_S#&[@+1a@!<\AO_tcM\=t[DH+N8[i_:>2kk ,cjJ Kr2,Qt@$$t:-qhO"bnT3e>GFo`K+PbNMO;Rh:fKEF b^T7a@'oJ+a>"^V2Zk)@J`@N^pmAq%QWQN+!+M+bt90mtZE0 `g%qYf,qqqpKg+tr6`aA;Of*\RbV#Frr=&+-pqWsEFAZ/&ToT/sW&kO_7Lt4FL-UA:phLq#DbcirKJ;:J!iUhKdJk`Iq?!8S=dGd(kA8d;klrp4J+:88cXMP*\1)m=,E9k=hd6*I*90X90`?JA1dXAYfX!D16fdaBiI2gA'(;n"cBp4P\XCih-r#GAN3R<%V0<q2I2WtQn8%:*V2D!F@KPQf[A1`^V*qr7U(D%1C0t hd!_.2J4'jr-]MrGV+bo'"EoM:^a3rR^#fm*eQ'T$gA/d'Jp>5c)Ad)p-E(Kqk78CE#O'\:KZ.$GT^d*>)K3MQq&G=R]"2OGEqP%;BH#9<f.r!D^/1tj[ik8tIpRlXq@#7H8R3rJB;AKNR;68#k&"1O)^$.PB0(p]pN$R2i1>-%+i4*lhab2R3M5=b(`WeqJPG@;KkT,dQe)M*WF>&D_N*ib@0E#,[WS(Ne3YPtT -;'m-BQ,jbp'eo[pBjAF>f#h2b+qRd>%^++bN`P]?CNq2Ks$^/XnGO3@G&rEp\_&AX7XH,7!gr@**7'BiS)pERcqFmntb5'[=Z"c/#p:(E@_eZC:p?kPH=o:1p@3<DV&e"32@[_UeiHmJ0!:O(&qs IY$4-QQAOdd3E[`ZYmEClsS?`EMMri+/A5,= $A=tYa#SY,_5l?`F#.9"m0:5^:WAV q_6PX`d8'eSS >)J$\A]q'*C]G*9fa`jeSZgj2laCfVS/+Rgf9=%S_KB[8,l[@`lR"B[o4mBA-dY!UM%Q2c=13AZZ[6fc1["[9g\"=Uq5Dl@d4n^K:'982?V909L0= Y\5AlX1V6$1m'%]"ERYJPnT)Gl#/Fc<]k`/U.[4;cR[b)j#q4q0Qpc=c$eAr-5V+n8(\DNtiJhPis3:@<.F:[`KrKL8W\i,[1].b((pCjbj%LDh0?H$2Z*)J+*Lc`jQZI`Na.k>`PhQHNn2A=OJ@cppcMK00LPg"L@-WWZ5K<BSbLspU\@C<WdG(L.0srsX8XdM['n=_"PZ^3;e\qYL6fAC&_ nB8q!!BC8%OOH<AI)LmB*?$M%l),>E#s_ldV&Mtf)3DBA%!#?1I@"`-^E^=!X]4AVDEQT#%;70kd_4V"dKX!j^/UfH'O@[*IHC(b]AZ$ZhLM/X!0/Q]Hj*V%k0mI(Y8$XOn\.qIn5/e)B"I]#/Da6V\'O]QkN_G)[I=8'Y(Z_.RqXB/)[MIAq#n& +nm qtG6g<`OU\iKY/dQE6D5kNJV[rJ=8[(S8T$Ig4eIP_Ff),<Y-Lg6  ?7kM]6f@qqRBc#'l&c5+(&:EVl3b&X@.cN75BSO1$;i\+E\*3"9S2"%6p3Nm2h_RA:cej+ESdU8)";5DlLlMcKOnT3s,\-,WoA:6P;"k%&Yq:ENNZUHH@`?1JHR(2d 
`$:U,WQK.$OS'KQn*;:-2Z^,=[ml^t8N=%6R]Meg+b'GoQ5DWU<q#g([:WBVRD<Sa#N$;T+)bC+gm]pm7]na0UeM'Xg!KICC>`&Lp!cC phI)oG57]I6<N\U0>PZ7ciGAcfh87BP]!`AMF;<5TcIYXOemL8En[ag"!hb]T(1\M&7[4C]n<>X,.(bE"Z=([5+>& rentPm#3'4/>U[ mc^#Z6rV*\t$LgGp3$:*IG-1.25FGebgA4U&coE".Ab+j-&ZR+E*e!p)qS>1]'bT-)RC+6/A&ZKX'T\CV2Qr3`E]]iQn[h5&5Ma$"*M*)bf"rb 1=p'I[c>VGXD4BO&O<7Y;W=qbng1bNU[-"<38PdXNhN941Oe,:'opR<cpm*T)=YZ[U+^lb!4+T:-_3)P(\78hlkojE!!m%qq5ZX'GMA.Aa_GfLITm/*r&Un0b88@Ko[;dY+]\Kd9AsWk'(]r]8A]Gb&^:W;Sj)!a#<UFgJjdBtmChsl&Z.llni*'T-"_Mf#%g_Al8;b5j'rj@S=.mIZ7ojf75;'3D/$*P>3b^,0YF\\+f07p3"0lXp,tKtrIL.0e^*.pf'00aE$R#8W]O?4=0QB3LUr?mkX7B(m;)co6E'"<1lQtfQ@5mR0YR*`FFYID+'S0K,Z(nal4ssG[7IS%s^48pi2B,8 (A)KiNFLc6m=&O)5j2 ld*[8=I!/f h4\G#i>h?@8P#VEE76!^E)V!l$DeRMMriK1&8NgVG;f,X^bfZ8;N4$nqP]C^A\%?AmO '$aWQLR^Va,+Zq/p@aiWi?>N[Z"2S+^e]cMRH;7DWNFR1OOaj4sQ^l*]XC`tZV8T>FWeS8N`'`ZmSGbb2eSFN!\>bDh=Mg:S!ks4EOLh&T0[*TN_`kpiT1=?:k_k;Q0I]3Nl])gbh8Q<Mb\fM!,kf^g0SiACn3fet?)1G!0*8a%iPBKq\[R,k:#2^C8>e[+!k4U1g]>XHKo9tgI1bb V:/`F<Ta-5AmhPmDG^RO5!Qa),VN(QQ&B:l!J%5t/$goAC]T P24"<X7?dlf,@T34ZV IWKU8&4 nOeO:@3TsdT&QDt)-fAeoar"O"t$9i:jHkR.]l1b!B?<60LU1qa*G4(#EbT&g,JK;Mt&l6SE'<*BTRst8=MXMik.YP=&VA jG'?3pGS1JD'm4XXUPXg@KJiN.5r&9_0H,4ed\JW5RIJ^#gOR2_Gf4J!W6C7MX`cOt8)V"8]?a*8#baQ9^,tDU#(%QLq,OPQ(kPA]^`sa3$@NW@5@2C!,V">Vt5e&D/pg:(d.@+Ti`LPWE+W&/"YVOO`C4kk nOkoUl jY0"< %J9d,%lsY,a]+`A!Qd$WR\n:0DE(<M`Fegm=apA<i(A<\scW*^:1j@dAQ-5g$ommh:m*M0tE97_NDO!*HDCN!c"8ZC?_0QdHdR0hP_s\0RX7,JjM+k\>XF nG9G?GMXFO9&eBQcA$Ma3]LQ5RBX""7_De@DAoZQNAZJegBWjA36hDcEt.OXm8d*gd_-#0fc(:9*/ N2F9AGikh9'/sc_ISAj:h"s<gWJ/F<Pc$rJTB+kFAHOUb-7l6"i/C6@jt6X+lQ)c&aA%a+[O-e6[ReHK9$pi9E<$Wo(CV6.*7M7,m8[^7p5A#AIIcVqGHt AJ!PFdJc!#ZQ%sX>3Lk_'a=a`^3(<`BbZOk["B^_Z[[/,eq4;LT:oHXk;r5;lA( n@cjgaLC2[i)rKZ;OI:EhCnER4k#)1Yoa,+EQ$i6)dSr%2Ba.i3: Z6SA($a!gl$/nC6>X0EE_a72CE`nHL"`ZGU_5SWAA?W! 
(WXtM9Wj$I8P'8dij.S0aU*$(MBK HLH6'U[tq#JP7->J=na1WgXIWFkjdW;1Y/sUn#bTMqen^`7bA>(^c[!Kn[f7p8AYb(N2%5\FG-L0^5"qMVFfaV;sBVm9oiEHgj8H#OYoUjZMNTL]MqdZ!s]cpa;;lN)0USZ0k+_G!MnioBE=e0hs3r-CTIilr>3q]9?JfI)sNDPA#dp;\3rFdLnA@Vlgp74%r7>Cgf\N4eLYHU?3*\67rK]pP cW23D84fAQO=2,.p#g4RL\<&QdV ^;qPsYans`GF=XZhl-GE%EW5^<fLR8Mc7rU-$*iEL2T/ZkjAp&o.scbL;@g\c2`Ut7*/r:\Keo]K_bGOH2)[EF])dnR&TAp^JaLIgA!E^q\*L0aAH;o!`+A6'OE63l^,%c`P_X3/0 -1b3X516_L/Dmt`5 B\A;NM$kJI3g-**5J69q4m:,4kr]0<L1`rC%bH@DlS,]m4^6XAY,A>K=(+(V=OtTqc&:bh48P[RUX/.YKGC]^DA<><6b=Gh2Dq04>^*V370c+O5F&r,CEhV5fK)nMfG)Sr8`i/9K-7fO=b@T+\J+7EJ@6klM`Q4^Yf+e^kXIrNXAGn;/:SJYhcqAr=0shWAs',%1sM.3OZQcJeJI`fO4RV]9g$#h">h=C.AR^)4e&Q&K5,iZ-B(t$:NFV#FF8tY^GHO5n1q#PIMZ_0JrgF[%\I7R\CK5A:-rc-P6fFm:TGqbU_rjs$q+O(I4So>0[ApW!qQ5IQnZ5GHgr=EL-H20@6Bk(AV$0UUF_#f9(6#g":j!<iI!+3'S0AK@d#1A,FO! ;MDIY!d=Xf#.(EVm;^>Vsn&pX)b477Sif2mg^V1$*D#_.DY@_CmmU-8`$i1,h6^8U.(EtH9<m]5og)eJ!bbP[b!QGEpJkP@:bmPkCONM-(-`U]cXU]gC"%Mj,@f%/]*q*9;UYd9!)=Ne'4Z7rtB]b9pj"dpVkMk<Ai)F'=H Bh]=D#&BS9_cTZ]-B`b`F&?;N6baVT8Fr"t/gerd??\<J6Q1bLprbQH\C?&cHaVI*<5>A4d>@k<(tQjjl"\gQa7PArYdrTe6r=Fe=T9cYL&@LU6=r5r]hD:o42!1ARde,?dMrkU;`BbNYcA"=KM)hRQ`r\hoQOAA37TNtXr^(6q9@=N9)j6I!8CJJl!pg-_M;tMdSa2sD)B&*c8t/dY0;+=;j)*-03I1n 2I7F1jpOc(ocb%%L/XUj\s$EE2<aC!=>[!<;]8k#])^N+AlXfNZ6/jO^Z2^!R,"s'd:k<[[^ig6Z6W<#(\l5m0+^GHLN:tD`4,k,D:V*HTp8)O_K7DO[>f!AM&]rN'< MOn)D"o@mK?jmgi%FltcD*0.@,-H(e6!!F<5Oa!Q)A4OHKi=2=NDBR6@jDrV&Wj.p`!VSUb0Gh8*Zjh-X[3(Gg3Nl0:'`YMLhApqnB0#aJC$$_67?8k]:lA:+pqsqs/Mm4jBpTGGsKA8RH@k$)kJI<+c^HbkSX0%Q6i%m;?7J(<X\V\G.lX)qP]=#7?7HPMQA>D&q+P0N@9)JKEX!8dCA0b"8TbCZ-4C%L?#,4$UAo`9IPt"K`Sf`b$7?,ne4fS';ok_3l4HIS+7H,JV(5[NqX4TO0KdO_&'"*ZKDtK30&f;<?lD/2D8O[cWP;=*.I.YV<lKc;^rDL)J(OgJN/e4Llt[QV;l=qRjX.7=!,$.X15V:7[?[j/:bf,_PN?[&7K9+`.ih)Ce^:oh[ArJl(9\+)pUl>;*"3p#]t1@ZYl82b^sjLL,;P)$tJC='b,pm1Cm1CoL1(]URj,HA?_J9;VST47LXYmIq.O^,!54Q4RIsp6QWdbW9CZ0g-5J;Z1i^4bRIk96r.nI.jUBUtc^I![At*:&UE(KjS(/ 
Jd]<V?>I,aAkEn]?QaL!&MV:b0tVF\$_(<SjK60T/r[q;Pt41mA5qRC]BRQ@pX14>fq__Uo3W'>*p+6%E,[*>T5hJR^FkJ*$#43+VhrK&&p"?K%S'r";pKQ_1qb8JL_:#Np5>I_Oh%>m3;' $k<As\P,Vt*bEObBn,kh fBU+nb_"'I0DD(6l<oTJgP'T4c*.+fp4JsAP^/!;;8Xod!mJ@s<1Mt= +pNUJJb@%VoPc.R/,MS'l?/8OG/H$1,l09FJ('-2]H,!*2d2>A]i&P3A)7WUXm*#ZnF7kU 1WcU4+/c,^ -0gAri,BK K=cYV*G9sXa#/#<iK-Y]`m` <?SYcX!]$%0N^6-<p.&AGO.s99D)Qi*aPR^QNn;XEs[#4RPj`<*i$/LAdh>^jW GtqIn+LA4/bkSW.9MZ)ZRbNBKV_LN\>$DohPie]e3?rjWX GP.8#A2O5-\7"C,KPlP5Y&U2[KlMQo25q.3EZ/#]X2UYU:gAI"E4Yd2`IY%O<$ptj+Y5]LR'G?P^<NsgI_ReKZpmA;8UocDq?Q[_Z2l6O&k3#f"AGEA(!3ZZ?-MWZ?HFP%E@kcCBcY-\T%5'#r>mRdRd'qh/>A?D<V1=tZ)4NI)B*EWZo&jmiXAmqmZpcdMPN0mA7MEEacOQF;k,<!a]92?7Q#=KY;>ch4Ys!%UG'FYh>6K5g'Wo]c.oGTbGS-?^eAW\3VOP=+T)_WecM?5/9/6e//Jf,$7((V@0/%O_k?8A&H!C(gZA$\lIS=bt.=ZY@%!*`3#1(<:>NNP_$?-H;O/FPOsqr#q?B!C3%4*:JXFlF#cLCIjr`AG:B!d'B:ai=HEr&4cICiI=dGf.F'X;M6n5/S8'LkO6,6SRf"?UQJ 8,8[=tiC(UO]J/AYl0_?+#]rgkV$$.dI 7HGbO;WJ8I^^/ZW:jj+m+ZRir%>jmWXkmbC"Z$[TT\)Y[%)L_O+>NLsS-^\Kr>eStgZ#G A:R:mQ9l024<FA'(Cl,B&kVWh\=L2ERI8@k($PEKC9gDV4&.Z#nAD@KmPcgt1Jr[\\6(orWJcH[!Z2B<ikM-PQH<$pmTY\`'3d.AT;,tq<cesEdq78?HYm6@#LLa ]FnE%MAW6'Dp`,]efmi[_-D=;-rB8'DIN4<PEcrM>Xb[V]M'lGW-%sjY%:s+b?o?Oqid<XES%Fp1)RnCg@Ab6dfeJ=tRP()n5i@VE;W;jJQaJi_rNk5gB"LVAoe%SAp1?F'I,$!Zg.TO8r&\c4Tj?V(B[N!&[p`Qe0;b;S$;]A^mMlFXhq^Ye2LOI%`SYMBdf>&TFsJ@HLOL+=1SP31P hSX@eR@61(H7f\W"`mdPf:JlLE\%sVboqP4m\Nq-1j^;jA3521f9X,Aj8 h61c=0G480E30bdNHdj&m!Dp.4OQXj#aaF!H!=VO5-2R)NsdM]o;)bs d/2NC8_'%hG"68'4CP22i5 ID*8>Kok0XF^'mPq6em/"*-0SQU`KfTbIF."oVADl9.i?JbIjp!>4;qd>\/mJ%tM9S^(IBK8CF!'a:/*2^=R&TVHG1AoIgh$;4MMC.sTZ18b@_Zo#F^a'lH%VkRFeh2WYgs>2'$]7o 4)V)"jRBN-OqbfGi5`bUbEG^cc4TB*.I,M<9sX"\hpBYZHZU&@Mp`(t:oi*W:4eUk:j=1q ,E77#5=+>#V]iiI`,A,]$+;aTN]qO^elX5gd1_cZJ&FRjfe50o5YP2'/3.:tN)GPGsWD^3FHn<Po<9t.kFBFP/9mJBM`>qf7Y)jBEs,'lX0J\@FpCNkj$?;b53k3K,1&Op =ktW98o6go'DlS/q$]6qIlD'XPAPmF+52*KkbNsoh;Et'l?WA^MS,cWZp(t5!bSfAhfY-_Q]XtfE,'-9JKs'SPbdC1h*.2MmDb#CcO5ZK4qR hc$;0*<<]G<3F)bo 'As'2@+[.Oi&<\UAp2/fMeA.>A6/$Z"R/ENAd47X+n*=b4Q+FRK00@a(!! 
leYn1B)b%]n7>`@,nF9q4>7"BE9fLGLt1!X3%,!\R5KBn;AUtn2@2m,"#sBAj`+n(HN2KR7c2Q/&9'N ^XS:_/A'igE2Aj<AhAZ6$Fft3'q+OB(1$aU=,m9X;.e Y'7'NM[0iCbgc&3f-IbkVUtpXrl/j3Fo)ola>G,k@@A^R,I/t:L?DO'U2Prl1NJqr_k&<lnWkGs8!hHDhcll%pi2n[h<GItgs0%5\ZL<AZVMAaF6fC'g7!rRX--,`]K^0V3-AYVLh&(K`i^C</qj6C(geKYFA@/n_<LFFh?pfPeP;eVO$,(]]K?i7Zkk]`%fF[%c7i\RUKTnKcM7]NAd^e3=bpDXtLGb`6OAQ&[<Z5HJ9F^>80fZcK>Og<s-&1HYFbg8i:+=@8TC6GL^"HdhZa@D$#-BtK;.,Vo(!keiM:(oDc,3m`!eK8`/jha2Z&Ch5ohpX1bFM8KJ(4*T_*g;DQ#N/SH*GBlsjtIsQ(*>'6c3$J];t3h6CH,g2P/MT7r?t,TKRN9\;OM3RbVC0bhSX_6F5R-M# Tg!EoD%SYW*1Qb)JLI/!MfTZ5""s:8<fd)7^`*M.ZA!F>Fmr0*Tp7P$FXqJ#%`#=_H<?MT[%;fhQrT,R.=80>i91XEeFU(m9+m2hA%ZW4E[?l`=:QW"mdF>Ps03m<9`7jB""<4<6$-`bCsKjMAMPCG2W BC?^;O17GSDg1l'"Pmojqq0\C^,I_\).2+#%I?.kI##'n*@PSh /o:lPp2Z4b7o0EQ'c7%>Xme8V0/+A[A-_n\L\l)=Lt!%c70O[C(7qZ&Xk_@jh'DphP</EV`'W[cV4KO_1+b_/GBE0&O$\aKSBddD2ne#fjf],/]]cZD!3)XUJj3g7mPPs:L#THo<Z&n4>/i7p^g2br/7boPXRH[Eo?l];bY6T&LrD@<b1'?tp2Efn$Ai>4Zb mjUf9>\b\PdtDQ :AG\H91r>R(6g6qAb$<S#hlo JI:@*8EHoEei9ARh9iB(=.NH&4D5cDr#k5jWaF>7DXRdjt&#mdFFjNoXE&]0FU;,LKf.2lK`7In."M0#RCDI4:C;p):5PNIKrL3?oqj=BUQ3;4 AVCAC&1nibS^kES7a0kLA50\3((0f?85.QdN-/k:in<)tFMAkQnN#Aid/G'-+((:Pf*)lh20R&gpeI7U\Yl2k#r3OD%VKs\#i>oggj6J8"N-QlI4:t=+raJWD%Fj [k"AM^O5frFf[K$SI?eH2b,*H<YJZnt5(@Ir6VDUKnPh:9lsHAn C4]AD&Q`"lqil1BAIW+FfiQ[K]K="X'dM&Nb)L*R!A?Yq1C'gS(Y0#U'52,4&f5$">_FN4iT_2W5`Dk;&ib\_qM+VUPX*#<Ftd(f`XXa]BO';.a61BGK(DaU7XWs^d* f)=8R\0C01IdA(>_o:LX2<lI18nD[aGi/.oBOg(s",&Y]/[.0Cm4!IPP7AN-tDC@o;5--DgBk\kF'WV,>c1!mBV<XThWa\g$m]O!+H,\9eSo%+^'A:iV+#"3f]=7d/^gA?GE6%HH@9rXC+ObJApe><4l/QA=!gg5c!dX"1f/cJ1P((2,_LkH?hi77HZ0I=A:fO?4 ;$FnW3F6sg?Oof1g'GCA$&]Y)..5U76;%d!Qn+-kp0BTl'km"!0[bL%2]?d_Br;oa(Z>qta.#5A& 7P;BhDR&rW^c(N?H$ZG&e<h g$)pfAlGAf;B)gFteIi2E:N9jGntD&QPH*InJkOY,UN?L\/>E-b0]m"^:$D&m5aAeh=a_.]970#lT,`a5A]BVThe@Uk)_8 ZpR+H-.YTY3;1>![%_HdMoDPG"s;(hh?NRqHZt"eroGFR9`2OY#VA1#2g>Fn`]<sQ]!,Mh1Z'3Gofp(K(sF(?BUJCHa)%'=L?7gb07T-&X:'n"3`8jh8kaK,]9IG6\/<Y7Q7q;RkiKU%r-d\^d"Q'6c?8;oca32VQE^gU]<0tsUS7LHp?1kSD:$*-:H?mB[-IjN/a>(`5fS$U%Y(qmE"h 
Vi40&3B5$.\'V/S^Mh$\P#,Z%43m%R()7^iP$oVJV[p@6-t)n&\87=]I^1L\7Xm$T9cp,j7KZNa<98DU:iHc [F!naeI4_E3A15X=K/?&%%JQWN=Hd/_DN];2 a2d0B4+h6($ 2kUAp^im/nP ZYNj/+7+E>\t@9r<sP'k&JX9 h2^2QD0/;go?MKAsS(<&3AQ7?MN=Y(hX9#b<D[$?'MdqLikpX1?<#JAeA,Al)fG.!]CD-s_Ua,+/LCJ+o:SM*^93D*.CY0A^#B<a&mO[2Mq8esn=W,eEtLci30-FK[YQtMWT54=])XZ;HUZHN[`P59LeeKg4<k)2T&qCEXj)?_&HTFo@r`sBp6[,T3LNn:2/3&kE;=g:.(UH3`[QLes6A!@LRj8:]U,h)=/_'_!j`(\ H!Y:h=UA.sclXI?ASLiA2hZ<]`!HI4AtQq4GPikAtU7!b*@aDO, 4?s$+&T9-69GP9Y^U%#@k`7.hBR5Q:s7:n8AetPr3XLSCs)f:K-a9lQ"Z`fqsi0$R,4t^AGMR4`B5smUM"H'B!#]&R-[8de^6X*:Vt]6ZU5(6:*$TNY_9nBPd^4GTS&ejf5Qdgl!:r3+P.`Vf$g#qhm7W:A("#<bVHoA5l^LAN @=:)PE#&1b;LF[G4ER<dT aaN_pEWe=<WidNIg_Cm*I&3m*%k<?.U8^XLB4il7 33F9)/hf*'VOfP>LReJ6M*/8L>="H34g#/NK>Ad1 \CmZ'F^F6E`PKDjC<g,dB]Rn%Nq@&CU_r_"+5=832 /C`(564:VIUVLcC@;G`2qc>)efPUQ&-XD]o?Eo%(9,N3NA#Z[P\rhcrL<84n RsP^.b+KZHk#;\'XYg'knHL3 gJ#doTCiI<lA(Zt`]X[S_4!d=JjP$*n6NPl_<h`MeAmQQQJ-.p6;-8sU]aAG#gl*/V>%XZ@alWl]r/9b_7@0q/TIMA%=L2AbG,3a)lo?H(]!$t&k?)a?b;gpK@tF:H)I5+29Z@Fa8^58D%0DO%(5564 (&RH(!OdYt9-C=.ZJMsfhMR%ZFUc(Y\\5P!gcVp^<TG =d k!i*9tW,f9'PY ,N@rR*Mt/]GBr*sno2/TmHG];M!7a#UT)53mfKL0Wm=+QW=:$n*8;Sc]PEf@b!.7XL0bg,],2MN+AZ_UUA5HD^:%=e,_A3Ep[p/B`j,lde1I74sV2@(L\_L/U$g)&4TH'`tSC2hS;F=I)=i&m4G1eA-$a8dDf?66rdS`Prp!B;)SlXA[N=Lm`o.4TtiFjSS):ep\pm)2,T`GYKK8P-6Bs7AXgQdpEs%3"lCBDJcEWZiBLm)*#P8'!L3M?,[D6AtH(#Gr`5Uc&=XLr(p:$97R'qr 6pH,a+G_VKBekG96_MG(`ps(p$6`)b#Kn*8Rj0Q@<d/A#SmpWSX-;Y+*p<>+5)\]*)N .1<"i-71jr$AY7p7"9T),c)=`S)N*B6:L0U*aW-HDXfsXVf&a<(l/tsMn`;`[l=;f&rbgcDP19ZEZA`M6]Dis-MCIcAO6c+p_Cjkirb!a@hLMC@MqCs2O`]B#rob8S>_U5OXA3pr&39^o:+e9h<W,t')V:Kr#7),^&"=l-iNfP?7jcT;/ZiA"'`s>i 'I3"4N@<qLlApX^qJt`Vt\.\=/QA^Id:T_<*ecck=d0&h/J\hBn6E`[D3[KW!mkdGI<[=!(1r!`NXLXQN l*>\J7Xa=lJE3f( #c!T]m0R<c$:P,Xh0(nCGc2Pk5LWZ/J2$2,aFQ8'$44__AEApbd)#=;YFi4t$aDo=m-VO\_C,=/'.!,TsXFDNePWVRJ`3;#D%`P7taS3N`rVBA2d?=`[b3(.@KRA82@U_TF^+A#KFeH/#2qiXKa` AB/[c?XX::Q4qemr`1IjR]5X`?\]o-( 
F]Ud8/Zd<1/AeMX7'MXALA.[EWo"A%')ZLIJgcS[>%].V"YU.*=4nA:rY`)W`4sGkr.ZjqmcYC0gq\QpDs<Jq5=BFg$j"4bQ#0b@%nsDE5Gdd6MQ4hbZ,4*F9HTgOPS[=['&;b<tm$,OahH?!AiR6;.8fL<+m%@H9k&=hLI=Y9V!2(p@>/%Qp5h1>ZaEk$iG4ddArO`raj:VV 0a9Co?8!$SI/:Y'1@hY75]?Ff'^? QFb4lTA.f$Yl90*85i)JDM)1n "UeV\ZcL6WMYrcW7"8A=OK YqMAG+\;q"$Q$C'>(S56?>3A2UgAC#$.EH]?mL2VM;j1kO[A__NaH)etB4AY.nDt0PlpQqVNA`@O>JJG]5joI]sKc[gAJ3GibCf`aceB6$AQO@=hcmn[:%9WVS$?\24 o>I^]qF01Sc18 KGW9[F]`I"A$rA]$6Q?OnpSHiaO5_q'XZfnP:UNHje<=W9eXhr+2qdXobd8)-FRk2OK?\GH<Z?V+jY'U!IC6$5"WL;REHI2kE@>PXt\?Wk`3n?d%5iZZ.fCG#MDi_2N&OObPNm(' SW:B8jX`%fFdY+_'f#)B?:*J8sbf$D8f.?<&Hh-37eQOA$E [(Z6"/H`*"%I??Gn<X&q5,@D!$gSMs/ :*&%b7,-&<;A"Bs3CMP]<&$r=Mrl^`$Gg"Pm/521-C>ldl[&$aZ-6gmUAESdC"<mkJ[R4J_rn+E7O4Jf9]atEa!sU3OA]#XlrMJo,JrZ%5d2]d5Kt<MD^J>t`86-WGc'm0cgT?0g.!InTDh,_;?C7!-SC-B f9t$\^.0oSHA94BgE+A2)Ca^"&sN4aY-`fF_N`'<!^NUk[.05sj`2-7AO'WBAAP1.h--3,AA';bdbNhfL$p+p^!7Tdn:k V+oGCW]j[IHOC9Od!>8''4mTaq[2kH70 i+EJh<,N:0<@Z`dsBKA/E:$];\j-9O5#`\Y6.'F4+AAI*L >.+Aa"smg1e-;U%U)M/>o^#f3*la>+ E/j!4FcZM(9RbWR0'hE\*-A68[F_G?dIbeH!+cS<,-m6L^8Ffa)]!9>E;kkaW%rds,W+a6mDnhJ2E?DU'f0OBb[Oo&>"-\P2'Rc!Ls I0cE'nArkdfYsfo_`.74[rF` Hi:9a(?&:SprMXm4Z#nC>I4SZ3Zoq3E3207!e`N7pL]?ALII%EcE)UcAsE.W2Hp;H9f.6+, ghLDk,_;h T&AP("mHq/_[9kfk(L0+c_kI+X[b[n.99_:>$f.%.56=4%s9A%%'/"LZ^Q)EZe,i%eJmpGE?Ys+0$FUa@=fjAQl!/Q.CH2_%2AX^<_Y:3SJ]rcrXA$dP).gd([HQlTq=]p7/]!LO9*@=ca0/Z3@0FIMFf_siXZa@9.G&^+ C Fsf!VNRTJV4"sdYt1e`A8fG#/1shHkP&:Bo8g,r^['#Tg?tFsP%J''$2>C)+>P`r)'DP&/J95^P fLEpi4=G3)34qS:,XGEEiEqS$Qj^Zdh7D/7C"8KP&(1=We<URP`"(9Pbg@B9-_]NJ)^]ZiS^XdL3o 6ns+YqAs/7/c3+Wi,kVL6IioV4,5s\A-jfBCQXt.qR/JRlL9B:]=&[f0^A(L91H! 
Pj?F^B7M3R:So;X!PA@p0AmW:_R;T!OU=T=I% @ZS/4#E%;gF,<k>3^kEtL4TbtU?mk;.VSrCA$8d\08>K*hJib__TmH4e!?G<KqO6^Ys`b:?/A"h="9(m?NeJ\'A=6i*'R+G(<"4J$giO@R.dsT#icqri$&Yj 1X2L_EbfKJJDg[5=eaGDWt=A>5SN_49jl!&VAO^s"^ PQKL%5)aA [@#,S0O&p]=6,mKq=J,mAB.<#*(_eS ii#NK#p\H>Co*SBDXiV`"%;o^=4VaGiDdCsB$H@\[  FiZ.iALhM>=p0R8UoC&CRV_OUDI5@4>iJ,%'G=Skgt#hMR6H)Ka=91GU[ao9UJH0Ym]S:ZY!d,-1$1"ae].!Ao&h-`/L"ej3YYI.Ge(Qce.Gn*QrSaX7WtSP`PKoVMA;KN3/?YreI$!KB'/mi/ h^$W2`BjDB@9p_JIrqFTq22\`WQH:;")70d"di`KBZOp"<:%U6/-@@T@:KiYlANXit!UJGtk(.t]$C#o+k;:WA&DgiP+TR1r[W2n=KD.9,<_F>i6`85t/+(]s:7J?78q*>>7Q' (Gta[p"f`;AAsNQntj[<HcSFF"iT%Z,P]j/G4lXD3O,ABJ\L3N%PT8_[='i+QhC8A \.X9$Krtr0WPSG`"p8 tU'AJ#e([Gq21Or[em.EB,"-r'+:\t:ZmBb*&( kpA!:63RTsJLbATN`//D$WXRI7,UIpoJ+jDc`<1tVGpW$Pcr#OHRPs6hbZSTU=mkPVLAhL]>@*^q:UJ,G"`'&/)?BB3]4c1p%P,_% +/?^@oPQ1?7o+ppq"@$J+cK4!GY">?J1'-1fA_lNn/[rmG1,_j4oq62Y?^9Q11P0E5'0'3J+tnBlP^]e"0kN3Ekc1WonI9-qsMs@b-.'/AACX:FY190e GgVY!hZ+ Aa7V`Z_k0E865">AW=1Dk^,,nadpT?l,%$1UUUqnh51&M:W,(UFc8Knc0ULMJ`f8h]Pb><?T;qgM] pUNi?FA*HgPa*Ig6rC W*>T*i_Y$RHG5hsO 7bb gM/A5A'5 5,*;5S82DX%QmA5fgNi/1&P;q]dsNU41[#D2D^Z:M&[;0>b\:7jR*e* _H;m;@09TTsFcjAt-$H?DmP*]l<i`33Lth-ObZ#[_K:E1oBKA\U%ZRW/$HD3JrZ$M-%5!t<7.Z0]pM.75r+4gI;5rS?sXMr`\0Dms:P]2@-S@b7hF\ N\EOW*Djph-em`ak,Re(UkHPWM1`S@_8=pfcP8ZF.\sl9LfGbh6r!*fU Y@Hf?V<BLY49;#"?\^aaUV<$#)%"2a4O'aQ,IKl.n;^dEAqKdE4Z(HOgOB@!\-AG7!?sUlS$?.M)&TW25Kia=0F6!>DaNQ=><7L_aC4oAT*BE50p%_nI),^WUrm>77Ah65?7lQMn<"4@nc]oi3t-A5qp*BD@e'&XCij9#C.FZ6\dt?YRb+^2EJtNkB#o$)'-j*T;Y!\55BHQ"*"fMtP2[jgYr:N!ncG>:B^p=+AK+8_oOY8Gd/%R9]$3`o'aKE34?h>E9q%iIQM-+L46?T]4U7`fUoR!'Wekf._+*ED9<%CIS:^tD+/dd>0^3NWop?Inj>Nj[%&qpVWQ<<'dAhmO\pAECmd2l&?<e U#5.)HBCb1A-KcG/MVi8]>2 mDI:dVSc!3[<EGd*YLii!/6_M:1t8>Z`'!%dlD@8pAKZ8!mkb^^kWP[l.[7\C)Ut\hHaj,H;QPBdpohH]fdK?O.A,EX'5g-6 ZS?]! 3rTg@NE1_r9>p K>[;D9OjYkX&b1g4#fGP? @VHt"HUY@2Iso4nIZM6TA]&Uc]!nEHLj2[@dUd"(f(l!VOgneeni],`S)L:dL!&h2`!,$F,=5MZA4(5*0m%#/'iQY 7D&mN=2VB,19(_Rq]Df+Y!\=HGTK:1J?rkOLae^5`JbE9dah/X,k\]:A2<imn=MVE. 
M/HNTl8g$m;($GkqHs&f$HhGKOfe?+A)F:2^73<l9KSBWWr#R[/#JXY*;&'R`Kk/#o9B;rL?Aj6p8DIeFmGefh#KQA'@SL;>h.Gginn.j+Mb>EXl)Xsb[NNF"Bltp.U=0mq?THM0*nHSblADA&aHj#s#Tf13Jt4@!6NKoK(d5.TLh)9Zneh4)7i=N;Ns?BA5kc"=Hc7gUeX 9spSSBdPO(,JX%a]"Z_fAJ!:"Ds9frG$$lIrib./&@O%f(/`P6E`3T@e?Tgo]rbc*+5IM:H8"GN"j"Ti#37eB@"h44>+B"gCU)eXFRsTHp[V/&a QO:W<^A$_:tU<b4#) Vc[$+>[/0"2>T,YTRNY Sn_@0#m\3"cqBtQXY'?Ial2BW6nhIA\cUc=c B^!d<XT^L*`eK"'/$fD(;\=]/OUWNB>^W9^_-2_/qD&1#nst7C-&W03D>ah.%AU2(Je.AB.e#GZFFZRP_GPM#E dNaY 0VP1D8d.hL-%'UnXRtLI@YE"@q"QSJ5*<S6HM31gL;leT&,%CIVCA<0o!l%V+.>.<l_^.0p59j-c,/D(PGX];F20MbqZS/71H10Nb jUpp<,XgB: 3]?!;]nJs]66fbIH^W@Q'>1Z&LKison[,3VDs[^MEaS]Sn[35_.]M@DNso)?sOc94@$QJ&533m.$k%T[B?%5:!;D95dCA<g8R\ U\YU67Z/O)%6r\WpY?+3RN+$c] 91:cqN'9s[U?I3^bo3"Df-2+&Zp*=A$O3[kn#a>t"f8(M/,$_8@4Ng[TBACtPE600l;0IMBA`e;JD.6+d_!,C7X[3Ap5S+JkPT-T>@p*HfGgC8G8l$Gt&;j qFHG47U5rP;'A(8RiEp@-"gq85#djA?-M8U8l`BjW*7"o#3'\$dI"Da38TRHP=P=b^Wq0LaaA]%K'8Ykc<M'RE1.ZAK=cGBNZ5Q-ehe`7C/AZIBB565C!l^AsDa-_H!W:e9"//Q23bDZ<7<N;/8,s5\C;]dA!A6*5F]!R1^_(jVVN>HJZ0>RVokpZ#)k4 $Ta( E*Tc`dt_L%h2PhE.5Al1Qj%)HdAUd<e1Cdhid2]iBZ_)Y&3(Vs6!.kH\E?X;EloK*@peE%tn(n-8YnqG ]c3_ 7];d4;IWQ8\2NQDZ:TGXQA<7n,eM!=OH6N6Zd+`j&PR_??4^pH/Hk]n_*<!#RjQ_A((2N9Kq>@.S4R8'+U'p172,a1$&m'*N+8iMb:A&]<tDCi8/#cSI\a9V4ai5LalmGl^ogSk@o2VRqGG\[8,@&4;kt2O2%qgLmo=D q;O )14-B0dlr>JN+-=Ad,"N$NbM?d9GjD's27\E<@s&bG1?me7&qFJjE?g)-^fUf(SoF1K_\<l-"OTQH`-+4UJ_n`D=m8nTN_t'3(Rm!*A@p;=5oH"qq4B[O;qM]t1UF#Bpla1KBSX;K;<?]9ce_eYJ;\1'*I&lp.jHTAeLc9tI1g/cQf4D_mTP2_lsaRnesN;UcM%GQS/Ctest/sun/net/www/http/ChunkedInputStream/SCCS/s.test.txtaA@;pe?>W!jN@`lf3h*-Ud%8Ua!AkXGOL%U]+4Ol'!5J)M_!`X0/T1,N^(.SC77FpSiE_rZ^N*g5Ul:XIks/gDdZs70+kKtZ/sdg(l6mE"akc.1'.6) OI%<l`Ea/YCkJe%/+MrAndHi4!CXC>Xoka[%<;iI!q<$n0[X\X)sJ>:.4trHQF,< =UMci)!lr1H'8bAc_m\8W7C0[fP`^sZU0W?$$.%XAB5G<%JTq#0L7OC#F"3Lb#eC?XB6:[2%PM;S+e6!8p[lA*QK7Ci6E)FZNRk'42FmIK"7$A NL!DG0!ih`'L'-@jH*PF0+^e$4336K>LoV!'YRt+!IpKM5o7&]JRKrL  qaLj\W`;_NT 
>)t?;[?')A\^oa518(VaT5ifW7i2_DFA=.XE-R%Y+TWiZAQ_**DS@<6]AtYN3qs=;"t+ni@$pS;*K)6BAta'lcX6kMF2Z:H:<sDr!kI2UsDG8"e&>].7)+*EOFhSH4S;R7:QU`N>c.S9B*o6^F01lSo\/NX3nd:7>9Nn0! 1Jm7<hl`RU;.M:_j%0I8 UQ"L<++5*<OgN2i&ckKCBMBP`(i<rapC'm;/dces:fHsZ"pn;n_R]e!FDA\Y_1CZ_j8_ONkb3AJGAO1)elqE_ZPp8E0Cn!dGq#hqH[gf:t1=al_H#OX.BS0)`te92e0:.1m[nN`/1Fd#c3'6J(7eCf8g^$ttjOKk'fiV?&mR%jHYc[teB#73Gp JFp<a,K3._$NBH8i&<?8dRR^?jh*$3?S'.kmCi_N`4'_gLkJ/Ag#gl&3L<i('OhGXmgsC*?$7*EJN('mD2LM27N9geZ[-nAb\&:rGJ;*+h6C!tp$Ys("dEZUT3T.79sh"%Loo[c3ajg1SJ1NVnZsd/ HoFS(eq+\/bI.&ftd*gd[,#:;`Z%<M.:4&l1&aY2?'_A90V^ahK8W1P!*CtA:JQ$L417:&]g;nnfo\`de080A!dBWZ%OZa^1-@&Vf=(4OSm'<XVgL&OUi[&ci(R'<G0iX&V3.l.a+D]'p,cJ8El/8A%_8o)#3G#&F88V1WZ-)=m.D<CGCrNMG[CB,m2<\1%6BV10q46LoQ+8`LBQi[_sP]5@cf"=/3]]0$&+3q0pF([9(%bA#mm]fR%"$,Y$T%JR4T]^p=&e5r'<H"ra,j5T W$opW:i<GJ)^6s2'oE-W'fiPn`aIU*@G&KSbM*NKAeFjf$I83:X)tL*6\4(QjqO"F[akAH];)DO&7rETrNsR_MRBhNM>c8X$<5`jQ9,r;qNsognA[a*D2sp5:J<P#?=K<pO3XA>lS@D QI+phUn!m(;kKD,2k+b`Wh\M3+1TY(, mAs;!2;NTN.U^ /L2%D^'L[QT2Q[;4OJqK]Ck7E[o2IM7CXBLW*m1?#s$* H"MR[3R:[/.gC\[T!`2h0oWM]*&K)&Z,Kh(JB1&.eHq!N[M[AA$"Ee"IC1\:j7:DPn/h):XZM\OPG)o],.9]NghLT$DA#5Cl+I# V%t[!l9GmT;)8AOiFEXoL^(2'AVZMN#>0;9L?,JF(gifpJ_*pOHQ3o0:dC>aAU=kJnrVq_:;m5r9BJ5Y)df/`3nlE-6e:^i!!7@!b&&cK24pdA]mdr(%hi@YfJD1YJZ(Ddrj08G:J$iR%o\o@4aVPjB0h59$ek;]W)hK1B1-(^=.gD^<1Hgl8LmF9M*e:B5`Zf9!)IH%8(l +VXME\Bb>IRe8JEQ) XNm9A8j5.Doi^+%c&l;N$LXq4]U/fkm$0$;r.q7aX3)I\b[`RhVg\&@]Q.n/>g 70`Ugc7pRQ*LplB&QOro5!!g@8HMZB7@,8RtoC01JED6o5:l_/e'Q(7hal>EiWOh)`KnQh+h?5r912!?'r5'b+(Y,[Q'#='!H\( _`CBR/d;`6AUDN:%d)@n`Ag[,#mPGYWrZ]7?(NRG'Cl9$oCS@^WW8fL#po4`[U7&;&#MQ4l9T.Pr902kA!LQ<N+o4/=.N[=@dpWJXQ(U8 ks)70(nRiF&HE<1<[YU9Ef/ 4+W:0o4]OUs%@tWD!+Xe`%CE:J9;As:KGch+ORBct/"^#oRJiU(*iYS`%]_M;L#8f$N!*,i4DD3ed'P_]VAg4 oQ8QeV.h4t(=':Oc,JYWD\Y"?f1ML^)$oh@k[O;AjZ:Qp[\/LKUH9M1?N`/J"?Kih(RN7J.$.]Arg5n;_M8;>!A026pHaLYmq2=C"A%pr1rIoJN:ZnX5ejVhokNR_#X;5h&]m&_<H) f,#*!tAX1%mX]W(\%!('#QAFpP/nAi7U8Z$"3i:s"2\a-f 
r7D])pAqsO,JJfT9PM0f$`96Vce6jT_k(<Lj#$$=G\U3\gS1A.9=K=#\+U.;SY)>Ckkb39/GjZkTm)8VkLZHmj5%UJ#G2ngHtb([`WTN$Q+33HbF]qfKqUmRZ3H$m9l6*G06>+h g^<fc0`X7Y_\aNE]]X!`\pTN9=t:br\LS+kiB7QJV=NV[+E8H<,n\B,*N4d+@2ne+A@ )fq4\\NZcMne*\a%6hFY='!%ZOcneid>`NWdnW*Cg4kkkteOf*aOTRrN-:\e#%2m^87^1[J9T\#&ASQI9[O6gha2]jqa Id?:@^8]ANc>@*G&.(W!%J0?U\FJIrV&_CKVR))Gs@H=0I4<i-`B<OGk)K6L*"Z;MYqA+n5,0Sb@P5VLoGcM+/%\_c%-+B9,@+pa%R\/GLC2 _'cH/oj3lAP8lDp2?C.X2.9Vh05c@K8m3/:%$XI%i.1jr`8(rRLm<KK9C=5>f]-MhcM';9"l;L>dm-)P9fUJU@G*`SE?AVGn>AkA &gtCe`Q_-Y`I@AZtkJq!oD\aIZ5H(X;g<O5U]Z\hpOjksfb aKpH>9K2fds^;SA%_J"<dnF._00$)+VG:IR:rUNYf*?e\0*KBfR2beA&JnC7n^(b[*g'ZV^Ah70:!+*UO?pAPZJ A82GEHfAkI%f=5*?`,-<Z+*@k]F3=-<O4lmie(C\:#n-OX#+-l3ENgb3W=88A88&IhM?H<5#5:r?r2f8+WqQ?B@*rUV=DW;<ARrQ\Y)TCX(j,rPW8T) PW0_h.b:,&>Eo*"F+F41H,KleCP717"L69Z%5(Ah*j%:q?HH.maa@to2VqoQIm!5CoXk&"Akk>%/['&bP\X']n-A.)q$1OA_9C4AqpW%f`U-PrC< M %6DqGB<QWm7X@O!flCOt_B;2lmK!KgjG*'Z`_.:6_9k:-"ZV?+KZfg$<EYIV<'s"&N5<m6ok"iFNPZ3WJA1AJ*A]#Rj&pC=hQ*?2PX7Og1<B7_>\_bYLtdAd)L$+4'd+6K/.:ANU.5nk<5$9fSM+,PIESS]a'Fn"=*qg5PO-=bCW7EhL6/1.=hgF.JkE_GPMLs7/2WhW#ZYGLY\Y 'UTr;M7Z[\e:JIEAEjr nisP*ZAD%%X5<U^UZAQthgV]=YG:R>bLSMC`_0Vg>hbD]l@8Wj,;CX(K&3XT%tFR<>F)'q*#R[bcS7Rke'E>_6IV[g83]4@QT,&,. 
^7K>nL=N'[,Ka`2s4gerT?c1>+%+T9E6_""$NjT-]_A:C,\O!<H95nQQG>N)oLJN+cIh>7M9P;I8f7L:FJ`h[$88Z#".tae"1(Kk12CWL:mUi@[/R^6[Fl?`m89[Q'7MIKRtbVtq?jZOWADLb)5LfY9NDQlo[I'jbtm>1#s;n'F)J\B&X$T!9sH@4@gBL"KZZ3lpNV*pm%E,i]i5PQY]K[8M8A02L]4.P_8!6oBbT1e4U9s+LmG9BX1a1SoTJ!RArs-M+4oHkM kmbd?dm^IE$`>fCid0I^j_FA6VAr11+Grl$h=>6ptU>V.D$-Go<;AF@[>3j)B^`8scNE>>Mn-B>,d"H6>3]6_[`pbI Tli@StiR;]&^BmL(.O&<D$)U'*n7+D]7<Q :G`sMgokVOLI[nq#W 6D4.1%<PE/@nAYlgm& 3hAt_NHU&NVZefQREB6s),R:I% d_;.TZJdm3T@A<^1 >ll 7T:H4n21BRe?`b,Y9@G=BC`3%/f8^#S]K,AJ]Cd>%tA?@JhO+3jjb6mg.[lFKH_N54JI^7n+.R9()PAAj',,kea\.,*heik"i@KE_Wd,=KmhG]")2:_[JOp,G[C]tGs-d=!_7L_p<MA5,rL]jb*?p1cq:S$NLHAn)eMOLqiI\PpeUhSl(G^-4H`WVCG<7]Vh4i!U_Z>b\'kZf$1I'2"0NlS5XXH\hW0+tL6tIa8gWqDQa6SWil*jk4V31KfU#h!L-(2kYC=6RZSMKj.M&W44@Wikh*,8+A/k=b8h5/*O^Ei T.X=b?\bmFe4p =Y\%/5Ad:(39>AWP#ASmINmgl"gP8&^icBi;B?3,qR,UdC*COaV^0r9^j'J/g1'^k gr`W+8\reRBgta9P,Tao#^njl!.aR"nT:YQMp[+;sLi9m?"h, J.7VpH.?LB&'75J%9JoR&iS@9[rK=UldXCCraQCD_O("f>ahCTF/-Pq,e*QC7HO%sAF9-@XEV5QAft?fZ+]j6If9?D/*Xq,>H]8qjGaZ,4coEai+qPAnrsoL5!OCPl#OQ2Qj9SNTT5r=@-ghrL#+Z?)f9Cn\^+VT4['%"0Apm \ajn%s;She/E2cGUe6S7HifFtC\C'f$Drqhgn6BknpXP`&%lOi=t-aD\j.X/SAW)7AIZG&UWi_(%A! 
I9pLp8hgsS>;Q;rV5oG9dLDI1=[E9K19`__]/68q4[nWZ+J3Y0X Xop\8bDX.*#el,[HVfpa#`@JL-`dp)[Tk#.:sZG[J>9<e\W=5V,lnp4i,<g_cmqV2O;)W5R`64U^=Ad`f>P$:LU$DZ--9Sk;"PMja,,b+r*i@qMY*07mWpi@@c*@ G'%setr`Oo i$*<)K6UY/`X<tP2qM,;.An2aS6=$B86<"3\3!731(Y<#tL*X0[B+#BEn%F6lHP7/K$p6 bH.PBt=V"Hj!-T_ 6@Z=drG0j5djnd'BD,"*5],!!W[4epUO)E7\jdeYUbPAVq'tY<R:EjX0/EnMLIUffM0"TB^$XPVSGNnU5K9XJs-4JnEsr`@4-7931N%+\gAh*>\3./4W,<S/RRGEA5o)#Z6MSgW<0TQ&V7f;*dNpn01YprAAPoFH=&1Ta5([9[Q8#D)+Xq)AAqE+V(Lto<RfAJ,e4/"RCUgIcY&g,:IiCP&i]?p>cg])P.^>ENU4ZbY,p08=b9KW;06emM0r2D]rk7*eq>]UYAQC)68bl4>opCE\@Cia?Yjh'DNB5e!*%JLf=/:.eGj+P4?DC_Om=IFNsJ`Q,R`E9*te+:d6sR8O$k?=rhKA_+]7Z]$]6aNKc[mmE,:FtHn"Z[SU`r.E6a;'n` .6L1QO(!JhXaMIO@+Qsq)'+DD-5?B4d$!Oqb!;,%/c=+I5tW,Oj<^a?%TD:r%f_9U]1t5js[JC>f&O_cV"rIgiJs^,mD?<?a#[6G(U4b@q`T#6eJ6.Ab!M3#lmO?hsEZL*(dkZ]c2=tLk(,oE6jHZt&N=UMKf'-,$$)6 #7**I>g(;(_GAV)i\M)[#kW''Ur!O*kP57AnnHY7[47,R[&9SbcXW*BVr;=mr8+_U,nUr0I_r^@ br^=pAgHqka3BEOph67]1U#"j3567IZTC*`oGdO21Q3l/V:m7$KdVE;bV28[Z#G.F^&cISNhZ82.emT>oY/^rs.//X=<$%]BB"528)tTn4ETK _5C_IZm;"&."APR@,;=JGk6"o.[?<%Tq4S+9WO,Y:aB#0J<DfKk(5b#5sT<s>LjW0#Sm(\nr`o/sP3Ql%&%j&<>doic^smX8+27 '4\1M h_0q6)99eo^tAm^GH!C'QF`e5r:ns#*bFm7-:1 ](B`E2trob<6e`^FeI\XUA9kMIt0'M5)T_IoX5pN`2C*E#\2AcR-Ns<Np*/%X WAd`q_JP_gEEM/1(Do<C8XgY#Dp<Xm/%LQ"81F5=N2ip?Wgm>d+ pqo5=;0JsKDQh+'kN'%;"o%:ChHIN$R(KD5)A7Jc[eHBs'!C1>3m(-fGVYj$ZiX/Q7HArA'Mn;8r.W]i6m8/f,>4>XB@MJ!Io(o'T8A@s(?h`KbIGDOkMW_XCee:lL;\#@1I:YO8,p&dMm]#h,*B[o;-E%qUt_O00%l[_BkDt,Wkl8g\6@:S#a302_O>dZ>*>8)Sat@-\/Z)ds3.<cK]r=m?rMJRU//=EbSR$cY=SD2([/I/X\8,[noB[3;ors H$FV/;4C":NeS_0Q=R$^cai6:+r^/&Yl?BQV,?edHXV]5IkLG/CFp[19UpnD9sA=ALs3&8WJH-Y*r>t6]-a:eLO/0_0tPT-Yen9(*eGfRT@F")SfYnV,$?L>%q,A7NT=/k'oh.kI9f.4kQK S(%_X%`:"%g0/]-iEm&[U\ARk8N+U3bl4(Z]G$9_d'hbSoK#bYreD-cQ"]W1T<lE#l1]>26F@^WsI 
QV,cpI.<WPTP<L0AWn,X.8N826l+S`=o^_M"<<ALZA/7J`TA+CmW%=Hpp#G.LQQ7,-+d\X_;XKsIh:V;?^EJ[CgDO=W^d5[HJX^sf<MV&9^EGckgbcj>iGi0/^_H5W^PS<eaIa+!`I#',0+8EG_45qFL6`VqR<?0'<+9K#6@b1iV5lT=n0;")M_As1G#6s;+SNtAoM"@po5<'RV))KZnbo#=h7%Kt1C9glfp7QdoYlP#O5<ne4nA`qaYG(p6W#-<'koFtAo$aZ/iWiDlbLFj)hl57(eVgAqV:r\VX@J7?Q2dK#4JmaK6e[AKpS\:Yc=Wm=Qc*a+n=HP$8Rca8V4""'\Xqk]Z-(ER'AQhAgs'qL4b#R4#dfJ9ZI[Ml^+ V5Sgj&!)!e"R'qLoig$a+22Xg$%Ha]aIbfPgUCH/Of>etF:UY-Q4GMPIg:1N-Ol;P#iQo3i4CE.]eYi!b.HPqP8?8rqMK&)%ltAtt7I[2Rc,.F3()Il`)9B[FH0NMiF"imOiBAAQm??^25+I6r6eonS@9&nM5t#5HMGTm&V?EMn(#^6B(X%X_4*o&["<gpegC;NPVl<q!n^mh9(G;!9A44i$aG B8._?n.9Kmt_Zj3VmR2>iS<lhfLWQ"r\(S.,[FedA P[:eI.[7%1=F9feY+/-pqX451L:lO'FDF99QQr3 N?7)e8n=JR*B9fd'CSUR],Qm`e@9#bgTe$TNn;aV6#'!Fl?D%QIa&S7'dg,JOJDlp@ZBl;Q#_-Tg";T. AT#NRW;.50QJIm9aZglf5YlG+?*"h7hZ)^Mp $`9&<2X_i)"snl(JEW0[4)D9A*/O].#e!*V`YXIe<;[*W !UY6+;+gMZ*Va^Ik&Q;_`Tj+S9S2b86$*Na;?ial(-5HWQA'T;rp/kbE/#9>mAC77R<pmk,dTJbl*l+&Dt4*$5l+L5Kd@W=@@r)"YYeK)RQhDhAWtU;jBHjg<n+XE(-4#rZ/G*/Ykp\nE;kb7JZ WdR86< 8.V=_O<HcE8cdW!\\&VFIM=K9)ZJ37I<2SNB,K`&9FOIdcZR6/@-Ykdb-hXdSi,5[#gb(t6Nc>7$h<(k9T(XHbMA7/&C:WHXsLIsj:_JJ*Pjpe$sh@tIp:F*]"#H8KFR=QiEorEFC2VZhO'(\4^C9qN<UQIrXifg19elc4/Ebq;8^U_o9Xp2_f(SMKRC<8Y]#)eI':0-CiAtjdVV#kZ%!3Ja3UftM%_[FWt_ =*X7()b^A7B]M]dog.cq;b56+MLVUB,fO0<,>Qh,rjGm8[JAmGAC0V@=G;mMLn%Va\ZK>niK0tYo9E7\)kK\U<#6(oX1Ngcf?Mo;dB`lpDI9fVs2>#Hf>LXnq7PtYp[8>:NaCgFI+Z@#As?kokS9%.00\jN"UP+F9*5dQ:ph\)b]Lr*Z:E)bkl q8k1L3mKlc\70&ZNfp7Xq&3]dNg@[T3&/p,id;XI54:&`jOVh]U"Gd4Y"gq5)\n4f!TN!A#lCG:4NV< ,%p`b<-4q\dt^n8b&lpgM+dc/HXAsPgFABAakW!5h38tHVne0prOK@GV+Dh;.A1#;WF5Pb &)A`Has6C#0[TASJW&\8JYpDTHK3_DE1RTlPjRrcm;G&I88C2CrFY'Lf;8c&sr&pBrTSsN-&LTj%\-ct[k^\Ro8JPP% 
J;Pg]p&8C7`CRLg:cd+T=L*p9VD+'[R$EPM"iniN=s(57WT0eU$K:l,;rATGAKKE39d=`!N-VEA&5[b!MH?;.kKP"XrkJKa-\kZ[SLD5VO,Z+J_lNp@2\HE6^^NO%l%+D0^c9>j6D<\'V;;6[8hd4)2r@\">!pAjXRrKW_:hPT*039!&)D^slWa8AVt35M?6^?8)XTqI*9O9$SGsX!cbr@t!.FSnIGf"_TA(FO)_DGr?Cm"*'hhOkF)C2fiNoDP8YCIkA!l0gOG8bCXsn.F'3=9#(A#r@K@Uq51k=@_,Qmf4;^p7XqnGH=0h#]p](HJ?Xj[>r>?U-4ZH_iR3c;LenPcIF/4\*.ORidj\:Amc]oJ.XFN-<es"F#Q3IA9>aNP")GnN[r.A9qFK;Uc;f<$%X>U+eU]\#pj=Z hqWM:MTq$/&N;eS&<8TLd:&Ab*r$I`^Tjo9Vr=O)->.eWoX67"jKGNfFA%?q-'EK=8g'_3k:PMOEKE66cTU99+#(D PREH#Xer)2ZO_:4H:'/d Hp!I)nqOrglfos*[lJ#rR&]ShTa)$W;ZKenVk`)S[8@?B*'3dHLh$9&h*XmqI]Vk#@AZ"RV8n(tC#LlbRUJ?(hjnf)ZtlUY[ sgHsHKC%1nREJFq+@D.nb4aA!9pS?c$#$-DA*6GQ568LTrtQOT79!lk2d F<V \=VL),,A*r7Z`\M8Tt-h6b"kK0iYt<3jNM1@,16$2'fZS#6XF?)@U 0paO$6SGHL?aq:nOf1 F=\]]8HI1qMI^MVRTRK8:rAMciqAYgWp-;V<*eim;N=!c,ICQf.5jD(U*Q"s9mbb +kppVO`fBYCr%5DOEt?.=8dQ,^JA)po"`]Aoc&dVOHI"D[2ZQ&4LUl:-OA-LG.k^Yf0cfJJP)jBF]0;QV_c\^J[AbN6.t,*Ya"nlm,m%$:i(LURoO=R7C=*pS=!=:El3k0PA:!Q2YLAiNd3OhDTX>Po5@"s!WY7q`=s1(Q^S90M#2I/3pHgkr=3IlK% 1r\_'N\1cEU!;668C<oAKN*1JToC/'IakSWG2lc`DA5E ^eL6(#RcT3"c,Jo5EJ&(9hWoq.];E;6#Hh\l5*es\7r5h`O*&,VFX>"1-W-JJErl:VNKbji+2SUDcdH0\3kh>c26a1h#6$8AV3dCAk<5EbS1Z 4`1CE7a7ELnOsLFXTVF9S =N$7b4;a&,7cUY2hr*P]nX06GrASL`<\03eZ*[lG`_k`i);at3041D"Q$`9L]YVZ)Im)IX<\62,DmYMR\HS"t?]o1:NBtH=[:>b#K&Y,$Qa /sl][0 &?pbqX>nU?)1#?jkC0)^$T;OIEGAQ6RT>Z2+')DgKA232na)2586_o1=A>?mBk.9\J@isKbA6//kpEdDbA]pCoK$f!%#?`e;Y-;d[<`&+=XS\&VAoaFZWlAJ#N\)4E=0tMCMKXk$6 Qd<#5@rNCh%7bQeAke(c)Vi0bEG/"8`maPqA!RC6D1 ZH8ME@A_[flBH_CGObF\AAL];j<a$\sCpRQXhFHJ=+IQ'Ap`NnY?@dpaXiO6'V'9=]XA:A^a@dD&9$;GmjRT <5BcG=72X#SW_-jm8NZo *bf$HDJT<F99b[a`]5`Aqt$%lr0[-IGP1m<L1tElo@J567#/\[)q9J!$q0RcS:48tfhg\?8I9B)bd)qqZo-;O$?! 
GR6O X7E[OqO++"X2oFnYr5AZEFqSNi!MAAJ4>_@Wkf?hM25%P6&fg3/0&ZQYE4(ABStJ'qKG&=YQ"A=esLc<W!hKDoiHFYa3?9]",+Q'qDf\n'qF1*Y3q%.C;,es+'SXd^dCto,X'<ZEel$Q1kos$a(n$G\Iq[].13;`PA4'qEA?X8o !Nn0!e5D<`m&GpQf?AL%/iV\WZtgl4jrC=(5eOQ(TAS9rj?5%,]j3>a`BO5b\!,Cs%U5WTM@Xe@59in0#Wnl2CGU/f(I.nnh8d9nL:.ha],dL%!O&(;Nq5MR]r<pR\#Sld##DF;Z[sJ9eGCe1\I&V&qCqEh8h*ttJpT5X2eJEi(Kf;)JQ`%AWI4g/9.*=?H?2@3-:$BNSWXGXdX1)$-fNk/7q72%GgVXc?7.%-7` Cg2ksnoVOp#K0K1'N*1Die>N:/Z;ga-Rd_JjKB)*`_4T'2AY=IC:d./8@WSU1) [8s3E[W#gm#Y]g4;[r>XD`LY)lAF4[TCgf$-_4' 9MH"I=N+!+g4q:;4m9WI.n/0dm?!6mW,,C==\O0^I"PPLZq2b^J$q?gQG-16I<rW'6]$dII283 UUW7V6D'[Vc)la!]N:Ym*K'^0$U]RH_N,U:]fChQQ;\"8\A.Z"\1-!mP#3P"6TY3:Q]6Dr:&6q:N9lYU2$QEtY]fR3aCR5P^#hO4:X#--91cW8Z96HVZ$A8P'm9p,8-2_R-Wsh2Ckf@4+@Ys(UoK@*CE(U":0=1OA_.Rqn<taYeNB30THNp#c-7\ZZSdGK>WAdHt5a>4J/_ls R#6\J5$X]o%<%'"3 `A*]=0>+@dhpC!UWb>_+;@E;ka<HZKR8T*,"<7J060U">\a h9Y9M"7Z_IET>)L k$d<W#+Y-)D,lI'Yp;_ACgMEQmhQWtn!$)&9m(F4VZDP0nWM,;dqgGt,rFM!A[#JCK!gfSE(h1im]f9lW7RVa<)(th@re]`/:Cgs>bAaL9]AM>As=oO(Pc-D5@p#<'NM[JQqZT[P=BLtscn*t6C+EYZ_fh0QSCk$LI0(a&`:jtpm6^%=L?8?]\F"P"C](I*(^fSr'$[gatV&o:&k?m^:PMi.f?//`>jOF%`N/ 7-7N>d(R5l"q8=<L2b=?Em0OkSR.3"\rG:o50,sp0GhRrS$)e%Af1^11B)^ER^@mjZA2%`/T]0Yq<A8`;XtM[+[,\S8"[\RLH/tc[<%6c mrAN"CkL=bVE(M';(5.Y&76diNr<PDD1ik*]7sprI) ] Pa /l.@O0Bt9-G'N[I(QGYdRt3&Gs<QgCJQ'DU\OZ$[8NVro_<T"8dt=Ba&\"MjJT[&ZMZ@'7 sQ.bMcf0trcH],A _r^A$p/*7\mZP"*id I>D^C0(-M&tddJCYL:UdMj:pZPmi$_;/0cb7H/1?AlmR(S62#F"e,&CpSQO[m9[-,&dal@DWZ.7+H!;etAsatJpZg,i*okI,S;8Ef`rj$F-N\'%=DM\;$jk (Wg7!2531OQ;Sj$^j^m0jU%(l]j-e$RGirG"po^71[Ie9h+(OLAU9a D4e/HbsAn<P,5@RG'M:#g/eVa@<@L0,(7)X7<sVT&,R!0V*3c%Z8tkS._Io(Cp^)Tah`r^6FdQ T8p3fXjBQ82bbqh8FNC`L[i:O8CX#c7&NA5p%Ph7Fj=Jg4eTV4_8^fBm.:L7Cqqb^>e.d%SN;;goAQ1_`SV 'N*3#kBs 'TfNp`_JsMp$VYrcpIG9C+pV7[j4d,5N^\EK.q1ff!AU9AQ$44B8T6Hg=T-E%"VjgbM%dq d@13`UgbS?#6I$pdl=Qb"p1;A-Q]0Ro]bA 
"k&5G(8>0`#6)JtrC;_hdrU@CBBrb'%$C=ohkmZ0Q[\1a4A'8l)nA&o0#<TM1aXEGm9.%BqO^dP(H&?q%?'FKQ&Y0g];8,Y-5c"f<1V.t(%#H:;ek!imGA"`(k8+.ak^_CUl(T(eZ@^;sf0!saTpj)5peDp?ZE8?S>n'=3!iZCh-<:f'ZOb0<f&)cZr,!/r-]^I<cQd7FfbkOOQcmX@p`^^4;%OKtX<pLK 7s*r3W)kWFDn^J sjIQ4/^Zj=&7&Z:\&+\^%4-5bD^siO^5:&F9d'I[im#fMPDHhVJJTpJ/sAabPtV85*Qt2`_!2GePMb.g7E+`D6)]:dFAYm)>;C&mAF\[7(f7=KfAhpjlM7RT)%=jrK%C&I8J2&E8,+c"mag3<rUK6+FR@M'=@L5g.t#$k9Agjd^(-I@C7Z'!e!''k`[Nl'km5P;;#i"OfWWtNVPKpZ+6QS%\3l;Ot;d+NM,=h#Nn:&]UFP2n0_>"re'""-#rrA?O _0G;KM2gQo`HheqqRGAmAm7"I3r2WYc'eeioojXT"V!hZ3`D\Etrgo-O,VaX8UU_#7GiCgsXV%-PK1(JSm7R'&eN$kQ "#kUArkU4lS_VQ;rVsiQ &@A`csf1E`*+EjNC59F[V\'o0$#Rhbs)><^/Q^/bqL3Z3k. [Umki)clEnQN\. s]S+8ZF0?MBlAqgC\[o7F(1WQ CkHB@F>BU2f2Z>AZ694KkP4q;tkSKaoF\RJcB(+-92"7=6Q_*$'[h>e9W-2,'"E#:D .TqXm'"HeM?]3-jc*(T *QET<^]bb3m-P Y#-\Ql'WL\X5r: H_BjbLsb9a4N"Eb/Em)"hc2XLIKrp.QIXGDL.n*X5A6:Iob=-6k;SAQ_N'>e#C<DAeL=8_GZQTE*brM,-o&FALgF@G^i+HRLB)A=H,.(U5tW_"F`>\G?BtOAd%r'H8aX7%5C.5iR.`,Ng0CEKL6_:=UQ7F])d6r(Vo,^O6[I. IJC(Z$kS-sBWMT(mRG*::NS0$Oo!1O<e!7hE:R9hJY6IE?as"^YBI#/Cp'9YGW`V*c>`IO,&)k%lQeG4&oQ*2\aPM8]Fka(/AH508dooZB@\\I\5;iGUNH^;S]NpcJZ>j!EF>NGe<i!2Fr&"T\cMiC@K-h2\g^pTTPl%X3Q#2%t7H/cBGACb/q%>rU>On@+&d"5;;Lt2A=N;$S67l*ZT@Q%7A&t&c[&04&C]e3M;+!:k&ZpA`B!oT$CJ:IJ@Zd>ki/!2_-9/'mki_alD bhQB,nUVA#7dW^8EB8?R[o.<B^,*,^7%lf.LEp-pHD>T(A5E-?80 DSoTFEi0<plbV]/C]#gTHPoP3.+5kF/KeAOUeW>4?%h1h$HL$L-L=nTrHlLAj;q^A;lL1E\XL'p/B5/@9orORMon>Gmd+6B8_r='+MABWfsqL:ipbmcc[lY!A#^f;AS\(\;inAR$^FTn<6%'<rGA KY]PqQ(B CG8km(^4*FaI#9/AX5dqWkn:)DM5eNn.iLnl>P3dckS&]nV/q-mpOIgF-6\kriesO&TM;<HBX.]iUlX'D^.Yn,HQRL08N9l7)9F$,hop&[:;8e<`k4."P?Y,l8C$Ql"hCsEa-8;e!n':))D_+qCBYZPDk0hci5ZoNKr$t5=*U[p2gr^\Fo[R.]T;U7V7''2&N*3KSFA:`3NcmBr %A"%4?AACV9YH`H1p]3S]Xk%npQMk/4%)t= "rRRH@o)9g_Akhh>B9hX.[d[>qbd-J[Bmfbd_/j(OVKX83N8Df"4JX^:CH`\fT4,')p$T#0XGfAqY`i]/rm7"PNQ[e6J1OD#LH$QYMMqH1%QL.[tW $etq=r$F=YA^L>DNj4Y.?_C?+2WppMb^:j!.=gl1/<Pc"V+0mjfYF_<m!0t\N`k[kSTP:Aha n/<3#"2[Wi'^?.r:I@!Y8P< 7seCS0OBZ9](A5?s)Gal-3UpJQ**nj.`b^1O/f:*HG1DoD?:36F:*D0U];^Eg+ 
PR(D)['@Y;0QT4L>:?pBa6#tZltcS)R58;MR09A#p!Rm%RDaTT^"o@-C@*%<;)q%/4.W)nB7^SrWMB3l>$3`UfoUkbgRW>FGK$M7c:gV6G4cWcj(+&BXnc]6Tp+QP%.RW^'-d3Pq! U%l'j26D41c H*(gS3NiQIsa]$Ion p8H3nr*>A^qpl.W<mL*:J.4RE@\g`HXc)go QcYk.T"QRlV9XA@N"ei l=HN</:l,m>[6)\-o+RU]Ydn%s.ON3@Ws`O1<TUEH]Vc2erbp.GYs"B'0K4__8YpA=E6+R1Z.Ad27;$!T_L\#7d%;4c-5T:D"!4h7koX+m2,teAq/H<]bBH&@.'hi.g8J[?o1E.T ^$::r:t&V'oE`)F$;jbOnA24MY2Q59ms5qYI`Ai`P&]'V]fhmi6KT&ZO:1P&eeUZEK+ 9\-00jsAegg\+0;E+F)_A*ptF,?,/dVZ'FW>*qj/B5AVF4W/-1!ib\(B9@a^gIpW;e96\E*d0'bmpb]\1)22,D(jn1<Qme[^])fZCeD>X?ZEglE[U[9;%D^n7Ad_P7)iAXj@tOFI[@4=>>%[g87r8T3+Pt<)4)5Hh>ODI>'?5@APJcV&bXH*AY`<T$(Rs)a.+K"EHpJ^-Q=1pk mj%](g\9<>#L<4`0ef;N?<EECbn]Ub`W5%HA?):HA+_+cRP7#fimbL\e5SsnXN'Md*m&1#X%1?d+D6pNE`J21s*Son<O)i'jcnCKA$("p+%ZBL3-+:L=1Uhi$oP60JN)T@Q<AM7]Tss+q_V.d+GjYG@[9+D!G+`jS6_sHq]M-acLOb3+*!_gqp;\5Yo/P"joMkrZh)L5fA0W-_^Q"&^lDi:aWef?+d2p>aC1 P*76q!4M&T3T-/A$njY]p"KSCla\=(UAaWKAi@>iViK+" `]F-EJk2b&_:6OZo9rJ^!tWW/OFNC<Y?I1bK5o1N1QX8]iAM>N<3lF ES=MTpc<kpe1G32)[*p`'At?J> S-AnSDq0'Qf2,.)r5q!MQd03c8g=VCR:)CEe[2HVNg*7p'+Bp3^i#3HjDQ(7i#!!XSAmt 4P-K;,!k9iJ ijHq4k]LZ %KNOA+kAN/^X1fJ"2I,0#<1mlMX59[XUg7ZEm,$GRW\L%LW>Go/qCES.A-0D/fBHd3R("7&n5[Vi;&q)4Rt)js+YEJg^0 &\V9A"KD&8r6t-@=$>! 
(2aNc:5+I02R3qS9Y n/FBP%$;&T8Tc]_X6p7(;*D)m5<#b Noia*7V>Wha)9/Z2i VW;S"8h=LsYpW;%_.iM3=#g`L][H1rEKIPHVV->?):`8!*Kr2<f)Js4rEP9p>Tl?hq!LQ;\4=rAfL*Aa"RgH1_lR&S?F5HAjP&T0PIg4XLAYRAh [a1Q+lkSA [("*1BWR+$W+i&G9q1][oPdi\' V_1[kqL0K3ID 3h.MCX@V<8Q3PDP4K@=]ob7A)cI*6YOkn\"R=o2@2rXaZ=h&&8SLr'2?8ht `2Ygj?Td,lsoe`b9c8hMpQX&,:o=A``PAX_W;(+4)UmgC.k%k1`?HfBg(.l4%r2 b;s)4q=/kU07;*Z!qJ/3_n$j9"=@p<([2LfI7M`##'[*meTIHG%AZCXWg\6SUo]>aLFk'&#RA5n:oZ,2raU7dIE)l/eo6S<6^EL+tZ*dQ^@m.nr6F`-;sX'@'hOcoT +D)Sgg(\5B/7%-ckN^1gB7<M-*YI([hqT]s^4L7+,6<q%`aZk[8M.DOX[XBA3q350ViQ4nAsbS8t;X>7WogW&+=4:f"d."r)paXl3>HBFX6XEGjLJ3*ChXJ<s&`2oCRXeDTQY[OLD8`t-H\f@\1t=cnB =0PkQb:FM4tc2tLJ!OBR#VU+#mS1GI:Kb2L%Q`"*Q55c5H8-VnAB9'>S)k.IXhHZXITitshjDD;mSU43qBRH!,MK$AD-&H 2A"H?q!dj^rZ9= \TYg;[G`SU/@>A`0n;(a@Z/2O>^1=5);qCFnM*3EnN-rSQC ;"+$BK: ?ZjaHJ,FCq>R@cBhXG6\Nf?Yt^99f!pI2e]S1*\p)78"LIY;AY+f5%hU`s:heEq;At/0?^2n\V&=pP5/n`KPVO9UFmNTgdZ[ZgVA`\ILfOfA/$\DXq>@QZ/1AC:73)shS_[o;&%iW<;^2M3=hU'@TOmK>A#)OW`#A:N^@bWOpi@[3st>adqdQF>5]CsdIlI;JrleEc;"dtUI.iPA6A:eTAs$G^oKcUJ?+ct4idA+(`C$H%M?G42c*h(3Fa\7j^#U9(`34N8R1b2qJ_dr6dhKdU62W2L%];]8MN@ipji.6s*fh[Ji@bJ3Agpt_cIBU89Tt('!t)B'J`3/,W?NaYSr92X9t.n=3A.djBQ3Sa0p:@8/oqFPYh(*P42s" 8@:1D\>nE*sn`/"^ATJS3W/?atV?Bo&P8@m1 HK;AS7+K,*FEm#<ciR%3'[*GYK o@\DojfNB4?*!/2P[!.<rahUR!U5MDX_ka'l_h$ cKWN"XN?c:Dr`)9'hE[2c6mD[*_OSGnj$65o$>4X9_oqJjXkHg=S^#8RWRp1`!j)k*PY. MmlA&1\r@*t$//AKr8F!R^.bqmse14(AF&Z:Q=o*LSj:>FpFn(erd;IsnaOKAmTMH)9G8G@YFp*(#nh95M.@?iX9,lK"]%r<HnW#qJN=[^I*9=b CC</N@eVcp_AB^+N'ATI[FW:73C<4\*>><p<^' T01#ekmY?7leqSFN_a? 
jA4B'I.VB_K:)P4#;<V]BT`6=ho)Xd]k<g"A?a!!&o3C3e=T1D-E9EeVTo9o7BPRB7?^_A9L!^Cj&h6f4gDqOE_g'$opkBmGp/Bs8g+]`(LG\#A<F1]=%\@e5"9]D4c3="g4e:eSi+8@O$g>b4#CR*A`cT9q%CUV[_(NYU7LJ%ciL(4P=DW\G]/7)=M31E=Q=._hI1+(Y#4e;EAK9!6qbZX7 9P9YY8q7a\#PeKlAWO5Z5A(1/@r<VL=>1?J@FA:X5[0+j9I9,#)>26ScaPBl_T8G$RAk4^b*K[F\k653<aALe7A<nT8T0 ^mZB9WlJaI))n!l??]`9SjfY/\Pti$+B50"hp#ZWBm5NW-j$bj2)02!^%7ZoFAZ.JZO$/%-2Yg>JaN1Snf^lBG1o=[$7<``l1jAI2TsIl-;#.16D%>E(C^t65)0Xm0(CkpN9F'Sg]VPQ`j/D3?$G+?L;)$[Sg$(^80m<].TF&C+h8%[-,ojf;GN8 qWYRg/CPL$qj7b?7h-D24*G1mC?c;"Bq trOD*aHJ+8:4M1'8AA,r2*fK!%&P)[.nQ'af=`W6<gE9T\?)l`+--aAJnQ_OSo1=dmnEZmdR)SD(+IshZn+`5e2"Ba7,Vgb/'#?qNL06;<&#BdQ]hS)MnrJ;XT::XPK!5"T&sc_@0X .h[1=.(;cf5*doeLi"=?Pk=Zmjq G"YZP&o4^MB("HBgZ?[k2l?WNBD5 :,,[[+o1#Y3koq(p9t0MIjV/dlL_-jJP6n;[/e*XWW$'SH@fo@j`(c^8R61arG&O%a<ZW'7]"hY":csZtB-IW2l[W)N53\_\ M DeXeR"9P`<>/0:h9!9W06U2&YW*=m/K2@ANJl&KPS"e:=S$)sYC< _8E*4`=d\]NMLW!R*=5H!@g4rV7F)AtX0!^U CFP4e-:b?0AOmp2+UQh[PYK2H;@\cmW*!QfTraE2BFWQ]\M1jBphT6>_?#8A]'A./9<kY=+Ip-?5l-&Y1Q./Ub@Bs+Ae.3s7:YTdSrKF]^li@*( ZN"%,FL93BPrOG.A&7I_Hk0*C2U *#WP3V"=ris;oN,*`o[iJ:"RrHIOQ:e$FQpcMqLF'Yc@ERqN&PB;No"YjE+JlADehI^-"5s+T\XNho-S<,0+7SF)g$!FkC@r3$M3 _sB_+'^'!JcQ6-qL"U<3=1o2hL,% :jV,:EJ_1Dft$6'^7m[A:alJ.bFa)MmNkZ^T-SH.4)Rk[cHY(7T2@I8Y[4XDd [!ioGAh$<7?)%i::pZP/p<VE"ZpD.@M)DD,R"Z^raq- p5%UNiX$_D..(1,6e1oeAFI7#n-C*eSlgjrS)C40pMteohS`(l(?$<TdWpd($/Dqa*K!Z?s*;$8_.XkJe&n;8L>'.[%o8W_KcWtoF:*>W,6PKR`P=$N#*b*R.0.F*gIQhgV`)j3mcLZDpnLK_*&=@)>B'.9sP4DQV%dnb]>=.f+bMq)#kHHUJ^dC7"RiAL@QDLj#r%A#QV)`5.<WMs7@PU5,<9Fspt[AO#1g kTcl+#^,ON`6;f;);<?a?g;rG.N_#eABHX1.G,"\Y`V1Z44DDWo^4gKGiWTAB;g]';)^E2t_rp29&-64<()@No1`q-#!VDe>6esha9J7*>-[C#:&N(>\tCV*aB[]$XQ3S@tE<$%^Njd>#n9>.K.0A\j@pEbP:FBAt9qap*"GM<X\_gV9.ElN>$]=Q7 7?kR7l6,)fXV;^^GfoFNL GVS.ni#P\@Me] <&>H5o4(B!g9<F\+7_YY"fW7'$\ebd_U.s-NeAi><a'LmjQiUj!@PB<Kp#bh1jPfhb;dKE@6m6b,T,9im,:$KL50lp=.g<>pUY5o%[o7dnYtg[PlctW`5`T.RprS0CD2m?*/a.?+sHk0BpAtr84RE``9iK"^$i=aMg_e79AMf_p1qGGjY,=@>E+E%5Ft PLFgKN$;bA)XYFNdg[1DPlck<NMsAb2! 
@0t=<B>ss5mfe"A>Sp@R`mL%KDnZ/=HU@ZA&a,N"cA?KOY*(W &UZ+f]Wd6R`;<p34mA>q)bgnJ(m>9D/c"!6(ki;AlB8UGY-?0m6l`l[q@J;.`RF!^^psAIHXr)CMPbDRNJ\dFA`oVG1l&\-?I5QpAQTF+RhF'!#=F\bOh.MsUKUKr;2:Arg'(Sn<Oc%2V=E0&LsO5$qc8dK"#`6a_>l$W078sP:kPhTiUGL[0Q<!sk8-8t(D&Bhs!nqg YL(hX!`Y03k:*R0'(f)A.,toa>!I$/!+)1i!@5+/hs9@El2XkddQ&JoX4f]cB>Q9#EfY':^D.I&Mtl.cAR@75]Q\]PbSlsX"G/Z6$NUe.$Q)+gGpF:I2iJ@O1HHEgA,d=ZZ:-jLe0U4QjDtN:pK.CV:d&_^:U(,EGZPF5rA73&L8V\ t E! Vr6\J;*\[NNE8QqA\(m1dQ[6VHT.\?PWq.O:l>"WAm+H^AdIs a_H\2FS*tLBZj=*oi7+Jf!T##@ <Kq5IqVPK9=WTOfIXX\Mm LmXd/f[`A&Y8gK)[;\$=eoWIBh<Z5e`Uq&^K2(%p.T JD`TIj:`nf*BsJIm\Zo`Y#c$3YE-PAQ24K&DE9t3a2rA1-JNo)6pL".L4apa<,Y@cO?f4d@d;Hilg[9-"kV4DKc45Z2'_?OnC5ZD$<`:A2+$%<KqP0W=)!6m(#1Bf\*r h'ZC:&"NGQ(`84k9(<U:3H[6t%@?OfjrtaR\+fJXFsGU&[tB23C5 \9h 4eF6b_p`8VsUQf1J.4M@DoQ\=hpcH>'gR\Rgh(GL0^)a\?aZ*53&&t!Z9_h)CCT"]^?XB+>/ZD>e,==9_YQOYZ@=4C%J)n5"t<7mA+<e55U)s9!.>s;l\?K%e@pCjq)Y5(QCt>[&L4m3kb&5/7'Kmq^pWUME.pNKcB Rn#b5Ti='Fr;c64M!@Hb:Y/O(=0T]K?8JnVf %(:H<59Vl&6o#ijl&r9SQl(#?Srmt'#?co"JSWBT"7)PanS%Z_=4aSf.:1-Oq>266R>OclW]*4h9_A,j/^V PDEN2e7cnnNn6Wb6qb4E-RXK/DC_nkW5roNrb0k/T_mjK:8)P@[ i^-:L]_?JE.S^im:89(g2?a8F\->fZ\\YqY$79i>p8l."H/EmBP=QL9JX[jB1N@,c,bg0f#=/nPLaQA0'a,oX)<B"@$GCO3CWHdAhP4PKJ$sH+Z3(`7t4b))T$WMgJGSp%. 
+?Y0[icHQ=e\GKmE5"pQ$>C0Q1TQLSYs/$;aGm 7mC&<o!2/ph`YH;ek;#pT7J+RO19@k7D0Xk4K4.sVX_aA3FZfX3(9Pko:q%PY.H:8@^G&pL__G<O+_o(Ua(n>5^![#d?bcRl-kQq+3WtiAr"EMiTTUB@Te@YkR`1W(TfAf8a?dP+aBj:A7C-eKSFcY?KOrl$U==9e:i"gA(?XBcK`Mo,)igG6btA:%C$6shNsn7=[`83AOth^RM*W%*oZ(I)WKae@:K-"Jno\#OaH!"gYq*Z^d@97OX4'%AN+lQWgEd*K,\=L,iV]!AY'o!ba+PXQiaViLAW P@Q$n!V9n;].sM$^o<.[mlHAcK?b*=XcROiN<Ch#k?6CicD4M+5'HA"JL'j1\s(gdS<*[^`:i?`4lPF?CqVqf,qm1MJj;o.V,X]50r(t-JTQ=Vht7!I_To!Y4#4Po'K'g#\;*%qRM& SAS/0"^]A]ZQ:X-VZk][D(N`A^7caVN+:X?%l?t,g>F]eVD9Yb*&iq&TWnlFgb e%1e1+-@ gN*Sb/1q;IZ%mDJM0r>Yo]i\ZA!%:8UF>@CDT-e&t[',Ag5#0GJhM''L'f7tli1A(rPBYUhe5U@\%!;3OdHa%qO4V>!B059/Z.c4-8GCm+=q0A]b;41rQ9W229`FprnZN1o!#.$^#eAp5R(*1K;/\i(I0<_ISZ>.?\^Wekbs]eZERn@5"3)OD=QBrN[krVmA]00HjR(j=.=3.#G&jDT%*%[0QUAh7RGV1=O>a"T/KV)b&B<%JOM?!dkEf-q^/d0Mq-Jnar7HsTWVeZ3j)ONakRK_FJN)[YTUQdsC/$^Elt/blPLHhiW81hWX?19KVeeB()c(piE0CY*HaZ ,dpSJoWHnrkW)BtO<pa?s;M9K`E+1]$-W.s7pCC(&AL Kp$dpX`/Y0@RZ5B[;1Cm"U%*D?.!__E<[s=$XbZhT2iVH@[a4JRDt@*XAh]eX<V,\NJ"hO1F@-! (BO:A<k7[F^_]2#B8J8%.r+Aa$^4AQ$#@O+Ig9gbX2Fc]=QT5)'8]5B2oR_:9]P*l+D?[V^!5h&3p3\]:5Ap#7LKtn<j0pmknLqK^H("h'\o( IIgNO'Y!i \nF@l<.\R2_Jia9p)89PC;McU4bKXm GCE3.BYl`Wt>Arr4pj?YM2555jKLI`2:&A^ad)s,#0K4mXTpU..-@*FDU@TX.N7>%(h:JBFtYkr!KEG>Gh5KstO#)H7Da@7j!erQ`_6qs3 $8T6D1gP!5H5\`nP-_TCk8CKoDJLNX5-jbnhU`g5:gTf>g#"\QUk=VoSJ4d@NMV)ZYmbP &"rDGK Q7roi.+^XkhrP\VX!m(:-,lEY*bd3B#1$*UY&ans$GBc*:E3=&mZM419#E/mettA@+'=grDQJLY^]GP42R;c?JZ5;;LCnD0a)nRYoftWdk4_c(5#A?M;*D.!O[lT\Mn(2A.$F-H&+P/rg&TXCZILAc9YjBXA'28IZ'!!F6A__mBA?D2CX1:H?hQ^54O\Ba;=F=?rYPk3)4.^c^=OV]b3??]lU%$!*g`YkAa!GRG+UQtS8=)A!Y2='d:;#Fp8B-X3sl'qg+iW.EDU5n3[1"]1\<^4W"oJiE@Ma*CdFJL9`K%-4 'lV:5'G`mj)[]bgA3W)e"3ls`aP%05SDcXSM]d$;-mOc/?H)NV/#j-bl0YG2K?_c(N\5AtW^7mP?b)3;!!$P8\2BRsML.(/,.9t+?d>2(BZCk4AnR?AG*Qfd=c9GAEfXIp9(a3T8Wbg&+A_MN]'p$EeTXB0_4LUB5YiXOsMigM*SY2?Z9+&=gHlk\i7*-M'TP!e,!n<o>b]0i21r6EY@/c,*np&P`( Y4<s^=Dgng' J"02:X%.ATst43Z;mN#hL6@**0Ytt>ntisD 
'R4DV.h7e'=gchIDed:U<?P_d]h^J!(9JE\/NNZAoKFA1'''pWd]Z!HFt=+H\?U3-I1!#\;rHr+>?bR9=NO,s\3mm<lSm*]cA_`Tj"3964YZHtA :fYs:Df7f6OA>[#O+:8+6?<J6MVY3eog0oTM8!Aroi&1I//0]^EQk-kpob;S%kt11h,"s04 4fna\a]N)^5+Jt$0j*>hRRS@*VAL,UH\Y`"Da<J4R6""j5HnNI$0-iTl-3+Abj?DG6QMqq7iig)U^+$MN6$+dsm+^V4q9A3Z8!3i'ej%i0AS78EG2%rH=T5Y*aj@B=DnAgmMcU]W7b9-JY!+lVXTmPo(FhdrnS>ZcgTFqX<"UYE7F^DrG?A 2l3dZ<46f9*"WB"Cd]$&VAo!JK0=YnoF0QN*ATZ\$16RY(qg]2F5&\,(29'(H'8q!)V_'pI1l(R'm444.QZC,@;7AJmP.< QnYbCkEC]$2,%OIM)'snFB^4T%&X8RMOS+8Z\?J/'8/N\2Gb/<20A1\V0IN$(O.MDh]MhK+,NAN'$KKfUXb(L/A3EWKOk$Af7;ms"Jrs%)H#VL>#^90!4<KS35;e:oI%GRaF6""Q:l(Ch;!7I&-[_m[\Qp/U3_)a8s//PYdoX[##6TTXFL%HIY(T(;d\N#d8I55X"6k55Bmf1RcRB!(BB>2jF?(+btekZPD-_>RH/q[hY%eNS;MAMkAM3[E<2M*6C)HQ>3+I(I^hkgE*;Ai-e7-U53bDR.E?0#f2f4(gm"m%B[2J!3MPZ\DXn33RoBN=>op\XBU(.:g^)g@'<>Qc#9e?Z[55L]jnW_^Z5U/:+@/`c9s\K`)6OZ[go(agP,i+Vpbid'A:N5]Q8q%V@!+/*V%Utd_R?MU1e+;@:*GA/\DQjcO8H])p:4A^,i[:\<-1tj0VXI$->% /5m(OC&";77"BmI[^.8C%_;)AS6+m,;j^hKG0`Nenf!@%oT [Gnf.n'Q1WQ,SMVp\'rI-M!tHce><V6:k5-N`h2qAGfK%a,G#l^D/l q&@h0edE6.P C 0`3btXKsBKFg4:C!!lUiL"5D[+NStAEB;A@EW-_sjc4hb5GBCEQ3T7U!OE7dN31C]B_ &Jh@gN+^6hk\GDaJO0A>(+B67J`NiFJi7SP1>j>$)&T#)tF_b;p[ecN,Y?RN!$F!/_>;(>:;GVT\dJU#nO-qXhI],t$V1aD[/]-[2B1%l0i\E$fDCmGK?hnG^p?K_H)\#UCZ)O$C_&JDX$F/_o6UJ%'qg%#r,`1jE)lDL^B;TnsO A'mk5pi/1Mjmc!AET;Cot?F3[1J"-(i*X%eRGp$.nKfVJ=A?h)4B1W&b6=8P."oo,?VWWIo-<TdmdG2FEB>Y,DZ&DL7]laAWLW)$sI5\NQ5<.eJZ?5(h.L97rDJa4bb&AsH</gZ[5rZ\mWKlQ%R#6rcAbsfH1"Zf322_3l4(cT?q;dB;DrfsK\mtfJ12.^c!hlcFJZY7qY\S$OtHXpPIk<mY93^ %Q6*pEZKD@n*Z;:G<:sQN6,6[-tAegs4'`S>^!Q%=5iSD?h@%)<Te#Ad-R%\^Ahe\oF@*'f0 n-\5R:,lm/n5_]iji%d$;!]W06#\Bo@D:o LPPEM@d&JW\n**q&Y:R&E3b0>Aep ]PP*9f"q8-]Y$hA+/bfbe5b-p!"H(6C^P&k&sipE\-"cb],a[B.kh^ ,"A=/]B[)6L7'9B#LjOR ST/]7ID`Y6/n]D^j+[A@V.&UXVnS7.J0Pt)<-[tZqCi#sC2PJ*68,ngP?: D3P<!*)9ta/$=.'q^dIRP.*ec*2/^Y<k /V5DVNTMm%a!WONr<\_2/bWr:V2+islr/&*'Zk HO/d(QsdPjF0l! 
!-Z ; lG%9Kd[^5*5$'hPI4W`X0tse*k;Sq75g.s]:e"5Z2#-%]rg;!kQKk"gQBnL*6:\eQ]YOr/Oi_Fmd`)OB(1r<J,P=3eH'4n=$h%(-Y*;A<M&3?n?b&&J>dC&JB3Db^e>=g[lEAA"1G5p1`7nr@sDgt8AP7:\dFYSgB\e##sM)kQW=tmI?A)qqFKfGn\F>5cIs 3qM0\fC]KJL?A$.((ij8>";_a2[=Npr[H",#s(=I!a&*)TY]bt8L&bhPq2TK7kGdQQml<[*i]jF9o<lpXm7"R.m=nd@^d_*;*32P `cR:l#kfVi_Ct1!r_cb15R>7$]gK=dd ;>jU%4:(^nqQc/c[>T]iS9sXK_UH+UqrL!lQdM&_S7gr/tbFA^P2"#f[kYh.^Z(U(al5m@TL#Et$`(LO57%Q c^t1&\4dR1F lqANA+o)'@/IetJrJb%b/WR-$8N$A=d38K3[is;Y.#7=Xt)cQA:lX&.&;cdq!si125Nr#GW'f!"@jX#oNts9+25Fa5[jt0VNpBVE2<Z62'.@de_`T,K\!$P*R8R3r&cm+*q(dVrL0/]j1p/r0)LdqN;A3'tX,EmZ*E4IZl8ci6\^Wf:=J5= rn^2Am]c*r%3E//1#Z2VCD`\97!?:@qoN.#.%.@c'D7la&ZK&nAeC+.Zg>7/@4IPk->VGdg$ZOI`c? TP)OFJ8`%nhE@=SflQsXQr!T(@5hPA<A<G@B?B7)S>F;mVb@m6<2LKDedE(o@r&F>#F?,,\Xb0c"]!A31L(/7fUf4N!Z,&O7n4YhpK['K>FVE1R+oT'4)YR^,=FMNCUIa_hS=8''%VofLdgT,Ik*DggSD7*X0:C!'`PY]qfrU+76Xl"P?sZKmLAgn*LrcV^#mF=1'LN<6+[.@qTT0;0#N"FIV,F<_s=^`nF$tm"1h.-Q4Ri0*OI2,iJg05>*X"!i#_ctf)lV"?kn=1*0(p_]SU[%U4J2q,LAA,b-h q;BFRs6)kBgp'L/4EAU/kf2+KP_o";?t =44MWelLiom7@UfkO:N"k\Z2,=AhrY$s]'7&A&A;_I8Xc)dMP`-*Db9&U8>YeL<4#-d\eFCM3;YM1;A)8\LL9q1G[fCd*(M<R&ZTNICKqSkEfD_A \43Mn7e$1$oC&))W?)Y'W-0ebjkDZX;\5r+^b628MBJ1a+@brWkWEAagc8QihI#OP*Ai$KHhkpBW7.-(l:@H9,K$h=/E:2CsHP$8he3N7pql-?cPsHfmAXSROL`-GN'Ihe/I&ia'RhVNARA9a V`Lb!bXJWCn XQFG?]AN-?Q4!?sZZ``I%T<P l`&k#+^@3P"[ j/<#11rEF")YP<E-rPNIZ-Z;8 L!)X0kDa?-faOs0P"5D-\Y?1c6(;nUfh?CC)K 72J6E#Y^qmc)-)mmsA/28[9&4%-Jo>USZk"/LTs42#&YaFl1BFQ*-/)a>>@2X-(;ndjcTGT d`o(p8ESY&Edi;q9?#:qG'"AbAW2$aK/;.UjRU0-@Ih:[$n!c2I?n'^IFArki5)Dc2,'XQTO+5r5L(^N^PP $OQ',ET!pp5dLO-E1kJO9a2=g02OV?Z9DV@a+fASkLjo)aF"JNjgGZ3p^8g5^N:58?,)JK#Zn&4-eGrHXU2Z9OPj]p5$+/Z$-Q3=[Hib%jL6\6L3^G(sAZ;/;$!9MIe!sVY*c-)c9hX ^B8!`V0=CY5XW>FmA>3X+M2KiWJl#nRL/]<<=WG3Y=t;_j2hS?_12 X7O6lAq.%n)&@bbm-%g,e->2ipK49HW VD%@c6'SE/81mAL6?\Z0ig9"l(%-&bA@NPapRAeBosJ*H;e<K7<'sYW't &kP8%Be'C#,qA%jdM8%eZJ^U\0eTAbi)t?kZBO6`+rr8Hmorl.:&Y&XLTK%cA 
*6(cX*IPD-`fNEef;HtGD'N#7FM78r%)KdI8!e*ls7%6'(Nm+\/^%08*KBj4t(OU)6bdlc`XK1h3UH`(%o;10I7]=K-T-YY=JDoD<%HFfInO1Pg+NqW0Li]o>SBq5eNfa7KI\_`FAe?f<*aRKKmm4Y8\mY.E?a\AnRK-'nCM6R4_rZ&7@U".:pJcl[+SNVAp0QVkA.c_<3!+/-KpW#1#gPoTm$-AVaQbMm)+` #cY^(s1(\d3-pG[bQTS#X2e>jSN*6$6& :ZR87O>&7=1C,7q9_`l-f]fln ^b`^CQ^QI5%nAH8=j'Z5MW&BMi(Osr$*22[[js_:%J9Y7FH9^/$>%%L(e:gQR:f0;s24gOi&J%5[[39bQ&f/%CO",Y'QZ>>&g7=ag=@q&9Q*Dfp:7-"6hJ<"7dXNpIT1d;]"s8SV6l?9Ra7b=heU^<4Ackg^FQAVhiU;YdUIB&V_m2#B$/c.`Cf*AK[!*0Q@fBJ#X)cZpt$=pY:_&bJN<mo[hZpDtn>Xf@&01[g+j_Y<FH@@m/[oiHc:5gpsl;3b3T)Q(R^"tJ3>1bBqBk6tDRZX/\\lmQ&n0E:"@BrsAD#`8MX5d-;EU:ODs.Mkbf m:\"08hIRg^6C=kItD;WM$8[PMNbMJ)K'3#sbH6s>QSlh5o1Z367dNDXCRM4I)N>)SFn!:'E^GN0A@6-6,sETk?Ak8=rA$L_@39h]X%-5JAf,3eeCXHWgo^3VQCD mVqd!Hl\O$15c8dQfHVSnDCSr'0nDAS:Y/Em?DMWqc.hQ2Y*;WAK&CH7F'gWTSho?DTQOj2_!#LEBP=NA,><AaK_&KX;or*Qqc_A9t#E`p]$V^8\d'bOOG,p-S^9 Gn/iP<PLop_"\s&#fVQKZB(Y&X07"o'p7]sD)e/^gQ0JoIh^9)$%Ae04QMp*5#A+kg!e,l1`-2^NS[oJ8Q`(3,j1]81/!Q>KX5&>o;_PTQRO>\Y@0gCnerAWa!6D^+KF+YL.Z'b\-"6(F?g\OE;Fs0kDr"1qVA_PHSWXO&;Y+:2mhcGaHsS/G_Z:!'Ch<e_sN:0E($>Z7I^Z<+:?0.^-@IS@1@? ]: !"$s7!SE?(KJa*CJMmdD,1rRKD5O.6t:j6IddI-! UfW#H33O'YM]hLQY#JT;=5WH@V!nO@BSd-nSJ:8&k"0'S$Qt[-K3,m! RX'daCqMS-dfPHCBe\fEKRoF%ICJQ`NcG'M/SIfQns,7=>U[_4iF(LO(UgQt4h844%[s<n!gW<K?NPNA2DJT-6l@m_RX[HPt9.Nn,[rF/56mq"Xc`\&FQW:f[3_hC1!GGdd3'BZPt(st*/q"Uc9.ID(f9reXQlk XAA6#*)XTjWo>2_aP!mj4\1G"6fA:3./>Y!:&bP/+NL8:4c6+_-F +J_-[E[sV_J^Na&RTj._ 6at+M)2&DF98d.e;]b@4%KA9;F";Ag-"t8>l(-(54VB$\=lFO4[5tqc-,QtinJ*5]D#i14`&C@oSP9n,ma]*SXn3`;)fF6GA(Q_fl"K#gd$T&_Akelq#%h7kO2qN*YPY*o_8'2gXSmE,Y7K?"$7(KF[.C5imgT%87;WI^Tnct1@N\C!+qIT&a!0U"[)).N>,K-rhQ%p`W'RM<.JY@RHepUbb;?-3(a'r\;"hk^d[7H<I&3VH2EcAm T@^N>b-95b(/M/f4"so2f--WAPQ7-C<+!?B0SrE$eb.)m/-`A8+-j44d6]U"\QQdq`^if0mrDqa`P b0B/8cni((k(oTd>Z[]d8k\Glfo25A)^[)Z1pE&^5dN4HG^($FB4BFg%TK1^Y(:,M+>4os9N=d[AJ)qZ!o[J9$F:6/G2f[o[3D?+N@O4:U29OLK,acrJ:B..A%N[o=n3%!U5jleoIPD8Dc)SoT! 
r^B'a4=+%lp2%(*r"]ATCh^+\[KM@7 h7*9-S(fc_eaUK\AA`U+9P;DQ1HPb@Wc$NX%gOgQ:g2dR>0,e$g;\Y%D.BT_jso>FpDAFI$9I?FUMS1?$iI5enW`ICBJSP^Jmm)pjnB3.%s?-M<KLW+P0/njN1btYdLrSZc-Rt)^\oU<_=)`LsprFmOPC;^8@1[F61!^is3asPVVA'Eni)OdJK'7dqL^`:;t7^cP9#ph\R0Tl+J)=AMPBlFH)J,:&DR)OqXUt^+LZJA@(=0>H[t$612MJ_B'"R X\$aV".J!sEp`q<'>[nbZAbbm$Fl49A?LS%Fd%g;[ggO;FQ-i0B+(CS/-?g*Jm%1PH)gOR?4Q\IogY-JXn8S^Qf0AW<QC#]!sL1W[%rV.-,7s3BS#@4EF%<Z7\ao\Fa7Ib!//Pt'RPAM"^qK*8)RZm/NMaXWg=Z[(2Aaj^OM$MN!&$RIQnZ+g0s>Zt=HjmS[)Y'CjWrSUA%1&4>o`GA$gUU\LS[kB+,r`/cA-J.KWbjd4]X-*$g P\Al'q-d3q-AfbA@+_7k>AtIJlL$$M4qo21\SQFLo1_V[B;tT>!$^feIPTK)/)V>EYiEJOi`_) ?<BJdB<E`<nr=RhcJcqX`gJ#Yin,d?K.2 A@oo&Y[CAmr1j/1ij(2Ohl[p=GM:_1&:_9'RKMYPB60>c7Bk1)XJX#PtR#eAP&T-j2i6.=4tsZm\Ht9XAI+;M@Zj>lh6aA*P^9Ymg(-nB==-!A!JrP@Ujn,c[r*(jRfkdp?C`Ka'\tCX"c-]eWnE2]?!>E.353qKs7+!n 6#@GFonM(-?c#7/U `%=RF:Mq*<"(TX%6m'N##4BN`FCU]KA'IAZIS6Yl^M@.Ac9lM&j+B^nBPM_F8&U9A>4A1\,?]Dm"HjZQoeBS+g@P*r(0$h-(o%_)tF-^\Zaq[G!a!2*+kZEg1FIUQoQ'Is@!O-q6,RDW&L*cfTJMr;Oj@K-m4'q.h.&*g>6UZ=(NAE6(&A`Zt6_.a"'B,/4g6Hfhln\_$VK<9b%sD6DS1ik7n?VoNl2"AkNMRJ(srj,ggIqTJD7qinH'Dr"1T?,?%h<c-Uco<]qYh5_7APE6.!sA'D[Pcr+k]LepHG2bmBAdernTRGZiUJ^r([=(oV&OWNEHV^Z6icZ=$@b]<$9$iK<H 6U4<]f8HQ[9X-Q=o]LQtcYqe"h)`m&Oa*Ei1\nUCq-C:GABZiJMU.]5lP@ *5='qA>bS$j=<-khf7pSLbmIOs<af&TciBf.>&lf^:q)eJq-t<<Bp4MYX]R,p3l,/!L6f(Zp9$q/+Fn,<<qtY&;pLEPUnfo@kV6]lfE&lPW6+InXRm2bGg%Mo_Fb_#j.?mWW252os0'QAH9lBi'8c%lQ8\2sXi6gAH['BAoM6Jl+>clAMI"nj1["LM3$H.A[]_V;R5U&`:]b&6Y#jVf)[s2<3^EFT@'1k[m.sAA^O_^LFL5<i@I!Q<: 0afF/=fZ]gXKh1M'(5%DR%iJCa3faIJIN*dHkYY= Fpl1?Kh"<a558gPl90dr3q#brqD^Sl3bD>9`KL#`c@_.37hW^3D!7q-d+sH]H"PKBbEGaao'4X0VO*q_]DiJ#ETa`/.\VTG:6;/hY=Wa!&dS'jIhi#gQg53R29DWgDB2mM!@0YmiXSUjT D!&gnKpPXUESfKN#bk#]%`T&M!DV5H#\6\@D`s#Yflk3&2%qXLo/kH-T2+lB3B0d&IsS7IA^oG>>h%GSF V<Tl(c.j'6N,P*4^!dV)M/,GgO3Q(EU)KKAZ4YWF`Af<%3l#tcFXr*o*R0:;H.G*@s&D.3#ZRe1J7mFD[L1_J>]s/^>I\V7Whb<_U5;P;9SW;iK^'9MkJbf-b>'I^DY% 6/p1lqe$rmpi%@9QBQLOCh9Wa$ABDN&:17**U,7=]j`42D]FA(X='_`0=&J..5R6qj#'X=_SNl&n<0UYriB=h[1YL>bUtqiOrXhb)B t= cb@@148"gmJ<%kTK 
QA(<IM,'N[MkN&\``#J\(MJFkY+*hpVtsl_anG>T9]RnQM5*,[3K/00#\qa(\$r7<?GgZKV#$%Seam]F7HIVt6if-<f2DC8"5qiSVoLA]qZ<k@k37aQTED(EF?$5MFrVcnd7e bGbm%e*-./i!K78<[pe-RT=Z1/sDr^nNJlS5QWlI"B&dX'Fof;5?R2\pttL\cO> pID+>)5j.<J@DiQogea:[YWKp8$ehT/"\7/&GZ%&4TXHHXB'-bAHZ14tYFA@+gSEA$6;/Y/fS\6k87OCI"kiM*Pt2!1*B9SHM=KHiqXb4NpS:b USaOSpsJr8Ie,q&/00NR;XCOL**O]1D>mCf[oOPbk *Hq]Un-2Neb**1fRf$19k]UK#)%#/icFAV62!V2K=>\f3(@^SAhR6rO9i`c?MtkINO)W)6m9r(H2'PB gFG4,I[Yp0;d1!BqB[S;'L+& pVd.dlcQ<O]f*sH6qpk$5K;1cA&t7+67]MoX4rOq2tA=J7\lCKa]nH*hPS@a8H+SgosRb4t?=m 9A_bMP9.Z)>*bXk:70KQ;WC< <O4 0[T,\M0h3dL;rW*\m-A%C,B=p:F/QK'WOG47'$)1C*`/I.tc7*``C7q#;%Tf+P&?gn jN,t22cViK-s22LlGpZsFE.Sa;f\QVM!`piM`[X/BOn;b]@p1I3\*D<B#Fof/F:/4Jp5X`4!Ab`/ ra4j:RFrQjYq`!8AA!a;<G*is+]e:%)$;VJ<D')71 oSa<^&Xq[p"LB5[#]ESNE-El g9qtTB<n,\pCoOFWD3=>EFEEB.<f[50b;?X"Octl/m.=`A`Ac0o640&tI2VDri;p0^6los-<A4d`U(P1?*026IfW;UQ$^'as e+AXDDn@Y+rYC5$pA3 !"UmTc/WC 445`Ifc CfO$0)Z8W'*+2qlG#6SXd+NIbZ`Yjn8dBjYX!6NT8<27Th\HSHD2A5A/.,RMNe^8 Vs@?3<bA;:m,HEq\284<J,m\tdg"(C=LB')WU_b!@Y0R_>A^piDC\,Im^#IfeW(68F8jA%+/Q>s/BgLP1DB$OV`lJ`REf[-Z?cGs;A?ec2WMM)E5QT;A10:]\WaUT)s%-iC%7nSb6>=Qf#!q7Fjj+#Gs X$k(m9qAm;M_4/:AsX$Q_V!L5@1`aisf--I,&;AC[7`2P4,RNPD45@N?C`k!ZR_YRgfWtIfA*6Vok#S+>E@>19/TQVImrkno550EkE"V)c9'Ak:=[$#_,JiSFUg'rJZMD3D'fQUc<fT&:"hgO5pc^ZHWXNqQB(\EDO+m[!YAUldAB_H<nF&==9^g6=k#&Em3+,W.[o!T/<YIC4,LA;3AY$S1T4('W#o'bOi#aE]V9ZH$,T-qp`AFST\\8a?tk$20J<4N:SA*9HneA5r%VY" $E7K$qt-ng>ZsQ+*q/MbR]JtbAA;O?Qf.X,4.#q(WnbNEd7K_@]eeqZ MCJIP0Qjl<5XA#+c(53G+)42KD1'>-#71FPlOpT:)L=gpA>nio"mCS/C>c^t@An]C_<n#p>m?Kg$/\&fJ3U>TtJCaDf&!+O"S(\&i!0'Og;Tm1Z7t@"jbk[D[oI1qsV4"i@3Rcq&doS$0o-1+U*T:MB!>$E8qA:,s4*UFg]8P*P17W2"*NZWk6$'s2@ `bOp,s1C5>h0fC!gX`X@Q>># BH'>bB$BF%'kd/j/8KKIm7W_`R4Xa49b7+Fqb4O.qc)A`iVW&6%S!,6IE K4o@(lpLEUX96o=SU#cE:J1HKn5VFb3T-kJh!!eI.b-Mfke/EhEm7T0U7h+q7K`Q*]D0X6o$H Y8:Zi`Wsi*VFDtgRdiPY,A<:_M_`^h,YZNG%YhF:_I7*LF3`R.<-.0(o?; 
F2,A>sW*Ci][t3-=hpVaAQY?Vc`R@$2cY;'8+[,kc"r;fiQI(6[_GF@TU!Qi4NI*?=0/s$VR<_YZIdI.=9@]gakFqn4@on=`"Z"`/G!59#%^lhAi&:c-"Y1oN:dk;isr'Y7FC0`hEAoh6,2` Q-K*``XNoAc(gpA!A=[nB- r/\R;P`(=7j:UB/(W\F!L8+]h/#]*;tW<dP/ai#piqZ0sXqT_(LOb*8.=QQcln\CWLL&)aA)fGi=Q^DN$B 'Z/_^r$O$kW&dU39I"'[E`)$-X$\9 CI04eEc*'n(Pf>C&*A/Z0Y,Vgksst7)T^Rfi9=38]od>kZ\$]h*9cUU,DMlqhdQJ"?h^/;<Cj&&R0252VkXp(EZ!bWA=nn4a<\POCOC3TO"a-&O"a8Ve(ojXa!k#C6H-"-p#q9Hp-A'421cNd?;Oo\tf(j^fqkBQHM3fdQ']2l,Wa?i;:(<k,TL9^UV-Md3onB` O5?AM8Ed*'+J@T'?`'[>m:@:%ho@6ZU3'gmrAhEq^D2;"=8[O(h>Zm^,jO18*&>tSt4UreJ, +Fk<,;A1Jld[J(W2D"41o[jBL-P_Sg+RI)S6jX1.<>B rd.TsO%nj)X6l<bY>3Qd8@W.`WN6F^j+tJg]pdf4=?q@,eZg0&*-ALgS"qb*St>4bhsJcZWA2!A2X"Qb`'Sr@-MVjYLo."C@K*VC<!",q1ejO)A$'f$)<>FI 7kT1*A_HBA^M1i\j@.^b`NQU\o/c?'Cs6%hCQcD08#J.2$B;aQm <i f@.-%0S=al"^:h*7s="O)\W>nism8,ZKpA<W)YAEV`fS$Zic"4LZ0jAeYti/M1\\F55F"hgO_(1VTt5;\*.kfMF%^:QRBclt%EU$6SWi85Y?5m=;iL4f&("R]XZ[ bD!!n;21s&(='S)8(pOCqpi 'e?4Mb2o2BF8A<F14[a<A'$?m54Q5U'GaG9U; _mXA<,!OXR7SR#2J edc9 8H79p7,A?*qE\f_N*L'0MA-O/C4MEEXbDKZ>gh%.6-UU;le#=CURN+:R,$5oPeg7<.1(okE@\UHV0`\e @4Lto7<1I-jC=PU*/0rX*jjAO>U=7q*/-4\lWG>#KjftF@`/SB_cJ1h9L"as=]ZQ'0N&<Le1fAO[3qfr>0Kf`>Okq:oXe*]j'hn;[[dsd*q`5kH8_TkB_4-ESEqgMU7d02[s#OebDd8tBH+jo*<ZMWi6*K>MQ s)F$.-UKfl"9+f@4'bd<im8Sj-opD3J&o)hH(SQ-*:U?Jp!<^HnJXplhGJ)c%?Q$.ktb[-V4r8]I9b ^6BW0abHAhc-W9,SR17EE>0XsbR^=`NJ*#`.;^E,ftp;/6#AA1(VM\E9m>*4tY pe9V\+Odq+V4VZtYmNShEfd9s<a[$C`3tN%HWU?,p<CPL_@WU#Tnl/nd^ DaR"cW`HAHPW" 0#][5A)`<X-OT]Gp`2AsiC14S3pd(bsPRN&2<bG?/cSCd_o96R9\KG\/3"[,d6_G1Y9aTj",Q0da'mlFm=Q)\h6$aA?Q C<d-^&(>'LTC\)%4Kq"\2p`O,;(n5TVe.JjAtWtt%VBs^<`ZV(m@9W:hSJT#%IA,j5O+pA7Nac7B  ]b+__q9>JnP3Kd4,?'R$;$a!;PEoDHOL\GO3R\Z%*^0-7g_cpbSYgI=U%&?Z2JN*"c8+/g4n,MUeG.ACAd41bI%"%1=A->InkNKHVX'AU%LP^@0n<t`ijbI7&#@H<S.1Zk/*!5"@ rXh:#M<[sYS'NnPWWT<": q?t` GMBK^.+j7mdd9?snet0Lhc-C0lZ>4J=Z+:RtgP<ZPO[_9rJD;o>;#C4#r5!]4<XH`-.K:2=;ZQ1P?+&#Ap2Xii9DO3#$X 
/pQ^tTS*\a'U"CkRkAfo:NI*"Fa'$a/X!ZR1U]SUBGhN3m=kctBoaT&m6^)dB>kXAh/rtSRk/tah0h#qq6*UTRW)`?9;_&agZ@<J"L(d9&%$&W:rIWCD;q&OH7<8@nC9:@:E[7U/&#.SKi/)1Hp#1CV$[!O:(Gceg60Y!a);BjF:gbLWW3,65%1qEJ Hj;+0)$rN q'#g# UOcf@p,^WAsoi&Y4o(ID6&l5%sAjAq-+,"`JUV/B5HY070!'F3kNWTSb[A9!gNFG9%3-\^*Ieh#e9'_2]DnEg3<:&TeN#WSF'0,llsE+^C,,1'6\LCg/F/68aJrSHjY[BI1'&/`Z08'0YJP9&<=m,jYFBl N'JJ+=t>UYMrtFFj53\c.Ane6QQY,%FtlTt[: _>\P`WVoMnO]E"ne- BL>nM?r_L(.7P,$l <'oV0+_+h.-Yl5hLP_tRUMYJ]#`/5@aW[0;rZ(\DmXN.XR@N&jW8_RL",/`s"4V-[l.VF&DW1%c.<-5p:fY$"bCl3CNM[-9Ji^tngOUdo9WODbYcd)3@r3#hHR'3/K&/DLA\LNkD dM:XDU`=)S3tKgZeTSt^?@Ye&1];%b;NI4qde#_^Q+mSk"caS%p.NAIU/6/Rqq9J/?X_e/^W6"Osj&3QAcf,A)+s$NA7=h2I*O PT7!jOd/(5RU[j/[J+P`2XM2JK+q_f$bGlJVM*B9C@IWNoD-!>Vf$ClFqj;J5\@PX i8T<J>!PLA@M69mfY+<X*5AH"`:nWr/R 4[Ei<d(?-5a:(o#T_E3UAI/P7+LsB;&3Dd \:h?pJ.J.Y2EmW+%D76]`[>$Hj&GV36ko?,P1XL#o#5N]+.>Z&k])W2JOC-,Fe,@o>@H+/];/0<!Y\V3DlZ>kkj+aTqAHmYBfDiW_:I!JApEq<`,oCX2DI3I<V1G?a"\pQ<8[WA>8ls4ZX/CmRi+_LMX4)`/b+Egj3W<&Yb)-,mq:[$).fYR\230Q6N9J7pJs#g"EQ\:O-#lp)@e`NpB[LHo$_JbNJpt&-5..ct@gm_6)]A8qRtA-P1b7-r3X, V<^OHNB!VLciffttgVP`1XLDJ!Us5H?daa?nspLARhrasnA:iF[PYI4g]5E]6X3EUe5-3AQsMm.*[J#*El(M:SIf])\A@hJ6?U &*Q+sJmVC sE[ n5!WVkb!?+n"Ab00Z8gnQ?i=FC2B%gg5qi;k#Cg>8"2o*6YpO_5p7D4g/c6ESf&'Ao/TpH>WhrpA8U5tO-g#>t?SAgG/-h9<lAL54>gYZ9sAX\Gsc0m$lG^/%%H(gY+E#!.iN6BnR`Sh8;7.-ndY8j2jrnC92f6GmAq""UXsE8-(s+n_3rN&04pj]'"8#TV-W/etb3T;$q=%>Wf1S^CEZVr7q6HV21L`<pg-f\0`ifI`/fmVe<R-5.[iN=M7(o[d>6Bl!F!#'UcL 9A`Ltp$=bS7kn3*8cc<+3t6cr@^Rok=*17%(KJ; $0WRPg$7#h'--o22?aWnC29!Y<B2009oY_&"(2@l<D/<BCYQ[tY1Umh++L Q`97*!ZN.V!elE2n0a`[IEQn<[07/05/06CTIVE`&"U#Ih;t:YK Dk\&0BKkQS\=.Yah2nZU$1T/-RKXi`rcB86C5\E*L^Og!s'?s"aGp`#_r&d;>Pe:b$mhLU\XU0&mp4UM=Wqrs^htO1\<ej[8"<t7r\@'XhL5./X,>>aCI6X99s_9C_09XU>4?E<(Di:\Rr&n*^]DWsZAI[iXr1Nr?j;gfRdO654c*'/k-D-K'k^/)NTL5n!63.>(&`hrYB'/-2c4lkKq]G YijUHe@% <O]%H<'Wh\9,t'6BP1%&iciP*Q5^S%]rIiQ(=8^D*t2be2(14bZ G^c+I8%0n^,L+fb9\Y`8.t?GOT9F6BT_Rg68]I<mUkAAH P<F6;hcT-Bj,fi4. 
jkE#jik[n6@o38=37"3&==8;)">bSaa!<HO#4Ggp5QU_ho6&n1T/Ch8e0E$f*_Gkp0@n;e.;.$Zo_9\mJMK99D8BTjG:>gbLLK,NANVFKeKsmA^pb0qWgWt6K2iFSc&h..4+l)q.%)F$C!Jg6*ofZt$h'"\Y''*`SRa.ct)R$ P2JRs0'eos&9V5Gh"&eaT&?HG.ZUBDEH*gD&TZQKTY07"i]t1'rY8SCMZK5f`A5pF-n`b)U&J=p8?^>+&SG'sn#.X(=Aqm^6S)KD@8mqG/(5]^Z4LG[kML3$>>rUX7]Tr,kA.s`RNDea#oA)8Vc)F;NL:JSMt_`9FMlVLi[MLLAU;Bhg4qXW6SB-6S+]!pkG&/IZE4.J&p\=b/Z6EN9*!Sd Q,S t3mfRlZ:$Cs6d9f j;BU&&\?Uks_bSLr<Qg6XoeaXK?M(U-TeeB@oF<S*<lL$/&1TI>NK_ Y S]tnmL?\SgkPL CKVOdIkl40H<M,sS&B2c#ommg,\J!>VapSC& h8le+Ob"7 J@#(E>.`P`IO(gGW1$&)m,j8)]FGtht?s_B93RKA0!^?f4_G!tET9hS6Q0/33RC$l'-FfdHs)RAH17:IsPSD<PmlNIpQgcWUs0=6?;s+K@NAFd.XBkP2q5XJ%Xcairqr_qfpS.F=#\']lSo1pfm-U\it.)M*HGJ<./SlAiT87eU4R>3Y+L8\e-)W\d`qWrkkoidqt[A6PhNX?Hc,kh!,?,0i#TJAdmoaok['CQ[(*J`8=@FQjRT@N<Q:'-H;2QnVbZC@1"f7'd1[psWO4Amt+]ek!n$jA!cQBA##/-KsD!M@fEI&6?KSpT*pA_.?IJ0MqGQ4*0(_ZG?1Z64:e+F!kV\sZgA+_TAD8EF_T8psFsTYYmKtU)n[4I5Wr)XJXN1[$b]8J9h3[3RLCfLY78a;gq&Vn%: '\*oRpI<f/K*=W8FS\<t@*;^[IeI'U0U&@`qp-h<P4^MBLP.clB'5UpY?@9*Qlqqr!>C31hk""_>[IhkY3E/635,_J"A<c#'#a3f<l0bSK^-m/b1-8PQ2I%aB6hE!"]`sj%K+@^B2+K\O("Z<VV;bblt9p!pQoVgp0 bcc)MeJ9X>`H_8GP=<.%rl'dpY/(`9Cp&Q=>(t<$fm<6:/a!O ^Y,%1&0 ghC&!F0'A5dqn/LW?7ehD5?>^Q=)ZB]0&lUPjW3B^2S1D&%,c%m*f^ @NT>Be<9O0No`VlQOVYlRV+=.sA_&ll/5n,@@m+RDkB(0Oo-E6$VLZX5kAmnr.I<qS<ZmO8=&)*;jO>+?h"Yte6A'Ia"8,(=LJWQ_b-#UC%m1I)6AS6<gO[Aq]oZ)RgP.[63 >$rJ1G$>Y(C;;A77A!C/B2X!Y1$L?Y;go.JT2WpG3O/al,H/Y4TP7\ 5Yf'A&^;(5d"kSGmmD/!7(C<la8B!L#:b!_UHm9,qcc9"DYAQ't=$jhcAQA3"[&tF3\rs;<"hiXI3I(#CMFk0&^73Y9E!aU:m*27;!4AS`,X`K9JgWoa?<V@r>N>1?a9Tq/!BmS%(+n8iJ#P[C.9\h6>1<T(&C!9)jT$8iFGC?fj;An]F\3K'Se$93Q!WqHt"O(D_ST>$O"5mZ?=L>UU7+r7A)g?g;)Dkcf`Acq+bUWt;aeqJe&YNT&U-^MUctE8=P@"M0Fs IZQiji)(!)3I%2:)k&Mef.MfPP2-b!\287:Q O43G%D^9T1e$/B6-_8JZ[ 'JKt&MM9+?&d Bb8IX-'tSaYlD8.=hl*Rck:+T!Y77.9I ib2EtT'^>A1YnIIF(A?QPAL-]4,jJb'g^f@[NRVO\bU)jTfRmX+m0+B+^O)+FDi*4IYq@m:#be*i"F^8N8^f^TJ!?W7$Ho"65r8@s12=X 
ZgBnWj^XhnL5i],M(#oFKs,YE"=Q<(2.s:0632=M_;Dbeb1Ka3K:I#NQZ)92HD1?O%e]Q"/0iWXY&Q8,7!HJnNs3tY6-($@L%^Hh8EHL(s)tlpa_t;+lW,]"sR;6(AX 08Jb3_VEBs'i%$k."rbc]Y(J"CnC[ES"Z?)`pQH&)P^T#V^J3Uf%:B^:1PbkacUf)p-T8Sc]AF2:j0W:b61-]N1khkK;^T-GD,1Lj`G\S93cH;?;df]meBX"@Q61BW?;N\>HJr@b6Uo`e/FJ+=>Gda\7lQF`2D<Y2glUisK]?(EO*NlsNSt5<N"#Vb''%%nE[@gL* (&/UY_&I=htAcTt28DZ7GL@%BBht?d:37!Y _5dG\_[-qTo6hXBM36q_`:5*DW -cG4j.FA,7`@Gp<Z&/tTo2 ?hNj>@pQ\rUg:_M)WfrR-*a(:?\:BnTFNmO3)_*741ZFb9?L\VNWbUQ\ iNOY<'=m[!K4<:N&933(!kAQO+0ZMh>:<^.!BL-^LXM:sPMhE >K<& P$$UKZ3-Er`ZUO'Oc010APmiXmC$YYLmqD<c:K6Y<WaIHH]c>OB:&JJGr0p_h`U^2!Zi$O$+qqr:X2QU7>g-j$rBtt$:=st_ST#WiWG]Bh(m#U?86]Ll=[#M)r'K>Y(Yd;*Z@OPnc0O6WBIAWqE%ChqLXP"4XRI6g>d%Oh2c3Sl@FcA+]9TN!1)5Z_4qNR!JW\(*OHor2E9Z Y<c]KoWZ@NAB7#(g1!X'?8Rn)J'cn'O&h8n:LA!qNh-URea-o;10&.k+sZ-Vb= B_;AN7@d#iE/O]`6I(Crn(`0U-nYED8MR?4]4CZhO5`>F#ZW3%GOHbk<N=rm<I`\6PN]ZL;4J/e_W<B2=%[OgQ3"fOPg\fPLq4jY. NfR).k@s;6*m0,(D$oW\O1Yd9m;'(.=4'W@-2 >er*[$1[)V/l'oNts%P3Yq0hbc_\G:!%42PVKSTE<e+a#P>Xk)ZL,`sRD1Z>Ht]ioAri/5?oa:%rO%/f\+lI(de*J(!W[Y:,fF+3`T2I12"[ceY`4V&'#Gtn\ H8MPfCF"l*d1/4iHnaFoJHR7tG9rPtsji[AL: pY?'VlW;I"31P ;SQsd6<_88#Vt?#0q-gZ?CY%]D]nan26_$07m[/5'm?0>)^EI@q>a N:g^9DMY!#g%)ZmtXr[T6"[]WH&g#Ds0tq9;WH9RAra!0qPA#0^AM#s_&OM0Arr/Ng0Ym*A"MY`+GmNeHKVI#Bk4& Q:oG-<J@=KXRgr0=A3A^P4BnL>oWQe9t+QN#GW57cYNorf(ETGA<MeVgorXY2`U6!=Qs/iU_fDe:N#IXs,)iA5ZB>(T4P(^CIF;\0s"r.T3QQ$Qn1.mk6a,$m"[6d@91Qrt%s!4.c7(nq[n'"*c0A^I!JoZ^BlUrAA;li[E-fC-F_chc>/IW,/VqFPOM*1+)(f=tmCBeJ`CBc1jAZ<'E"!pa%_Ig`80;/4H ;$'Z%9eJb[?W5NaPtG9T"tfC<=HRJhdRT2GRK5rE#9*>)5kJ8KD,8ap 
Ap="lA*c##gWa4A-c`if_oLc$L387$c)*TNVJ`'\L?Ok8lsV!A\?;?]46HkkRh,L>aV?"A2I$N60>K.06:4XZDh=6MGRheK(^]]EIID7ocQXN4B5HL4q;QS\>6qp,A3"G"GJCo"Te>5ihQ3`3r<(ctdP&!s)3ET*8&ib:1Ak08U0G`?7rg-AAN7&"lc,r?-6@%T-If4`m`/1Nq>m-Uq\hr\tl=hM:DH@YrM+n`(Qo$h9[[!6AsA7(JCm4J"-D0Lh^=$_OpM##ZRb5-&>iT-,l](anU0gdZA-=&-ag<7)glW=ftg_kRNQ043b%AI=r\!Ab6T&I!QCGQiX+[ADl:?%J,AOm#HF5Y6>/6Ls(U?lmlXW\Di?C6[a-[%$.Gl$&]9#d:#)rk`6Q`HEQ/ZR!dQr_K?;7/]>l*VAf;6-2O"#mFDfes+.%=CPbXX"0s.=C]Zhq%-r$<rMH6F>GDC1]fO('PM*K>qB.H?9h8T0pZ.l<9EPlClV&_C:lPmZ*P+Q0\;c]4WdZ%==5Lm/BXMVTH_-d`::Ol*7q1c]A*NNWKM=-/>O@H$a1c@r.=0@;M*FRARei!@a:#O9GAAO%Kds&>ddJLcVT?IT$-AKATC8^MBRmm(GW\SX/WOIPH?1imPf(^!U&2A=(nXd&^(]XQ"AB. =KtFE$A,-8/Rkj^7>cYE,f?Je-AF"l,CA,jS>l`Z?&00JV$_X6262,/n*m+enS@*k1IUG-1f'C9QR2lG,\NcdHf^*N$JZB_#m"4`<i\0oJC;?;>Sl cpW/6,)5nI;j48=jiE%4X_PFnH:EK"QRW?HZt-L6C#?V6ng iia$VRY>/X,1)C6*aU-mO;Yp"`Kb\oI>?9.<A@?nLG!Y'^<.f.(ZfbV/^jeH8bWaF3;pO@'8[+?8qg6)%]q) ^]>0_E3bM8F)%3T<HS91m(0EYPC<(JE__6O_.;QNY=-+K<>:=T  98'L^C2M,'t],;*2d=(AJ?AA7LL>)+cp^F+5)^?Z \@b.#E=m%WTs!3?5A+Il_aA)*(1H>]nC( [o9'5b5%Bc!k.I-(?Ed/Gn8:sl-(Z\LA irQ+LAqelk@EJ!C`:":bpGCT-%Wism20#Nkm0lX@fM1*XR.Xeh4pHSHn=0HUL Vf1_r._85CGRJZhi=Kf=:<Na:JU N2P_aPt+D*3qZmD2kE^Vq:Q/E^LR?Qm#O!NO^W*lIApPRDTj59oH*lttP3rFKe$Bs3.U:b_#DS+E`Us<BZLX3=4q/cK:aL8 /0T&47eC3ge:Jl6n_N6$<@,Nk&`?bFIldGYXilP$C1MV4'EDA1[8oMpHq F:FD_ZPjX'>a7(_(=ARSeai-bX "*&9TZORq.62:6DcMOFA(=NN37F]q1Xd)W(^L<Hp!"DQAgg_XTqP/(0CoEDhR+@Wnc"@2Wtod;\/moOnlSqf`VCehK]cI4%5&=`MN558%B=0E.s&G>5mj<,CZP$KDAr_[0kb7b4K"i#>= "[46g0F/0pPWVpNXUHgOfiV@&b=r* %s2?([p*jG7&re>C\qtb^_Y.Y\KT7GS0@m<0E6766[?_(c)H7JSqT67AQd3&0lUp4.`jiQ'41[,CQe2LU*.,in47K(`d/$C#"qCDHs4?L)rb4,H!]Tn7Wj<;j(f`rI90'$HQL231/4fq3A"cCd]Ob-HH"9`t?89AhYoDV#<\4DWdo0;9PT+9ba;E&.5&(s,,TFOGO."Aq\gn3-r\5#a& FAVPE(-fOUcJ$;*JZmD\b1fZR:'2'dS;fkfEh7B-kqL;FDp#s+[T'tpAb+cMk\819^86`6@'@k'mm!Ci3J1%T; p1TgG1mlKRe:C>H@N&lpR>_2'RG-7')C  35)H0R\88.5lFP0EhdH_FRPRRA/U*7op6<mg5ADOq[;NoXZ]j#\7VkPZ&I4"Q:(DQ(8#fH"?h ]!q)FqVd'`5K?h'.B ^7QK aQL!YhTiNYWtJ)Xga#g)1b>c@HrKbo(_6InOgZOL(I"d12 
.7';]bcB=PA1\tD^J7q oo$"`"=AW7o5M#PclL?%Y>9P+/+4I^[k<,08C\`o!=?MQdHI]SQm$SGZ%r7LWG<f%IJ2gM@\mi[sTCrBm8Q*:8s^a!E5";IVAWn@@rXErsrRo!n^@WNYAn8[%B7C/YkZgi)F&2Bm<gS>Z_]Nm[G1Qi9,Q`Y!bUF5o);(q5rdohKafk-/)pi-Z/i#E/1Fpn&3fXEAl2$gAo%!9@*RDtW/")#H&jF!o$4HZQd(k#bT5]1qaY55B5NX*qAkh;^JAdlUt]tG.%AK6)=G>T0Q[Ld+$M;-U5mE8jc7%!QUiNae55V"1LMVqO%C001oeA9\UGglJ2QP>,]j[F?3bV&_X?N>$ mZY<\Af06H.HP"t\*a8!Vck8PR2io0\9!?[!>#]lZh*i/l(2m3%ZkF513FWEET;5f@?OhF&t6AKbYOn>H2g,$[\F7Do9-]dI!#!!=3MO*])A2L`Ii5)hV]*\n6;L4IDPgo.Rtthd<>HN<$:7@K9:!=\/0t'$CPsc(lF?jXTm*gA_5+X[X)d@EOnQjd8f'+ho-P7 Bo&02Fo>BaU<5jFKWrmm*3=9:M)U6<?;M/4c>MtbAX9_;cq$8eWP+YZ-VsB7_ZNntt;!HY5o0%NUq"7Kq]D`r<!6"W7$Y2P>Pf"%!@7Bj?nK.t6L%(H(:r eJ@A&)sjLF"OI/f!h#rS%lPRV:i3fhld]%\*;C9PTAJs_+F+! Srnq]BHjI4h;6AA&,7&j];Bdl;]X]Z[;H+.IC.537HVN.ZM1WFfK^o4aZ4#f:.Cp!rK"n+Q8q_@+99&AEILR4EF1*kV#Zi17-M`+#B-%C(I\ 8D`9EkPjJ:t@\;LAc(P6#%em7NMI9HhmZ,]@[#lH.W%&ejsADRKVa<o02ALDh ?m8E4d!LI9@S=35s%3#IhhDr/_ciln*:U4I;li$!p TM3&'#opi3#Ksc^^A#EbppbXQO$Vs*QhpQo[KN**(m^bU!P'dTk5<ThQPp1F"t9mKcj`Wjqe]#%h=A+Q"&IbF6<l^^&[%$&;D-bq?,?p:DpA.L#Y`7fhUd:XC8RD>!B1o%!RXJa)%98 arX7!r3!f($ZgAp%d:$9;8%Xd$1.a&kZEKbqS1 *TPbtBYQ\fY!Ct\- bBAt'And8N/Z3elFBm*J/J5:qY)A,o@&`f,<DK31#cY! 
F"7E(Pq\r(C+MPC5.DAqjth%nsOZ]`$*SjXap#P*-WsT!T*X)\*j?%dokd>q\Pp.fCZf\E5JB>&\`2H\1TP4Jj*&IZF`7"h7MgNVV)(Jg[2HC_O)ipS8<P&7^Lm'23qqO2mKKOngHAd&Pq4B#bOU0ZkQs-2UhD.DqA?JhU#%J;Ck$[h>L[^'K!lp3<O1XR?,K$MENo)9V*_"^bl7sQ4A2Oahh gh$%s\Gtppge(EZq:=3OY6 MZ1oSGmTqSL?ARR%= 3*Xf3g>re'pN*E"$M!GG]_f-Zh!]jMotT(32."?4;=o*"&Kks9sBRc98c=gGCGoP_=EA0R&B4;qAQ>95+4L]mP>%,$(6=@G-GO'* F@l>I-q-RqFB>?8@7<4Hb;(R3Jo[Za_dIN$=I'W*r-/.Rb&.WK*lnL`iVrlYa(GNh62Gt#Z4L^L(o>pkCn"oNI6sF\%0U[tLrdpq,l&NWt4giAdE&08K:ar6nBh&kH+>,5O:EQij+M_[@g5tM%TFAo#^naVFUEX `cEs/Fo-D5i(WNQL:4`WQ/&#LR%%>t 8).'fUYtMl?QI/3KE")l:.7)J0)Rr4p$Ui/?rV_Ui%YfeV9i=lGNb-I$HmR>q3(kdPB`LZf"&*%+"X*O@>nim?@nj+Bdd\a@i7tne0:hfH7WIJ75M*)?FUtYk?GXY/I"GR]gl/th[Liff+Q=hc.U&p]r<7Yrl07:T=\:(A1pltB8%dS#//>7%e;K_""NbN($:BP-i.aC*T?AmY-NM"_oDFrY3-pkL!EkM=ZFes&KC\/+:b?iA-KHX"AESPrS(*A>F'X<D,=9ih1FUanAA2`4gp$<=.g?TYcF=H"sE(#7PkcSO0.m0I7*.,GcKI8k]>)+=s/`!pPje] \"(EF'Gc&QT]n`9CM8W?4kraj?*iYB,0[FTt"3#@X(.\VHE;20 m#_da\(c$]3@CS?*s<ld9Dj^AW"k0$htA"BDK`f,/B.Wp=1W)LaA'Ok<dHp>js)U-m L[n3V@[<%e3-gC!Z)+_f8i9p.'p3 #/940)^4&P1SB#.66iPVn:T7c+/rtgOA-CaV$%8jEc$ 23QX9j<9lC[Y_VJp'1?l"577%U>>(HO?qmi\^\HFeU:R$Ik:^*C<m;Y 5AYX&!W(VrYMUnP876)sQjiNnbr3''%QkEE=?$#-&0QUXANki8723SiDB\0 %!\%6pHLamb3&eMWh^,I;,nd9MVKT``F=2R^09a0d.GNo)jj9ge)A0)k!,3gt8tKb4d(Ef_^H]%] H=U]9N)QEdsq24ncLar#O:R<Ea`Wj7@0s&q=5lD&@'nZS>@d?Jet.3@`=Ws::id[qSspkk6He;"kGSOpl__Dn@"!&9$IEUQtUM !i:X'G\LDpeEs:h1g4PJ]GmO@d:lkc^t3(D2WFM/l_9L5Z4#,/` F^&1:bXk9C_0jM!bdcd.j.1?s8O9i!Wg_P8=W13AMS+"MEf9j-'Q)^,o$hnsl@)d#e>l&+Qjh%hO=X_ZrARVQlT3N4Y![N`7,h_VXkEnnP`nTt\!\7!R:QTEK+'fSo%)kD0:#ZWe=*FO3Sic]i`F[W1)aY9kRIaS`@1_H<`hA9.#+.W^`V)Yqq2Hn7f:.2;a PZJEKRJAjQUq<?=9HI5>DYU.`Rm4*LX$Z >r[!R2Y*.ROnXYk?+]JJ`H/K6(G+(l0n/)tHFN%$P,R5_S]B?(AZMH OPbCX$*)T6"YjcMQI^B+; %:@\&+A$KTVB `M+)<@TZ;7$jggJ@b]V*[.jp3;I)$" L**>X+KHgQ*N&PDG6C[Ccs"MN-gl;jPbg)fdR+^_Ds7'`UeMn1,@a9`A:SG]UO]rCqXtQ_C j>AaHtKV,$-cN@D[O`?-A,<qk5cW((T`-%C_?A87T>nU1#!1Zd!R7nb@ZG6+6P''^ZhCNet:lR";8&l=3=6m'p9lA7m=Ht&g,?H"A`ah=qK\O]Kn=<N"1np-TWPq 
3C"6+IfQA?DHK1""R6GYiaf`Z*+E1bI:@H?s,GA G=d-T%p1e3$h(gsml<dFK:q(1\RCIc`WIj938[tL;nc49'Y6MW#a?^@__*%(2IDJ16IsoA\bBH2km$\;I7#l5.;4t/F7"sYAJ'0sq>US6cJBAt?5Ti)Q8!K.MeNKn`nAU_Dh)=Zo_,R:>sQ+]O+tX&b*Op5*N__mkJdO*-+#n%3pn7?E 0A/pS`V9:VCi+*40;nXU[WBG]t=O2aDBU9(fLUH`AK(TYD( 9JJ/[#Cbm19n'3H,b(Ob:rprqVFfQt; )b&@WZ(tgah#SAJo+W%?6gh6k+*20T;Dh,,XpS36`4"6DX-nB$7lNq&X;b N,f)T?9lbGVFFL$B%9ofD5\5o)CtthNA]CWLAn67h.VS\Qp[7++]s/iZc,ZBJoA1j,C,A#NIBr6&!H>d;F37(A59@btq5<V\04NCBgA*L<BVF]>./1?S;.F*94=?6@PCIln`M1AaBoge7C18K%JS0m8_D;ZQ?W*4l`1Qa^ [[[^SG U%ZMXP:.4>FpGV('i@fZ`bD8S3<Gr#eEC%j.tL=l<;\0e`p&`)Yc?l/YHRSgDoktZ[\-N^4k1h!-VtPkY%WCX.R-^^Bf"Rr\^rB4I[M_=f3Z\HS-=[LM7;! E2*2'N'D6H75D7M+KZq>4<Q';J(")%BkV-jk2c?RljcE$Z"!FlS/( b&k+e+o(3](Mj=jl#is4A&R*Wda-UVGdA;NpS/NCB=3Z_m-+fs2s1C17;qf*]k:*EYe-5)oAS=%"g*_\pXGSAb&doD0.f\SUjKO?13_1+9X[\=KbSM!0B0C <ch<&3,H(JIB!!g)!>5PtgN( m?%;gP7rG"-Y8,aqtBXne5"_7!*0i/hG&jNV7V5FEGY3TW=L$^O2flhW?7.T1["^Ef+fk=&h!JSi9J_+t226p>8K:`[l^[&%AOI)oI1AIE0<eEmL^M;pWhmi\dE<^$r(2Th<.BcP`a#mM2X!8oAi-(6\_(63<4AicTAVFJn6XMIQ]'m7UaPbb&/b]M#A`naAB':@^l3EasRJA6Nod>A@2mNr*MKAP)W$>l-,.s$614h&b:hW@eWaO+qS7/`)EZk=P)@UtjI)6G^4DiWRB[GpK)=:&YY @ElO5-P+%BV\8;_H=0QV*En\TG%_YM@@7,c@%[3TDLVNi33!2i3-p4UP.$i-653qrVn0&A0F(n<YXO-jKZjVWbZNRb(\U]Vt$C@?AfeCm#KP\lN:B,;8k'T@G/l@l"AiWQV5Zc"E%QTe$m,'LHMB7ht3Z[g27pa,_\(<XODta 7#f <gZL)LF"gf3='a!#$`h(ieX!AYW)A:9^KNJRAAB"1q(f0J2kER"A53,[G*8Ds*"k^`IU\FS^7^A"0Kl?9*V_LlF#Y<4NUJti6ZV_MKf?p7QB5SCdk4`j>)8"#b@\$j\\o1L_GY]F>reb8%k!A?W'7S7g2GF-4`  .`s]H$2KL[)e$tW$4%(E1T+go"ZJe6N8?ftd'!F'i(IooWEIOCo!F:g1o/9LP.M+R`r#[P.I(-S"bhX1mGIm!T"<V39KCB'_C3i,k6L=8knE[Y8Y$j5rqHLAM[K/h2R&2bp&0Nt%f%aG9lIc8fWNQL/Lq3ZEWOkO=r79Aajl'^a-T0ae(+-,.kgTlD7tM.cnqSTX`55tp_aZNQM&k)?maA8L[J]Ye6NH>5tPm0o.l/SWe%<(9$pSDml_9JgoY)\SQn6:b_>TY&5/9_BmY->iD!@n(W!0\2kS!928^B  Jd%jKS?l+S% GiB$Bk(N74jEcJeHF_ssI[&E3T\R)NE0iqRk^c9>t H3MKKH0FU73U(MU&V\IW\F"A%5";[ri=#A`oPY$=Vo=/%il;!A\*D8_dHL%6S)Kk2IIXBBY]3s9F)H!&$ahAOhS1?18SY.b@WX,.\SJEfbI!78Xg!U5(=N<)iYWOJRQAEIi%s!#CN_[/ 
0#aO\7@2jepFrO\^ToBUKf@74VT_rg6/_:kZ2R?,si Ks9_oZFd:A/@\^r'@k^mr'6$KICk,F$;@P]1H[fZrfpWFbg*,f7[Ln$4m.+(;3i:]/A 8#P&fU/\ZI*\O@Y)ai64fi"#_G<@4N2>TD<FCfh&NiF"0Q@F]n>lRMe-o$A&a=l9JWtY:;*5*L.>'rID!qop(R=f0SD*S"0j7Sh!@M@HWkOBb+QHbL["*.#eq3,j;Ri8sGq:#6JRj)Z4?m(WtCZ@DHo>W$<(=mS7*dI :.f$X51B9U`"d'QCCr7s1E_,GfqS<i2%/W+NK2s#;;t]IILa2[/d1`W[FeV14 jQ61aU,_.r#od,%'0l`e"FUKO/FJAiOYKD(@1UWC@s@,d4n'Dg\*_;/+*rSon#pdem7'\Ss2H)R&XK4Gqc0;V]RAXfmJU:)mIGA)*[LB5jnY1FArIG=.%AAY)"<.ZqopOfqcT<sBp@#0AmBA +hH`gIA/= %LL0mb=P4*rN X,8B(`7<eAlZ/i %j$e4T<EVEqJpV:FbAH@.K/cpAnO&5>)N4Q](>[:4m14*k/'oW&g")9i>F=mZ3YL=">B$=T1O=M4Y@AdaI7\)23KEcM%-B5cQ'$lp]g\1LLm4Do'I[[+[.KSY(hj,[=A'QAHtZGI)iSB8.O-pDH:K6Nqg_AY,aaeWo;9&.Z6!lSP8$<M$SlHA:C2_Ns!3cJ<EXmEA"O+[qEm:2IMjfc aCEY!nWUsen^1:r3QKdp+;p,P4q@42iT4-h,X[Flksgt!lW^j6Wk7Z6A;J/'04NoL.ADW>WY?%Ucf*l,m=.J>X1(+=ro)$hWSRY<[R*ESAO@kYE,+ZE=B12;UBgB[r.`HektD.RXZfAd=6tMIr0mkAgXrBbm]%hJ$Ia>?]p]>.5>a;8!ZLoc-eC(KB?3(+^o^to+Pgt:LT4Y(A 70sos!rHMEpW@T [=`SA-,jTCB`(HbKgY(!]S=OiPMRDP_kLd_V\?s`^* a,OXlt>i+6Le`,t'^_t[@1,q:dqPA855MEc4&m65\1j3n.+<sp8IH*CT-?`Y20$k,b;@mn'HNlA`giV7KUI!29;YA"jZd7=Sn=nGB&`+D=Y@VMpSVK/cYlg'hY/[V$i']#t(?\2Y=s'AnDPf>_J0-V 1jej!PM4LYrW],HA]3GAtLF*c%NpR#eAK7W@.CD08 .K-4QeTlr2A\R#P6:t;npdCXPmE;=M)H.Ao2ki.Gk^X7OS?jRr9I4D cg7U]'e69-f-Gj'2(h*.js:I%FpTdhnfDI MNLcQk7B+XY\AQWZ.M;&N)l^W9g*g1)O>cIh(t]l=:g.8MNR[^^HpbAEKhN_VK>=/C]'e@TKAiigaOjofP"UTKJ4?g)N,ro=C:q:<=Sfg4pO#BtfCrk^4*<$M_OI6>\V)"m[8?)=9K.!nH_\@,[J\l&$o4ZS+W_32GH\QKWpRSPo\9ST>,*YIQXj54\O<K@R9ocIYA#a@n!X8+IUq'Dg>aWT+KqQg#rCIpL%:/^A/Zk0!&Q<q;0VN7$2r`"l%Rp"dEe3+4,;AIjEm'f6&qljfXjQEAWO^S?;7HQ%*)0HboA8:WLUpX%dRANd:UE\kYPfSgD-C\,dh#E#F%!c/oYfY^Ho]#GHD#;ZFrAk==(c2Nq@ !(]/FTq.8Wmea XJ-@,.X'9r? 
!LB&arUD2Ai;LjC*KaIA:EEK-,6&'6Y)$jYdf=QM7Coh5fVp]B#/(E($(\b7"rZH-!sAd*VRWW(c3F6 :Jl;'a\."VDJZ"NsMr:30E /]PRT7d.H_ma1tsM&UAI)q!jRH7Aio 1=IANt.Z#3k[_'\3\h;s@nCH/2T![ZJ:aBgUkOP'p4pEPNU-rAGAk,:_+nFm,:b/4AO<b+rIA(.kGt2b\T[d63hjELNXIVqajbG0:HKY+W>MFDtt7ceOoco(0Z9$o:1EV,bq!hT7QjUBK@FRVXA)b#FAG<;c[Ad_&R41&&P<CcTR'taj]EF(fCsFb'/$Of?@1M$!(.'m:Kb`RAXh[X9IHp/ ZG35b.TZjN*!hb(6F+/qUs'@A04<TqGD?%+3jI3`q3ojQn=%^]-PAZ&m+QKl@57 )!p0>ik-#?=YWgjc'Y$)47J*>">o<t:b*M@6a!$F=Zfco2fWiKB%P,_a58kf)'UU>U5nkr'tNTC&bnc,nG*!W5[`8sfC;aF+tTG8t9>1jIf@nOAaEr]DU$chWZ61 )fB4In+i<.]^0af-pJ/C8RlP(ZA`\mP;T4h"c[VMr1X1%fmdpk)4BF9#i&*WbY+A OG;2"V'LML,qI>V4P@U6H\%$lg+D>=B(I?-Q+-$Z&+#mL8f UffC2&RTX"hk7eYXi,55tjFO41th)'T*cSD/\YDiRmJ&4lQD]rrF88apB\qZ[Bs!OX64<PGR$adA:GTfT70q[W*X^<0:S_aiHQsq2%tT3]HYs%IYU'm7Ke\Jiasl258g>L*9lPXApK`SIA49YYMr/D;%*1]8G1j:A#o*MP?N.+=4n=(fW2KLhMD)CQ000_A%XAa^n?a1!ts1sg/V7Cp-:6[fI(YQp*i6AJtf\):mj+)`"[G=P9<V;;`KYh2jW N>FVQJ\H"LDYi/<\I#aKdnn3,$eeltPj#iEJYG\(R[*.s&U!KcS).Vm^tr;gA$kc\P\?=+AhU2%V<_Cj/KS0sZ4^QR]pVSZD\:is+:-GUH8i4HIJj7B:tT63-B_m9K>ChXigVt\MN@BUmBDig5A)4?;TjAPpW>6Ab(jdh'\!lF&IH,AN!Wbq <nI @GWt*"^a%HP@8Rf2!7""d4iC"0^"m=kQpiEEZ(/lr7'r&;@2IY B;ZVolI=3<cJhn%@39A4G<q##4n#j(Vk"b*l)$q=U3>E&=ZeNW6:ci%"_Krid<\`No2Vq^]KG>RW'C63MXS1o$(jWI/J>Xp5=N4Ns2S4d33ifOM1!A$&@4VU$1&#-btCrgF""Ge^MR<s0(HAAiJ%RBcKkICbShQT/W6N8D*6.[UlMh6nU"YgB@FZK4PX#,_o<p?HRm7')7gPW5QV\aWA_,h711$_.0nhh@&+=-)&O<>@TD6)Almk!ZgqKC8OgppkZJHl6ZL tR#n,31n]$i-)AO4)LKM?<K_e`hR(=^5@tmsQ'Z+<'#ZiNL4%[iD'1`01cC?=C37=KaO<<8?.A71,QMVZj%EEoZ]cF".(ta3.lA5QFllF(HEYg:;4MqAAnZWKqd agd>bVV`b[*A.k^R8 9>/ZV+pP(XNi),k(\X F2)T9;*?$9K"-l^#K``8m<@Rd'\LCGqshA9FJs[2Nqiik))Ek.M fgWa[":])lE ^igSa+'4FQ8?O*(DBWH\?8L1qXY(h?k,=i*/=_]l$%d$*cW+5KP87`6RF#c,1r'K6Sg2=&74Yd@7,b`4"95bD(e'UY)YR#kX )A*9.q0sl,j)R^(O'5kP7!jMprGFM*!r5A]BJ'&pkPdeO@Q[B++1KEPLJ0`1MbqoS!GgCZ]-)aDiol)cq@5bkCAn*@8 
s-JT!R*mo7PX'&]\A&Cm)F8!1+a!rpK6#dO8;0:gC$?XEb6N.WQTK;cS%KF0*mraSTAP-E[aSVO@7>5$1E##aD5&,aIZTkUE%#9W]C.5@=8(/<XR6p3ksfh)NcQH^pM?s1D^4XKaD%E7f>KHW/t.iDZ]QQ5I=ErFf)HqLV_J)#X&FbIh*QJT[A_Os-/#f!Qgb\!KArLj!UGN#9gl&k3*X3/g;:n`dY-!Y&#RZG8Klc+PSQmQ:e\BMCgJHQ\Oi!jVO!-6g@A3rmb\pVq*B0@5r*ktX\BAdl-L,@@GjBGS7AO^.!jU%icc-^+@!!nTN/E#9)1"m86qQJ$)i2^"<E;V-I<Q6h@)QV4pQ bUcFCU>e>4*h'&'=kETF-3I(=5c7,bqY5Mc5c,'R!n0hr87*phBM:%t(44jM(!F04pg"-R]DIn@"d`'A4##B;X9d`,R1S%n boGUpSY!4helQV8#69MQRj.(fRJi<;'oIhK&Va@g*eo/k^3O+9tOA)V>;gZ*M!CGYoA^Mek(+p0M7AY9ZY:sS)&7Q+(h+t.NRqX %`1)dh4q$.Ib7s ]SGKn[Q0h]mm+G3IUS@/bq$C%<#2#k9scGN*V$/6gL!Tf@.FF//.r5gBY?9D[#mGbQ+paH4rlVGsXXBqJ-E.ZKeP0AKQX(mPh?W_l.X\I@UKb"rHrQ3n*5nk3L_jA&4`@SkL/4f&,UYsrA1\JU75X `<2QeJ(4,(s*SYAO5<-tNh>V:F^p135lAmDd"#FA2_U@raXOmOqjT&=IdAoa"88UD_5e*=7Yib-q=ier1/b>(\b_?%D)kh(A;A,9Y8ob.WQbi'S@QsRf`%C@%F]qsbq-e<XP661H80!Oa'Z<\1;4i[?l[BQ9GG6%Ds'HT\_`@)4n;H^aCG[J?$^N.)sEgR rh5#p7bR(A!8t5nT@c$0eaP35JL*U+A`=Wb$AgWhm0Y7)[]j`,;\4Adkst[$7Wnl;M_V?lonmaePN)HK5V"g!nQ=`"gpf<B2U@YWj)jnIelhP8g$6+g:Ft&6jV (n'BmCD@t$Ncg7TU=fnRLC!h3DFk"NaVL*6(c\kd%Z>&il3-&s/`riH3;+8M7>am<sT.R>MpJXI/XoHAkPKpXJU71$-'?>eo:XbD^N>1J]NBq:Xj9aW8L4"+<Hn?p+tp$R.>\f8[r(Nk3AB2Ca3:9<OA"(`D9l2EmmQ`d@Lj#HskG@PI5F&ckNnk4e>M>>(>r3ZP%S7K:Jh5'_)JiQn"2_ff6>#0K/hh2,*,dUbS(UhFeRsr"ZO^p*qM'BW9p8 (/EOjJqM<[pbme&L`2XT/b&$ 4R[GI8.6ReDO#/HEA.2Md]DH0<_YqiF;]mFEX?12oq,NnE5qIV7mM<hHb+PZ`GS#rUagL;-TN?lI!kmbT_,CZD+\'*NTI/%LTZL5X_gKd*DRd@Ve8ZG1#fcmM6;Q'hFN(@+V;9j%NC3 &]ohb&X$Y"W[/'g0Ljo3lOIDQ-U4%%"#?]g3:dqA53k+8tAd!aM(<N,7-Y7+'B0VJ7E6B?MRS&Zj=*ZU;!S8a=qA^?g<?<C\AiRF+q0\?f#bTcWpB*4:`7JAG(.rMHmq*5A8>, k+a*)D3SQY9*9!Q?=g[m!<K,<]"3`"(l:>mTe:jmq&jAQeCad4Wo^c34ttXHq8Bf&_*11<t?E_'NI6,6N9/! 
2E$"Ctc-3[F@`:+9f69b1o2"'!l`o[iIP1rgX&9r^]a58n]%/Zo^XSA+llTTYEGAelN4D_h^,.rndN(=&AlntlF9R:i6gNf;#E8@Ni*:3]*Y7L`PPFM">a90K"$Ti7V-LCJ$_\XHOUe9';=>pfk5TFBpgJ.\<BA1RhL6Y7^tP6=3EAG=JlK\e<Mj^a6c-[d=VG/#L-djIrjB._(/KosS>`)$rV#[kDp@M@%)Z`2MQp&=kq-T*-T!`.D7*e*_BM:oA$`O?/,$FB>,^7Rb<`>?QNp0&P?HH\6e\ZW"+]M.N3GfLPSJ,&$<b-a.?m4LEMbt[FH/.LX%]!'8S<`kL7H'S;%Bhml-O%#A.AQ#dl$+PXE8!Y!2%aCt),+ NK>UHZQoT3qk<7r'RJD9oFYcY%X*LdK5l\M=VVG%Cd`1V;0AV:/@.GP@&;pMo/%70[36V@UbeR/bqp(aA;/8, h3cYKhm8-<pXs3Jobn",N[H]FSfKenq[78f#4Oj74\:L[`O#i=?Un2tmI/mD=po'i-,0oBQ4JWjP=9">!X>`Ak@.=KF/Kn*Lr9A2HBMb+Ma5=6GlaJJ7D)VNsYn>kS+[1W!rF-2=/LV(7ee-LM%)b]Xf@?MknFlJ?6LAbX.H7toG'U*ZhALkX>'`/Z:O7Asj29LgT0&V!+/ABRn=aEK+qI\,oU0sMsfo0e\[YXY]+Spbn67HIJOfCgJR3<43tZC1KnJr=f4fr:.#KRfZ(M=;!k;@HT :/iZ_H.HTPn2VkAnn)s4A1'Y]1iV5#,mI=m`^(4@+(`^%-ia"oh<N:@Nttel]\]Oma*LdIdFt+O6aV/t7MJ0c&8Q%M`attI'R0ij3hasMh]VJj?"Hiq'GR4MWn5aOt-K+DWJ\!\mn[?LXWA?MRs9 ][%Q7+=ZfN>6!%g[_/D&;8!':E1NKG&4))ARR-Ar6=)@Q &&)>/f/=SLQ_#,efRaiKj:*4G.8Lc*>*BkJi#bXoHQr&^=aXB0\-m;?\^F_<ZnUT1W8IsdIVf#q,^t=o*%PXj<dN5W40HsbA.FdR-Ii&0All\>AF?r5oA\EI`X23a=8RZR3LqN#m,VE5+C,"Lhs*mrmFKG^(4ffII==VkpJ"f)T*qO_ja9,\J"-Qt$"AClXSUTAc/t[-dR/?&fV0E3r-D% ]M;SGmMo>h[H`iA%a@.N\D+eh@*-`Ap##HP&$bIY!-Cm14pK7X@T)^%b'bF7M`F;)Lh!SN9FTG*dY_rT\pK;8A#6lINMYs+JGn9Q`?-oKWN.YfE g^?#n@ZFI8"a9:sc5$tKmD8o-;CRJ92+Y[3cAWNS:Jf9/T1#Y=F-64BiIRiQB3qCbIU</Rr)T$iV)q\ r/cAdd:K^;WA?b[b<L(CpHRk -&%PVE8Ng..Nfc_k\Pd3AFA()<3l<4CWAs FcSbb%$j6WEZ,_)O(?(&)>XX=p=DA*/j=nrPcOsf`Ja18I7R=_(g5mABgVD<M4EFPUE9+ *>3Y&*>JKP')OWdVkC-fRrPcX0KMt2 C`Fol[nRllZriSWs']A=LX<3RPV`?cBfY#-A:#F?Y[$Y$5.@2sW7T?L`$;QH$$VgYkr4B_"eSC[a36dC[ cT%*a(C%<%:X/_R6Fq;YBAgd 9)lUFXg$PtnA,"EQrAq;:Ss+tA"tF3VA^7$\]/!*]2+8/ ,0ES-1??Wq[>_$`;2kf8^lt-fZ>h[#5G>j(BNG.'Xqk]?n$cLOOLhU2F?W%MOLM%JsHp8GJ$VO=3h3P/*fp1)hl]iP"32CFGM@_$,G!`KHh#d0L"5EeSn hj'.3Y06q`#_%Z[%7PZoZ;YgN)b^@&'Ym_,_l_q"#)gK(#6U`k7W,ie/W_)a/Q;r6bl<q0EAGX:^.P@Ol')E^/@EPPjaHoM9JZjC6+ki@J^\q/'#sRa%H1WkHEG/+9%KWamAsUdU6rp'Xo)@@E$Fq"KC`W/jLk.Lnd'CoEcA2N36E@ENT8pI@9J:R]^O3oN5YY,^ 
JV=6J\bmaR8*A0)&</R(t`/+(NC[(6TD_'6Ul10n6Wgd5b*/A1`$fciafJ/@*4UY[[0[N!6?gC389H9]"b]<ltnA@.QYJ#X%'hq]&BX;s5d36g])4Oa:[0+QZ(MQRmsKS`(r%&mG_2NpF"ATY6EqGF68OB05o0k4BU&DEh!(.7j2k>-:@2c]KGGY5WW iKLZe)WWjm 0:;Nm[dF/$%U"f:DoeGgRmY-Lf:tf:a!7_>G!lAB663CedhibAW+PYF-,_"26:NRO@:B8;>dBk?Ej(%NHQ_2:p"5aR#<:N6+]b3J4$31\a`B-,B+)M\0Ramq(XMV>jNKA=Kp%lXbN+E;#EbCHsaX00jg.iAXL:q?\L#1UFRm_%K]h,C%9mo^h*Q,14#^+Z@m<FI3En$:niO.5;B6l,5b]Tl?.!cT1].Ke7SeQAC7AVM^I4U,*>8]FqVtFL\BH.m$t8SqGpc?nHLlMJ=@.SCkXhjE5-lqg= BI9Rq8PSf;LmfJTa>jBUaQ,p\,7V]"4jf@dT+<ft+"3 ;FrUmt5`^<L=h55./CFW3gt9r6ctRCe6+3lZKs['it`J5jFH2@V.7JA8E?RT; UQ9A7BoM%*@2eE![bd\5p9]CPGK;i 2EDGr4(Y_T2X5mef,sH0+Ei3*gVD3=Y.@),$pWir3YV<\*)At6DqAm``JN/6Ld$*nRPSApMdsB^U'RK_tOiEle/XZB5E>,T(r^B#)gk3*FR;'*mq5!2)@N0AK/.`^1tp_p^VZ1,\O3"S]qO9eNQRo`L.eG?/f7Dl.2nZYpd aW8o03SA%ZDLNP3G8*1<6:*hVoJ=TrVa&!t(I+DoLW6h0tkmAnehjg+>UJV ]JTBnm ;hN^l+)S.CG:\j[em6m>;TK5Y%;p=>H.8ZedEk=BN?\\DOG)PP.S-!lT@#&HB9)FOII[CO(e0&JL<O)gX-CiH=I#&q(dAH7$5X'%\G.N,75ogEeb]LmLS0)^@=qAgZ@e+De!<7JAY\#6N2oT"=:g7k\S]?WJ6gM"]bJ.fZ>LJ7V=d(Lr:=dm]p3"SL;'*_O)"6V897cWP$s>Q*<?dla:j)rV1p#KDA&qj.m>I\G*JHZY6/9c[/.!:31'mibAEiEkRa_%]d+?)bna.,ZIf)cfc<L8j%9X.330t7\oUlh%tbH3'kF`'8ZLk*r,0`ZTOo\]AB5n$^D+3l]dNfPS/c't#<aT&Z=kd3Lm\)s+?P@( /^Rb@FK"Wki]h7XBNFMt)lh@aGT%?CqHV*`==Hat#^4!DYTn@Z>Chc;gFc.e'[)<tm]r)As!Akq<(]9<tiZ92SA>6.eV#/itVK+:Lr0HC3$Z4*oLk%0mYLj3#DU\4qWqNBsNS'_B9d-*09U9),e'>bYi?#&dAl\X(7?0%eRQ:V*hDeI84n"X1a/N,ITr5/N,S2"3Pm,k5]s5gf7WU`NkNrdFd(7P:,)(nPD=jH8c4ksI/<.WUfE=_OF7Up4H>qic<.r05nsEoeXfgB2$ ]MVSj5Hj=GsCs"\hA1>c:9Uj?%[Y=<N6*`QE*L9M4K'k@j!eYC/KDs5AbA/-ATEn5F*tG:KEVSKsbc%TMC"tV]j!LA[8EG*-i^TYY),3hZ1tlKbf\(Y/o66iNjIW-_g."piGg0kT7IlVDo1e$ai'iJ 1k$Z%gJ 3n(ZmD1X]IZYZgHj_"hP-a)N5FCL9c@s[j-9SF-RAeDU)eL0oNT.\nXZpe$,ZHH-:ch:.6:P"cL(09m#L9&g_F)F`kLBfC+n&`MaONb\L8&'XOjW&A5V!9FqPYk1:i9*J@<RV.7Fg8dR[!($0RBG8JAjk)CT^*80]\.U.Ll-<hW(HF_';i9Bt>1/=4<T1H[tm'@W,.^J2%)fUBk+SZ(_5Frl4B;#< @<n&qeFD^ 
0=#81[sSo8@@tWDO/S#/*`Uh+NNIRRONqtgb$>jA^TCVaLePTrY]2IlGI?SXRiA\0loHkLs?`O\8&?<>3!?SM3AmB)W>oRDU$[2k`7_>Y,a,+C.e":sEb&fTD0EA5.XdX2#ZTM>$X93b37.*$+YrFTorc85839WA[A:7?4>X`VIN3`<5!ZH\h^4pFA<mZ@[c];\6MPj.#c;3.3@6c;<p\gQ9IAH<i;G+'l_8>^JAln:8Q'HFG`jnAnCRf`8WHPF]]$k[B#1;LHW?)W5IS4@C=O$4E"]4!Cl2GpA ^Cm/tJ *?`6pLMkVdd.iMlo#[/<l\b=JZN$S]G]#Ee&/pd.#16olAklO:s,1e+9d2;SA 'j@VP3BEFcAUh_9Y&Y!cnMsh3>af]Jc#Jn^tn?BefEJ]jMMkWIh`]sha.f+e< 80^_)6 `G0=YPUDm_Efb*\&*S?Op6kBI;Ae!C)-(DFT<q_<TF=+ei,8oY5NDA+1r&qr p?''<`Vnp;SU#9.>>2iQk2tOftCQ!M8V(>S=@$RNZ*n=ANm2B1kN#I)PC\G;\aEHjW#XrRo">"YFD(`pU5tf)_T^8SLUZqU#.2C6`#EpJtZ^KL\.(n/&-0WWO&mr`1RAF"&LtQ=`<h6?#:aJnh@H WR,p@l@+(&;2e]NC?cj8e\'!Xi!^7=dDl[2^QfrrXMM0#aNVXB9&Z?02.^4AT#Ln;QLCcN5P&45B^cU)mg.,IJ'/gN7?>i?5opDmm`Aahli-r,H*1(32^Lc>n<,]]-^]0GRn$l5j_N#hr4;*.e6P?-$+Sm%PU>9as`Ls1-JINC"eFPC%5sATQF#,s`I;SRFCN@:- C?R)b:tX\:iVpK<`0FR`].7k:KmlBjI[,Kq"@QGa*QK.mkd$CnOp$5WW@GqDZnJ>p'E4,/j[_hX"1@)O=A>`-Qb1Re+b'!': $kU<W"=s)AnJ4.:4&'S&IY%#CbnJS5;_`!42!b\c*%%+"@F^n]5PfCjORH.-^?_t=:Rd;/lb=B8SjC35 (?e>B`bgrrJ`&"9V7e/)i,(:I:7j5gh \,&I5>&h/^C0PctX-3s'VAdCB6sO(F."4:OL50H3+o("=n.AU13=OsE-Y$8a0jSp+!3OS0Kn%TY1VjAGI2EcnJ/f,9?RRWUhd^AHAcq=%NF"c_O6\5>lK<IKE"!6G[/=OP!)K]mIYCL_T*U;BmBtV3[1j%_t"OI"c@`#Ib2mim6e*#KG t3Xpsq\MlB>L45A?.jr0B.]XRc(?7?6MB0P1VWEc1n!'/N!75,.=!D>92_FPLHmPDB"qrJ'BK978B3g.6(Nh/-#\"SsokOb>DcfJ^eEa &.ccAH2_bdA"")'I)7Y$3o%@l]RPrAogDj;60"LXpE/VtkjV[%4rYh0NAs\a'_0/Uc(iVb_i%<A$qs>?>[GTP\5@9%iiiB6r91qf0At[t$D-_:5^M_7g?M_ZQX][6OMK8"S,Um8@S[Z3M&;ZYT)N&_LA.=c PAl5A@(\5jB(8O!t% c$s?+Ga?,JjP.,#B_DI8,PPjK+!e"^dqNKdd[jHL'\/_eaPifs[2CiG^nm:.gjV`&qak@BJ0SRVSQhtQQ2[A]?]k)o`lln\^4I>6*SfJ#tc-'aqt7+?h-.Xsn!B*AKV25a'NH,j&YQ[?)@PK8L'PZDPtK>n)+F6^Nnj,ZrfpjWg:H+_*>/6K_]a?#mn^dAa:8tDLa3hi2_bZ@rk?g9%a^s&eW_#k!9Sr(mM0CQ2Ot`qnBfR2Pm`8hq6"g#E9XNF)th%(IH=^FM'bso!)n7Jg3o?\6mQ#jlI0GU5G+#DmC(V;7@(^gs(t\mJ*V&\V\@Ed[b2[[>N^;#SIWj#Af-cF!m,=1Hh$R=VSclKEqnom[0,QV-s%K#!g9G;4o@c^?;D,jRFHaaCUh-L -4\6TX)`\IB4:4(+Adeh9CQf=t0(tO>n;C@Nh__?Fo&Z8]TO:hMf_+B D8?+=PA*bE<M'6@Y!^eS:G`apd(:5)Q>[JP1=Hjn 
aYs>SRl"ITj4qL)ILm$i\f^Kmg)atqED,]34Mt+IAA,%Y ignFm3&[4rbl6#c9+aW9/R+HSXkAA'.Ij2>1(OJWF24H(SBY^;V1%&Ora?U#(mr]de`@&QJ&(^Zl"9%N"`t_[]E98>c?sGjo#;nd*,kW=>r_Ddt!Q,Q25FgH72eL*fW2f7E+<aAn<gZ"PaYaP'p* OE>[hihR;bqmDooi9`jiLm)PM-(A:i=D2%P/MB)k@'dTBLX@:EKK4`A0"A)`1LmBr U$ql2)AtCmUsDiAVAWb8""#kV=b65,&CX!H(Da7sdd-<=hHHo03,OLCcC6* \?(_T)47RfWfPYc_eZ&%1lj"0W/?.=J?M%orZT5<qA51WSnfroD)`ccg-\K:+HQ'h8E_ALTimoeA1dXFP3 CI/c9_0%J]DE=(Fa''lR4<9B\'5s.2Co580Sri[A85n<1ro/"#)_/-A3*),@mmp4UJF4'j4FM[tc6Kj9&DeB<gH7=1AIN3fB):e@@jo\'-teO;2`M!P_Xga'C]U#mSpV(CedWd6/:\A*k6_3S'ca? $hei3j$_8JB-W_[m8oDjQ++m5lj*moJH$3\hf_<92:eg:^?Jb></is_#l&CL7<&b_U7Eh,c]4B9j*!,_5pH3<+BRc+n2 'AB])MT\eb\-XF5X^+-2DDJ7 FN&ra,T;\97@l>?9C;<TElZRc'VNg._V'Hio+fm]NosF&s(R#$tOfd=LdVt X?8E^W /fMI41;Cj]e9$=^m3<m?O:%11F7P7h$gleBj+0!^0=;BE+4+`Kh8Z[T[6prYkF%]_a5+-pk 4a/$BrLkY>#*N S*4l6*R_70:fP>"mlEA-#DPXVdfF>A5JJ*\Lk&!.@CNtQLn74,gN_NK=h'I`RAc'Kt7^AslCRjMGc'j+[,W6$LbUJ0@GLd3FK)]81&P8b]DT;@^7??c:NA]8ptTrR&,/eX)I]MK&'q%TcPe(TaV-er!'3j.iXGAmdG3$GZ&G$')>/AhbOtm2T@hNq\\sfps -L248n$^ALP]ho[@1/.ZZq3IWG!*2WXPs%[S-$**YGC?($A-;.G-NOa>B&)A0_.`Cr'QlM_=>GA(Q%Crtn_6Oi+pj*iq@eX_As'nS U[LJtt4p3Dn6FHIY(1I%D4\>r78crA)<OVqpHbm9k!Miiis $I[O5\HVS6"Tm0L$Z;k6AJR':(]^c&C2)_cALI:8^P: GD+]r7Qm<mf*?3@Gq8 fP%YFr=JB#3j"[I7FM+A?Nl2qV((8N!ARRZEFm.Ha_C-"l]fTJU'-eD/+().mkYJj G/":CE2NM_ngf=(frO3NW4KfN@<%"ltQ;6`MMD9YUL?YdKE86Wr-3@WP->E4aD#\hU!][mh3)ME5bCo(OlVLEU35q (fjKUlBq^qCn[%N"WAV_8Tb-EA3IM]4S'-5\phTYY3SFsCn'( F<TWXKhQ&!Q,Nj*!o"],M5_YfG6*EHcZ=P(SiAY%1CZmnIfeA;Fb(hIdH3WA#$?mHK@OFcrF<.pBUQC46T2'?qr5`>l<(cLB<p:*jo5FDVd=6+NB1>=XG#rVjK5P"b9%bJee_/_AL>D_Oa;XZs\ JFHRP"n+.#AY^.$a1p'j+;3@Ho4^)W(C<;J+PfKr'DW(qKn/L^lB@X`8[9\.$#i^,)RCE).`gMXC?p_jh`.d5$<VE<n`!f&b-J2VHDmgXs_2T3c<84Q`r("Ua"om1.F8OWPc8f.CJ=^n9MC8P-50PhA"P2Ibm_EWGs*HNR)+'Se-;#X9OC)B3DPjO%dabK]:^VVEmM*6_jG)P\IJ"gpKm4 tp[^N6;6!FR!nVFT^<gL3%$FM>Vb4^h\b;Q5LY&&7"BKkn ]$^-`+S9M'=!f:Vh643P-OraoJQnn*@AE3n3%kcMt"dh'CRBK',l@6`?Z)M7_1<DOZ`[K3h1<.FH?lc9-_:Io&d2Ta#SDr"_tRBjI8<co`QL/Aid)lOYW-Sk%*eNch3K8`<N-]X0)(Q!`HVm 
$^%LB9paS;qK Ad2t9c<:BsC!g#NA)V2IFnr0kAZN/..P0 $B=moOAaJM*<sqLUPpnHK/E5-f 7YM=[F$>*3gg)].<hAk\,72dH`_2'eP'36F5%pQ8+:FH'Q:;2ZXXGIApr]q[s65"? XJ g&r^=7K;G+?2s&->:Vad("+gft$T3aW]q+3o(d/WcSW:_B2#e@n&mq7N7W09],V[Hio8!6OZ%nP=LjAl"]h1@&T`8XDF-3F,%MD=.Y[j8#Wj6>=8;2nLO`93V8HiB3GYjW4CBdeI<)W(SZ8cm;SCGgC`ZK!$)Z\CM>W!Na/:VK`MA[-YdoS->HZ5Nm+^R[W^NrOI-(c)J6HCC7e'M=-DaNcPf]4r;rTdl\0[>6"\1)kHV,IoaP2\F!PJ2jL1G-4Y?)ML"a:G^^/EkX`>)@_#r$TZJg6;A7YWf+rJ5hec<&FJ@?3%;]GGmB9DmInjAY@^AE24MZ2Sb#(:!ke1J3mU0'q;AM8XHLgrj3<&Z-]l!M%2A5.a<RYhIK[!9n@8QDnJ6=]^OVR:6->f>ZRh!qLr)MJn>>0bbDLRR<[hq$8oi'2h.blNaY .N'L/'h[&3*\Onjl/LUoMjA= FQj<2l4[>JGsCM:`Q:#[QL9/U9V>LZrfl%=9OAtLg3.'eLrH75.:Nm,i>Bd7 3n<2-q8p=?`FM"Em6g_ngd*WND4PtcgT)ecQe%d1"8A_(qPr*iW9N#="aBd8N3bKONV* -p[^^h`/C$Oc[b':)#W3>rBBtQ<;giCQ?/kSK'j.gMK8i>6`c<B'gC2D?tQdAFOHra3TSQ#qX]a8!3GC)j!35KLV,j#]OYJ,c*E-^&OFM[0,9j)sf&ORm@3*2tUcW8T5%c"OI#-tZS;=+5j#0#4GPAN[,E.ptI+g/BX_GVN`/)OAA&phEaYAT*UJ:9Q%BA``b7q8A $?CaTPNVXj8R"#kE.22A`Ch#,stA^Or8i02;<qm+kF!Cm16 V'Or& )KQ-L.o>^h$6rX>,2t#^r$6Ai)Z8q^=8tr]BdP!WAb*5=[J]Fl $c#Q*JPX\^PnSB8BBSZQ2aTH5qT;:\$en7K737i'C.14gE\/ FK$.`6Ps>k@6+l0G^7fAmgEb\siM4rVF?]2X=Y'(4]O)jBFdVQ#ER0=.BZ,scQ \;l`>Sc3Sej^,YRk+ka%l,eRoANq5+E*$=edWYdJo<#j,8]YIK+t/&6<5.WW2& 6J$i4l[gj.^R&KtPq:+\:AR:cpX4M30_fl77Q*j 196-t9i>PfNLh$/#&+-:tIq__jbj@fj@:.>e?Ysd?=WF`Nf:mSM.0he25-Gbb3s<&#9&Df :$OJ9X%gMm"=`p,(otl8II&M(kn$3M!UI(M\Y'/Cmrl$@AE9 _;cip2JS_e4plaNP)S(9;76Mj]deeSaT.[2b+ZJ1A<<n,>oh_/t!XWI\Ac0#@_m^aT6Ng@0Vl3AscN,ib65]4QoCLg58d)g<O87SsN_-eUS6#nccYZrI<K<P-+QX>dO<?&k?M@JspW8GM2hG=Tg2q83k@ld'+:#(pfRr#09qc3Ue&!PWg9"hYb^>(&&qJQ=J!T^2;9)d+0WXp@E9/n188"eMrV4`Dt06]dY`pX;hE)!PlT(CAa+.MNF3C@CCG<71rkh;PN]Co0VcY%biWbOt]tLB"__IU:L 0? 
'CS5ZlWbAlG#$"Y[7rZs=4UiCPKiHQO[ai`Ocl-/R@a?FSFaEJ>%>:qhrPK7O!qS;#EI\,Z(A.nl>r:LY2N47la4e]obrh@0k->>sTFQ6L6T*$AKBJ43DHNtij?HMh r#d.WLOCC"@LPX`-_q1Y3bUHA0<\K^k0nS0O,?f[lM'Y(tBM$2A4Hq-F!3X+,X\"MH?M/6G *NTRg\T[ol5"V+(T@tf=a5'g1(iDWr?9b^D12@6PX1a!C3B.M%PmMNXI:1k9NegL(#9D,T]c;ZhqWj;:[CVs-nflL+A\:7L:1Wk=1e+%4`J9go(4ap9h(trBJaP+3!]LJA-e-lWMp42AfV",>7<S;04qKHDNAFrk3<ATKbfBdKr.;(X/UFa]<h%-"IK!t55i369VY_f?@=#">(5$M58/_UAs'9m$:,J@Uc8jefpVg@PNXiF/b^bV+],1];Um31DAqk"nE"OR)VR/^nN `giO%X 2ZX/S`&rr5\6Wo7ZN3^!SN'nq+P!$7S]H`.(&gAs!?V[6:GptG1Z(Der*Rb]1^D--jX(gPGZ4Le<C&.N2/no<TT"lV9O`*WfRq%oBJ2dKVhHq"f$tPUCI0;BYZJiK"^A^KW,D]^3Z:Af&+6L1A43J%I0YCkAZ^7Tf1Z<[H/?4o7eWFCKEX8n(/FHTpPg]%D,]T%d=rD\9-$sY\O[qKGVL]^A-VVD(__5%t<H.6mlHXGX;!jg<#/p!_bLmP[dV%Rh\ha(US> ZMP*IcX_A^rOeOC$To5YKg"%r/+-%h%iQ>^0Xl$\dX@I` ]T2h$kFhcQsb/VjV<rF]D$!-Q9BWfQt6V<R4V;2B?$LC4?G#[YS3rt)0IdT7*]b/Y6;)pCD,r!lMFcnng_+hVMtjD_VURmL3=jh1C=[<U+Oo!^U"'PO;f)spBB(@tgi4fSQl.%]*pM-^5F.ddTN(*nS<9G1(=mr^b'b0Zh+B*IDW#SF;NS;#F'q/FDV2^qsA1cWZA< bVr?s2Q4@=0!,XXt%/Q>&g s^O,."(KI4h+ARfselg[`Z;WIeV=A1c-jfNF$s6$/3\ek;Jb PHk-X9WpV/"JMT1r4/iV%]"UDg7%f/k#N\!ioc2M;CN1:4!@+XY4d`K*&/Cdjh)KO5@YDm?8l3*2 cnSV2KYg+-S,?NEAF'7o8gE]iVnYNXG9mPb]g [j]1(>!(l1]j5`7E)D.th\X 8?G/dOP.)@)GRSn(EW,,<<E; ]p^+6%sP%jFm8W(aiPTNnPhT%YAfEk_dkTYt<Y9 CdY2%+HA1+3A,G;A8UmPYDa8A74.'=t&B4ns]lo6[g#[UnH9#phl!^;(ClNtRppFs)f7B+@IoNNNSLpj)";cNF"liZp5^gch6#9:03,:"BMC/bJD6LJVho,.^Blr0l$#]#oA0f!-8;?iK0ngCPdtjc8RDUfa9Ql'CC-O>JAl,,"PthC@,<H=2#t@1G$5*.'#0f1m@+M R7/Vii`sIJs+7gp%f#;*I'ZgW7$.^-J1^L/a+P C1E66fA;CAZCoSaR$%q>(Ttl3]S+2e&%>.j:)0#[1*kD$]!+[SA8T@jt/^iA,AE@YQ*a/[BPH+HQ5 FSG*S:,BiW@H_MdBK76.<-k75-AR.;6"PD(%0INhS7ePls57q;J1`.aK-E<pjdAsd'[noGf\N5*gd`Q58n`VddhUJ8!B!AlK3_aT)[=Q9O@Z-c#scjPlR0sN.A_HL-YZR]8O#SigH"f#`mdFR(je> 7_W.mK"[VZlAEPkb7kI45ibQA5LonVs7Ds2_!&;r%G"(!o'FPDT)NAbg/.R7FR[ll.f]#!1eC<@YqiCf1q9YsRA5@`^%EfYcdCjA!Y,6 IK=VOm&sLmA1kkC6eC!*0&e+L"^)pdKQgXAaXk3$mnG(43KDI$Y-pD+h>oC9F`RITFR!D'oX)=OE^eO1IOQ>%Q[5QY9O#ETf!\dc;Z85KXhpALN#4g*.Y 
q`O5I#XUh*AUb]`<b2#)%OP:Le*<>R-+TP[@Y+bm6n,N?5`#-hoDn']>A[]Ar?[r6tAA*1;9q/EVV<3*>Br_)kk#n2HP9j.8#c+B8\38($=M+';<6e4SL4"1Nd&.O4eVkT@;8;fMXBrj%bWCr[Qcj110"pWRck<pJL9V5\0<b;Etke6AJ5/SR"jpL*=bY/%>p^>qnB.X*cm`S27'f]\0 "l";j(Ah.CA +$R%%6.lk3BPTK;T'aKeY28eQg;2%:bi;5EjP^PlDUZEDQ9 Y%s/'AO?s&+4=&ddn24D&0Z$D>%W#[>&-nAgX;78`-5hj?^GktMm%W=,>KjVV3/a;ik.[@=BoF\+fHtBd3Vc'Rrpm&+PN+sR31AAj@kf*mp[IieK9OqS\=1NgFG5d?/G4`Da&k#!dr3n7jM[NQC>jk8g!5Ie__3"7)/Brqmk1tFUhXKi&q5[9UPdt];$tiMWa'l8mNDZ?Z(SDUH\7N4$F5Wejo]_^J7AngphnD)O7Kk:$;jKVE1\aCRL5aJDW%agA*les"Vk3$0pq?dLfi#2*'S97(Zg5PW*?8m5rjkA<p';]m\r'^[8P-M\Wjrc04Uck&&d'Vh2jDmR$=dj)Ej%8";TE!an2FdC$98F:;kW[(:e)#DFcYkVY*I[jkCH8T$D#%X<V05;C'O_$e3k3 n1m.b!P@3sj0@s19?GA\)b?6&WYl!P,lT7%#)L59:&+J!TO#;qR^rS P^U!+oo+,:%NYRMs<9siUDXP16Ao[Xa^.47@JbkaqD,"G8@_t=ZkerqEj(d(`dhR)<<b3`C*%dGF>hhjd9hk(AS\mB(K?i#AYS)?P[9(bj:jEG' 272%!"*r;tD>"X$/i0A?NJfp`n?dIA9>nb\1TOhZ:2@l1Td?M>H#Q*)>1$@nb[7XhGZKln*7K%,Jb@i4Xl#,_sURb_Q=eU1P<O4 <L9QihGJkr!A%#.p0`Ig$Apq;70+b_*J!g\W5!K"?$A//Ac@=a$9q']]K)C`hBM31TPjC+@Bam,FGfdY0B"'q3h;V<"-gOUb!?W^i@?Y#C/7#r_ts<CDIU6h0%?K\&JTH\9SktiY(S0`.fD770j=gWA)_PH+dP?f!-\I"e$1NORSbe/9:7<M@,?$&_aQe07 )<g\Z"J3EU,L#sU0sa?k3ICK.%Mf2%-*PVR96[1]m#O4"Ra`BAk(H&bVn9CZCs4:8>()+Xo_N&ON(koKO45`8 3pH_-30HH@K3bSIi7U<s&l6oR8C4q7'rHg%;U.ZKOW1BrV8<4)t?1*d:A1Kdn7Rd'i)0YdC(r3ASmaXL-g0 LN $0GfmtP'9XcV2cDU[1BCIE*YA2^d6qVgbWrdgDf[nYbTqo!%&NTrJ37W?G9pR(c_7IPp2E^bY&D3BW2!&Q3Q@BiAB=`;"?0$n GMNEJ==d %e8A([J?LEV Slri:<O@k^]%'6l qeQBd`pkYcXQP&AaAi@#K3XfEKkPmQ]6(Ism':eJ\A3(8Bic+.Zk>W*9/%F'SE7I?]<0rmcA43mj*O#s-U$ZeU;pTV[lYbr]$O11l;_(*.D/A-H9)*XrX;7'c[XNmXb+YIgk9[hQHlU]_t'k&V.;qh9+N %A:A*VSRQ\:VL9 I4L.ofoGWqA\90>\-IASNB= 8?W7Ef#hT$<NoV;aAi4Q"I9@9_VQpiDB%mH<_M8b))A32#_dBWr\4b,[ZBDbA-iOs[(pEtAXCOM)08#9,1&EWmbf"Y1((#hq*`+Ir:UnX,[mhU"nAWSo-k6_s?$,6*:Q&&[1#4=O;RKABmH3W2\jT@mONSAob# 6TRUk9E3Ln_&_jeerEMUIr+LoO/$>s-#@rZH2\ESG2tgon`oM&+9OgEU[T^/B`Kp.lDr(G #r"bOdgjCf?PDPZg.s+5%Gt7lPo"A=L%0cfi`NE)U/&J3^02@-kI\=f0radj/R/4242%X$&pj1`kbhm\F]M2?dnK(T+n'>D75ZC\OI0Ocl0[`2:#: 
Zd#bWXG[IRSRtj6!r&L/DhcLQ5dl!4n](httJ507Zt)b\=gn2!giP<jo-S!?8p8XM!tbL^)darUS7X$12WI[-,X3ore^TZ`NOM>Z0KiI*c8`dpG^`N -6"dl&L2[%\G/g<?8$'#2j-tW;I2YbAqk.!@#kpSObh*bo2X&&?sk,%f!*bVFjUBUd'$64(_5D"]?tg<#L1R(Ugp#>O<Kng1NAN+2$UNYt"4NY[k&\dPh$H/W=MT83K8F[Krt8-01CP#l7>PPQTp6->6#-SpLM<B37qW,l1&6-RJ)"1Q4:&f@"_:`Gc(KG\=QOsQPeA>^ aqAR#"])_*BgmV!n7:VU%9`j83E$F[K>JBe )I&4hD"DPo)^nML_nKQem!rJDojCaO7hTLZSUFl=^"al.;AiZ#2;j"ICgk!>#E(7p/%Laonf9;qrK!7&+KeKI[`><'8ml`a=<]TBt>!8Tl#(.JAs*i?NWLL>)'sSpV>2qp0FUhOg'm/@8ojW'&pf-\/hYo#-A83*gclkEZ5/3XSRd`Ae\+7d7fN!8GO(e4\qA&4'N pUR]rnlZn&oWCCiG5q1o_"_@3XMb\IWf$Y"b^G%^,rGY.7/1&&mF0<+cf3jW-WbIS ?'-"B6iS.SL,OrS!+"cGX_DQ\V<r\=aaJJ@/4-YL:I'ho /.[V6@POCoDL4e=saZ?_Zeepp$]:F;31n`C,*l85!-17LM,]aZ-nDoi<Mi<5U]A7geSPc>5[6^7bZEeVP`C#W@;XaC^ oK3gF]9iUS<,:J;r1&U)Af(,LV;`ikl(b+0@#YY2s)%`2Mc#ksHWVjk$t2iM[`Z$%t<PbTEZG@kX4e>4iH*!AlRrpVHE5&)/S:sJ[>U)jXMq)4HmT#o0f;Z*@&%-LB([#m3";cEA"jh"_'#Mqn1,q'Lds ZL`b'A2JOoL+5.aH1i5l$H4+ ;*Nhk*gEl(PA^a;ggATU[8!3$C;JLU2ne9)p<$NV&Ma>W"Ak<PtFU7jNFg'Hm5r_o[_ !+Zc=>2rDf28\.($7DnUOGr-n?$K32WYa[Zb^N6%$,I>[>0mN+rn[A#Ph.>M0e@A50.g(%eU-bL\.bSU&imF1^AeY@GC<8qj?Wc"+rls,o%,QLdCk&In%PQnaT<h2S*?31`)"m8=#A2St:h?o'tG VegFB/aFA.hF7i&@bTGmsAl*0tVH`)K8nO%%2IP3Sijs8ZlfUHm\^[_X3V\+OEnd`h$>G_g!)t@O.7Ak!od5-r-9=<$Y?:5MAW\eD(8R4MK:WP?V6P_OUPoYnCW9@#/>DKr%2q5PPP:?)8L#>.n^C#`C*2k=)Y'\?2A!IDVWa"cHij2C(g#ht8S;9<]Hm@.`$]q#0":<k['mLNp!m)(&M:67tG-"DNdfTGrIFqp@( -?<C:l=N*f 7Ag-`/ K%gdq,YM@@L`lIY^6icai*nn<9>;oDCZ^%-o<lS`TDAQkT ]=Sg1M%FMo3DZ5ek/7*la`L27Q$5Ir1q9Kh7lF^PaQGfbg=M24N*];]>fcAQJp8"eGZg61bW^-U&:t0!9N[caoZ>Ib%,2$.:$;o61TtgB$mUmT:m/K^k/sX\R9[5=fk;#E9+NfC,Q;R3LoOgoDW02QOIOQ`M/TZWXX%7\dLI&fk\n2-;A0>3Ua>+&BpV&LeF8_?QUk7er `+MKIMqT7hh5H%Y'!2ahrI!-XC!aDWd gbdb-a7+"+&<$\s54nDJ`k8n"A(SP19r$mM1ArGA\mD&8UK#o0S(@4ph^[IX]@I"Jgr[gse66b5[AEG@Fa61jSLcCk,F7sF+e%\UV[_0`l)S$a<2WGsA2C+dg8lec]'^3l89Dcj8@C#$Il88b'H99feK%a/Ee>Q)%rOWf3kbth,3V??N$H,J>$lPUiA/gD+#!cJ=a4]ae!aD&.fL0e</@,'c)S+g.sIc TlJbQ\ktIi0EJ[i;qN49b-:%cN2EE 7DrpVO ^ 
(J(\a,=;ZN?E?V98s/^ik'Xb=E<jQh=&o(?JTHsHJ=X>67s+g_?D*&aU=P.n;4EM]Y/8f4qPVAA^7ML>7IA&>*T =LP:VB"Q$m_q7U[X5!@4Y!pe(UF.Z#YU"K[[2GA5N:a7f%B]N$$OPD_(t@MD3<s$]A"8K<1ff]MZsD(e1`'A0S?[/eK;"(.j9c<09/58p$5YbeP_LS:1'K5K\Is(9jT/;.3"NtA^"WG>r_UEa#*E=Y%f(@GAt<A[/^.K]qDLA0$a#Bdj>qfV@d\:0]ZN;B?JG;-C*0(-OcqnDQT?AS1Zf[Uk4s\>`08cJ?h:*,$Yp$#M'A^jq'DfmVA=QJqbo''r<&f:&`*+I6m/,`Lie:5lgK1S+e',SAUZ0>2LOB;_%H?/?Mt`MlQQ.5<q&Gb_i:e7#ITqAL[8nZ.?,^!+Ofp78J:rA'\`iPf0-ZH2L3k=aiGI[cQ.8QeZ_oW;+WRsNEPM5P@d]k2R&+2*pM9-8&IHDogZT@T6)#*./ZcO*J0TsD%/Q%C"ccToIk#qcn?'Mq#ef?@ZAs\6$"](_7Y>[ga>tPdD]&&a\8Al-Yn5B]QNaN!dkJ*<LIpUE/DE.mdb>!`Rr-;%WZ5V_FgbX$"AMbL&CSYRhS*_WA0qZGn8UYIil:!.[7XsdcNLYm!=@kfg\onU)FT^[(GmW6@lm7=2rGD3.FO2b]+b'l+VAo.]c/sHE%LKr;l@+A9,BP&p##seO-I"RUJ-d4]fhX'4B.LAI+,eclYrVadm#aeQF00\^MU3R4eUj-hr)jTX-F(SZ3AI@=k8bkn"]>&.(Ce"fT\50#OAk`RedN!JqpGEaG)Dr@a*<]3`.#k/56P;lZoeQqt%olh:,_O"37pZX[B$Af`>r/&DL@B\1tso-;2.frdR/[@Aql@D95JQP0+jc#FsrA!814#c!ON2<$bjejUOZ*9FO\=4k_[[oG1L)GVVE.:4)P'^r(JHkV5HAj+`Z2,AHjJmnhRLK$=<;=]V^EV-J@4XHc=>@tYL<<Gpkc`dpZ\m@$N\>d6 XEC.sm(lp0UAp;2 ,OZjbJmC4TWLA#5,Q4$[.[-nXI_/Mk=0RZR5Y*?`4,267N`Ja'a)9CA:p67de'Va4ZTs5*:_g>P<C^Gj`>(^gn<sm `-(4Y>8T;ELls#Y0qZn%%KZ?"4EYX8Q&>cl1]5` V/rMX1?PcbEN=+mf?_mOgpAEB;MQSk^\1(J])oAdGL,F'KB5iR'(:#i<&f[r-9Md\E(#2h%Cg^_:6ISQ+(Hk;fa7A(60^Mp2(Fl7`^283p."Y.bd3CMbK,b#/sXc6Oh:pKC;VS(UI<2T9A%t[*T8V'_AJ)m/P- #+1i#90\;(!)e<3j$pL2L"#oi^9L@)j,^>si;LXOB3p;tW1UHlHo)&GEQMN]_A^DN2=$;M3q7%\\=0V0?FGd269qJSih>/acd9njGn-Q-Y:n%>For#\7'o@Df0Be._`)j[#ah"PD:-+&k0,onnBgH;U.qN6/jVT(.U5Rp),hC7$_b]rT1<d3r%W"L8!M.3=`52(1D<lm#THjK#3Q&&WP E=Rbm?_K:X['$[*elP))Z-(CJ\^*s\UN9J:e=6'<7bmi;RSR3.M^4q, f-@"1hTG9N4UEoBWO` =q-4"Ahn?].lsqEF7ssC?kgR!H2\HDg(-%mD_p,:+M$SB7]ko hKq9@8#8S?A+pImPY-T](-HPG+Z?^JV1_!:P\3@[@!tP/[4O*M<A)^HAsNG(L^gSScOm0)2=;$8l3q<Cn]*?oWj V[P$sp_Cb`7*OUO0[A&7;1R&]^?alSdS'A+jMhc(cVX$dS=c7  $#9N(qV8Tm?fDsbsr+3WdRfW,A.%bSKE+AV%@eAIG* 
[3os5V'-1(BNj-Cq;5%*QVOGRV;nnV%Dls_*W[@AXs[AUbUWXVSrjTATH0UiTQ/;?(H>tP-X%`dSHZgI*1p8@eXK^DdYgWOt59-DED+56A))BU]mU:n.>E"-b;caM=YaZA=VB.k4EQU9qH@5R-K<!k6qT"V\j9SeiEY#"bBoA;HIUof: B%ACMn&fPsCpBhqa68)6>>eimVRI&,)q(F:AaE^4gGOjb4XtUpotYh7:DCk<`G,d8\WDn@pnDW=%j6"#PU[&?1*9R 1nINUf&LE5*tk]L=q(`+Pp'A^QFg"Ff,!UpgX?9_FhQN9 WGn%"6/TXB0i=@P.]qGlBhG-TV.Y,&/gU7AG2#t6+ZDM<Hf?r4/`#5<ZgIT!\ .Il3J7t3`6fI>hM9U]EaeVqRm'0@aA!_4XWM3-i=4X7I>*e#4)N@Am"8O)!YZT??m@,\CS3g$3!2_V)SoXh3ptCNXA'BS?a+Icp$S0Fp,+:rO%TP;KW'f2Lh3ap0ngKVc[JIMRsXf LT#n>6G39k9dj,4V89ccR<&bCnr$Jft*ALJ+Kd>,E<P3s:/#)VbqbsMO6`hT4J._f[g-t;"<Plq72]cA"JSAKKAAR,\.JWQ[a$(@Is[`f*rjasif` Z]1.U B<YfQQITJ8:-Ai\P>m4p940ItR[1Lg,A:(WA$d.Z)PrBJiTPn;kB>7(/=D[Wee5:T;n:2CfWM`"T#nW2X+_09\pr;KR0j]R;s)9F>k[oK;c&.'J<%,Atfb4a+8?#U%M7RpAT$7:!ir$YDY2$5lL,Gk>#JSbEA,ZgeQ[BEg""FFc.)5Gdm17UAV1@?E'No2ETDfp!(@>0=.Bf/o% lS(b[:Ji4-9RRiQD8IT.!bVtCSh\c\Rs7147GpTR*0g"o`D6 3;iHc\Qh8ojSW9*Tg'.RU@3]ai]O3Dsj-a@_0TZ_9Q?*<EcLfr>poA90%<S?Vqb!b9I'AFo6hH?Ic2WN-f,hV)KAUEUG HA8+cPIACA/\&^e<F.'MPD9h(tSIX9T.%Ps@#k9/<j^`CXnODO7/V/CFAmh-_cert^['gVC7C<U#dE!+q>g3mkH+->"sD,^TWp;W`-3pP??NI%QjO?lVqq3-h(.W Pm`/[`3F.'4.6/:@%b4lqtnL\86s8@QLZ\On14Op:jRkH0q52s..Pk,<0%gq"2%jdS>(RAVi:=7N?deR`1G"N9D 0`c] 30^+mBmYHW(,o<rC[\btn^G*1+rgqP0r&hVWVj_RotXB0l`a=o,\/k!?]\sDV0Y\.`rTL.!E4UkWbVV,V3V/oO?MA;4-ttr?$_26m23.e*-l`TTSX`IQFbWS2\0^Zpr0NbO fs/p?V80c)_!GKkp=rKAsInNm;Ajr(C/Y@CIAQ"$g*Q1gs#<JjIn@N?QA15Y]'+^L'hX$Bk*(.[nA[:jG?`Ah<S)Z,Fgh18cU'e?E]E$V@hIT1]Kg,%!1j"l %("<b>/:*g,VfP/?t`[:i<F@RRZGC3:4$ir*2%$o,g2A@+&NcZN8#OSIFiF%\Mt!EZ'4!e9;i(_'F7A:LaA&*GGgBLK"g\h4:(3',dCkR\"AZ5Oqt_m6n,N5N<!?gJOW[fZZ6c:iJrh\%ir"P2+7n4[sMqZ*EA^YHgTpA=ZsA!B1o7&5XqG\jSMU+i>1pd]g___+t6IH=JNUI>e8SCpa:g;OR:eECAj`6'8nlo@s53e\WB8j*DF4C) Jd`K#IPBX?#N2@#8OK*EjZT oMhTfLjt_TqAno*3`1T'.d/ R.kffa6E9i @=3k=F&h/D/n5Z,c tZ63f$=ma!%BOq]1%n's^k'ZmsGE4*:Q@7/JTY]iA&\KU1q<$bk9N9j06e]F&m\207TRX[4%d4g!<pb_heJo/Ln]Bf64Zoq\WhIRR3&,c>3,sU`cXAY9$de]E@Q!C%q8P281s_d7d(bpA.*`1in,f^pnIElntTT`1kJop*<<2^r?G5Y1dP5BBYQZ!btf3G#1oP'EY%N+'MU2q82,SQo6j,oU?.D9? 
rZIdf_AB(VUO!M*_7$,5QTTX@(AY7IcNcT>-U\r!$.rRF2!d$=MBkBQ*(A`Om5)LJ:. 1*c6YAI/f4o&%OCp33UWk ,#k15.:I!@]mT^-<;'_ANt)H0)!FSjpBV;7[;I1/D">_4.!p3V\CL=@sl<S=jk-Wf/jN$:fqip63UnU"d(9Ao-OY8_oX]C OY8_HdX"%8SXj?_/r$"<c:Z+4_-(A'%P JYq`\71]Uf9U>elia8UIF Ra]t^X=mFrQH.Z,kVnk`iFl?mB^;aL,6DA_HZ6A9(9n0sZ<!2/@j</'4+bH$/17)ADmR9e+Ogf5>T#d1GRR;tkP>a(q)[`!!Q9,OOd*Ua[>\5kXh,0Qa^j`Qn_Dc,&A\BF Z\]7M77#*e<X[tbhG&Zc\*j*O,ETQ<=$P_qL)ssU6k^mf])>'a>HGXk"=rbL4gF:E5cQFIS.0PWfh^/cMi4DZtk3;f;:]-QKmP##oD>`;KilN>6f&\?Yd-QmLoZs*7[A&UE1h*KCl02W)R#/c>>E!8TiNCY[NLh_aXo M_.7E9g9o]PCS`/)Vs4P9;fXR'3!XTO8*"LI37$RbVf8qYC]Dqp%BdUNX]RO;=OGd:\icQ)i )XP#,C3( 6_S9ksX7Ar[760bhT/R:r`Y-.\cr,Q1A9/38#YJe%L!rlC4mO;KCPMm2@p>C aQe(;9LdA.M .N\*A3,Xkk3p>HU,'f>J(m+&eRi`,.10dWkf#rJUAW!Mf$-6O""@g f1[Sm@@]d[nmR#hENkFb_J^QQBo@g+@2:e0Z2A:^^C5io&\?;+nC#RYC?-I*e[Pk3iA^\P]AB'q5)_ _C0h7r^mbr(VpO^>?0K(ATS5Rah[k j%@Z`[!Nb J3jAkf_S(\>jb-bYke>_@]4A#QW(M='_pm3FVqkBG1Yem83gB!E\XfR*!19aXN.P&@`V$b^+6PV/Ql1@Z(>`dhXh48@JClL=aFea=?dfo?V532QCAKYXbUlg8;_Ubm,!j+OSg?MJEMMX/M%[(\WdK :)ibP?LHj5)ai.pRQ"dt<=mA#E-`t7C5]DOVN!!mlLfC\\9&oj%AX]@1M"BB*bm\ >L.sAJpoI]m"IOMoRRmgD,Rh/k>_KT.m"8``%f`:2\bn1]!!B9Me5\@,?8]59]rrR>GeEJi$knYBNcJ7Ht4&TWEg-[end5AZi>8"WI!LEFTjp#e&jOIJtQh'%c^g14TmCH ^"`A1]Y*JU1JU>TU<#Lb4)lVVo1Lf%mJkS.g>VD1MTjo)-P&g^iL5YI8Li,n_!;3F3/e.\I<@;C3E"BIW:X\<%ULnr#W33Zq]KRYjGXCoU\mK!l.B7 7,/!_XK/jQ4qebJ/F9jh8IU.8hJ8V>]B$6_8NFc-bWtA`f.f,66;1o"QkD`1<# 2@HcgshmE,p&cm!Q_n'oWB'RlMl<9"DBtC;"D[P1:b^8gc!%-e(=?T2'_ZoiqWiA9&:57Y"PXrTU_'!UAfYA]H21M(lR57/e=AO'9SEUFG 5_^"^k>IhTkL<7Gq/gaFldm\F=.* (PA;n,QE/q@N9^$Vsedg+ar<k2=p?Yn$3?(eFU[m 8s<?M*!nd;J0%"ga21O<',U3GrDI1kJ#a8N-ZWA/iRTP_Z*Y/LseOL 3*D[*VD0nXSIE$]\5XYJg+(ne#MQCtZ":0f.ItlfdjR("0C+I0taUhc`J_)TmYNeQmLI_/Y1+ RI9^SJXr>;;&AK\V'!7>1#:_E??M\l",@4da:][-dr!I6.'lrarP!TA06=8*k-Am%osAr;G]9MdLjSj1>ThQ^Q.8JGN-YT;3=C*a9>#[B5NnablrMirPbjlD65!$Of:@[I25EWGM[X \nsIXF=qc`Z-\]^Vq2kSX=G;$5@LQ1>9_4*"4Vi"QmAkDm_ST*7;drHQ3if`J7ng5ne`%8aL4,f:Q5GS`LRcp7Z(4XVd^Iko=$]@V/ta'o -9hdlF@Tn]%86a p$R7Z.7bWcpgT,87=ZQHX:KN"6a^*0A/ibL"U%:E$&= 
)bjRo.+g`%JtkiVXN(o 52Q]K,XJ8JLf=<pleKW4i9AF f^r"=he!\rB$#,e.[o@OU(=sW@&fZ_gUE0grrg"g'QlF;AlTt72Z-8.[O-"n7Z9L0=L>T=$'B1;JI^5*Al'A$$!*PB/%b/5s[M=77C3BWb?Y\hb<f,/#aZo5 76G/NqTr0Y4N".E#0]j1nZrqUYd@Kf!Yf,/^(gf5a_4eaaMoZ9>$ceQ[pm+\:IL`Zc1MT3\a_o_T5603tl?dg?pr6&\8QkeVih6QD%-3,oT!$bc1[+] 2I"dh+QA1j<DM>rB>79Ud78"mnif$jMJ5qH>lr`ck 6[fmh?%2%9F8eCPJ/T7<0P%kP DFG!V4.N9IG"&-T%p,5PLA6PTA5`_6Ibc4lb9)#eLH("Q<AbHMkX+XiMH:`k[s eaAYjPD)Em(>=jOq),ZD$.'55&rl;`54' ]&%5W0HV[_/=WQs7b3>7[I $j;SDo&!c-=P<rOW?^dIr!aI#&e[b(&UNV_?NsZ?/<b$ 5(EHQb@S-M[&\"DHhoL3-_=@=UX'fiS5FXF:8!oK;+R\9NbB'=SQ`QIKrG9RG.oMXcrrtQ-AWbYr"I"GB50NN+sQ7S@E^TZ&RIL+^B4)+q2LQ&bDa//(DQ@Mg0+gRcZ1'Qh[Qq0N 5* B'rA!!NFdpJ'26Xo)5"<2dNRK5?R(FnD\o7iOrHk)i>@cQ].GPN3D7coM=Q!#SZP09*d,7:WjsF9[6<k!N-/?QAfK<&.1$<-dO30G6tV_h5T(45a"Hgbse"TXr:W1XDcnUp"f*R],61TVIHW+0L$YUS[h1A=c".(D:^eM@^\&UI30.aURP?P<Li.lo7r>72(17NmiVi)LGWON0BU\45;oaHcUOhhrT;T17Qcg6O@H(#4r`C65*<D1b)C(+d[Zfqqgj!aRoOtLHJ.7Y1fi0:Q1+:R:Q&5;IL_X.D;=-MrKQV>m0R4Z1WpeM:nEjHZ)sennKseA;)h@)a0f1@91A:r8Re_p[CJ9[o8f>^2kLA=3"kN4Kc0al_W+MeIk"MhrUT>X":0tOfo"U[A?h"AWa8Hd9Cn_>ULQY"fj>[:]EBAQB-9YW4\nbKj:aNm9(Dpbc[jiVb".Y,A^,F@".G=iF6"$G R0[;L547[.g(A+<TZ>I`+8!]d=$4IY< .b4K3&+0-H7eWgKL^34^bQ]*I:eC%qFtmrF11[jIQo-QPG>_XTraYIT#+T#bc]D,Y[nf7-l.d9JO%jq+=Zd91c]3/N%r-H=A)T;SMRde\KaV`lXS184CL.7LW4S+<^*2hA`PBFIUVXm3Ngfn;jCjt+VCOF(04T()pTfY*n]S^c0"!O5<Am+_b)*8*p[QDj:;/d+WeQYf^5b3^$CnK5MFBV'Q00JlLm_>d-6F/M9d.p+V1_AMJS>1(PWBZXG`hQCQ1]\7MS'im!Y$N\B%cJ<FqbtdAeO_IB4'L93HlPi[64thO$[e<'/Ci75^Zm)a*pU*!.JA #KGFL"P1K&b3E^ &Z8%((FRhS'g.SHL2CLGg<2C=`tjRO_bKolI%YlbF+:ekp54:pm5'e=:9nV0H"tSEd$$\2If[mbLfRM]GATD02`OG)KL,j=d[-!W+$.S$:"K@SVSZ i_TbXQb XrW+F8'Ph2@6GV=)%n5-q#1_$O+3q)gnp*h[SMVlJRI_@JQW_LV/oX;CEl>*("f^Qn-XHj)$ehjA?TmM!rLA7c9N**Oq> DK5Rm@[jes$PclOsE9EmDG:)5[0Ct`5_,Y/,/dbc\:ZG4K&0DbM.<]d`YWcPqbnQFYb]cC#\JL:^n\'s6A^GJVA*.8.k)Q##HI<lWWI:^9.cB:sKO,i?eIA<a488a2,;a"k 6brjjLXNb*24pC,5aoYKr*gN %6O)I"?IW(20FZWD1+1c4P-EnBWtqg6\X-*Id27"rLY5bU.q*sQCl2S':r0G#?bL?;9F*i-]/O]^$qS5H[SXpD?XFTK#0BM?\+-M)#YL 
0&<8O!!lblA;r*`V[hEBRO%pb[#5;V=E,ZSa4D@lKdq^R2`>+&P*AldtSe%O=5V-U+`&SDbB8iKa4"'1MA7*>/BA&m=W%U5"U!$M]l+mm93_Bs.&%VqA9%>UL'?l1CH'\#5D';<D;V>FF?ia-pi2A$P=Eq+R[$\3c&%![H60R^2+/Dq8m6^rk.0`YeU.X@2LM_j-.ZN%An&EHGc4qO5/ZqXLU4OgKnr0AH8YDn*Q"o&YQhsEY'^KI&d@sCZqq%#UE?tNA,9_E8laO%Se\R8LW?HA:JA&qsFB2_HI]k?'4$-@-GrNisQ>$gaAALde9n?Qt+&L%1R)4)^Tar\fW(Z9f@%D,=K).sVO^W"WVP[X/-M.aN'.hQfO,N 7o"2`5Qfg3RLhkNjW#,qWaM@:\_=OGg]fSKrh=[\B=9C(t>DDr(geZF_l`,'&"*Ifp?N)cmT,6bOTRCr=A6,s]1sB3n,5PpRDC@m,,'jEo&:=@%Mdg:<^p=d:`8a<_D=bD`f/,([?%n2e\YK]"D*5Zc@\`3lnF5jLjKcG'A4RJI>J#(':t<rG9Z%&_FF[fj'q>LlO\$/L=/>\0^B&W+RG.iIhaJ8rJH5 sI9-$*o=@LKY0@c\8oVO9_OZK]eDqm]7!9";e9iTo0^XZb<DD2)MAAR#d=%sk*7JJTGMZOWK3PFa=e\=>% 8gIpesVHaLrgR?`B\!40b:@j'Io>8&$fKts7%c\>8WkhB$/@It$:oAlR40A2&b(4"/d5+/'-k  H4f=2UZi/QWjF QBX<"/ Cfa7:JNN&<E4a-`b=;^^G\'BTG8R)oCOc_@H,IIG<&3hMr'*(<a.&DhY5U(Qi&j[D]>qPeg<QN/o,!FJW@-0ZHG>5?cBiMK$l>D;mCQSJC? dYN:Q8rhGa4&9A>dtLGUkiYA<Bj=(/(h=O7_`E@'/e[RTZ:U-)fK55@JRo@4?M_]L*Z9^D(SZt>L(N5C-/,NE;^lq#%c-GWh[A+f;20+i*BZ:+9=BrO),&(L`N^s!@e:WDD#fWY8i_^dYReFT4? 
_"9hh>)k,oE["p-)2b_j:)o$TRU3_4X@Tf-E2`1$&i[Ar)*s'-AZdQU9V(^_mW0sT1V*/W[m75A(_4jOjRAc+\Y9i%fH7$4DoZNO3r#^=`s;6(1-Hf#\'@&R-TGYKTNG-gn&"(jWpA_,a2`tFkM=aAPAt7)Xh\]g9P&S*qiO=+b&*Wh`OL[(g3CU3!d@."+<%?DN#5^YXg,^b+%b=@sA'-i"TZc6l0flWAGJF#pL':>)`_(baUeiis+5q4"s!VaH+X4I@e_8>^OnTMMEaJ7Aa4SY42kjoR _d66kF6iHA%eK&s',ib[G:>2 70/dLeZJhlsLba%R+9f1/XosAdmAZWbC[\g#_c.cJ0VA6gAB:o(`2>O*$NO_E7^1Eh!in[dM!Y8r>j@dtFcqZ"la>,d]49+e!Tf_J&A[QUCr,c;?HO%ji7g_9J)$6kd)ai-P[^C?Wm)M^ <ppHKsrS$D$.?WGiUH&<NlZ6^t75]Y- L\Cd)hC2/(<Z& NNN59)=<WT;l_%`?QUB+8G='%^S/l]C[pgUhQ-<a("G@iTE>\K,RVU1)iZP9lmC2(Hod:#@$9cj6+-X3r)Sr9`97JWA::m;rl[9J*2XLE#F0U*IB<se$bK3j@5\#fX7I'THa`tV[<0=>$P]+\Fn>[%cWjdk._ <LQP?W^AdkdZh\QNb "-e=hp>tc1"2eWIR9#K9Y^5&%jZ$L,LB_K+19gB:AWdR!g%9?e>W/Emc26++H.\6F<':j#P9_kH_KGT88=4TmVGm[[N_)C5>;O3+>i=1F1UAE# \"J07pp!oq8d..g(((deH1[)^[,= ((s_9p*%(M_^7 l"rJT?l6n:iU*dgfaM r#e-_*+<c3jIh\_$A>ADo%N`>Abj9'"FoLZ#db$&7Y_U^?4*SS>dG;K#O's7lFWA=6qH'rC:0aeXFB<^<pa;6'CeAHj7W\A76A0aHJh%T(R pFQ[9U<3q[f]%Km1lpM[;[)DDp6LE'mBAnm9Z:t*&6Ej;X.->K(DV i%3)I*JAX3NNL jgeG7Q[XHVTa4\2OA@*5B[AIF26I]:,Bla4,I*ecIY.I9Z?8*)k`=#]88N;rHN\R_9414grVceT'LM[6IF<n Z=r>]qaa;:NHd@W!`EAX-D5Z?gU5?bZ_TLR!l<pFX72/gN^Q69<(tEnoP4DQAh5ga3.7gfqog]bBqj)pVg^QUVM'fU2[`I1nO1BZ!*9>lZkUaiB?iD8cjVZX9B^'(/OVNm0I_<e[SZ5jCM*FdXNgUb=(E46$a\T!U2kETI46J=tbA_l8"sJ\mClg*+]DRC;Wj#NT-Rjb*^I@MMareE/3<Pc77KE<krg]k]*mI+j3rYA2*k7kEc:9CnbZ3BHQJB\N0=o[>0&6NS0OD;;bK;',UX-GlB'gpj-n[fKaEn>sUb05NM^$HNR-<3V((RDpGKA-PdUA]s<)lV@r&7(n$A>M;D`0LM\ekE\Qjd.';qdO7 9KC6g`Y[N`.$1`raN?o;qR'r Vm@_'JF59($l21"`=A[Y%\(GAG\qIf5 )N'C%">WGA,Q>!-#RTW/?XRB:_n)^(RWBA]r^GF$U@A=oI''fAZ+RY;!EOV6E#sr<iH44G+hZ;`hR_Nj.CWM(Ns+_$a $-mrSr5nt;(0Y)fI)S4GEjX=`Y0n$B(q%%cb1 An;1nAi&-3\#Aa3jU!N<Vs846k^W6M<+bALW@3AA"`ZlQi26;LQ4cg`ChhUb"a4C4Mdm:#k4`A#nT 2SC0/( 
K:#A`pa5P"[(E*L#D[QH0SkNO#iaaC>`!4"^IqAU)[p$Us9q&"WLp:@Pe:Q?cLY(:Q\Hf"-V9?>akg=f$,Dcj&7AM5s-t3`N#Al<:"UVLG+c&,\k>ejl_qK?j6`&Or=Vd"%Np"b7q$8Od1:;*dfnp[^UJ[,6t)YsVkC@aU$>]3S/LKp<#40TGfNP*+8K*NUl!!]piG]O4A5#HQ@mE2<<REeK$Xri?jaA780/G^C7>rXNH"K[jE*5g^(5R9!Pi?L3N[:3AWOUkH@(%kZr7EMbnRn395W/B57o7d*pN`iHQ hWbkC$j%ge:%dT#6LcPK81jRcAd[%s_$c+q>8hO)l.bbY&l%>Z^c6i>\Cr.GY^eU/rq2C"mai_W?,@ JTTS*lHpL.DD.\BBEe<:*W1db#%+*,Ql6?+X=M_nb7%,%lh[Q,['nTHTgdT7j?*ls;GT;;"%7+bnB<s&sgK]pH`(-?$Do @%&&dVVN.*!%bn*2+-3#NJR[W8b-ocF8=EBpE3 TDZh9MN3ogXFA_*oII7D@UeRV5&)L<hT C+9df)-UCicr?SrG\pOq;cqWJ!oGB0o'ai*]Yb&2;6:ra^a^PJ$ 2 4A1&AHPYOlXR%H6oAgigg?&@dYl]i3id>>EK43GV(j"DQQs l<0A,/o (+G71o"@McZ%>0c'Uc.]PXt\>X@kjXWqP,,,V`^l*L&6(d\<*cc4L),GbBo.TmE6/@7OA^/^gA;!pPs`Wjk!RGG2\nG7aa>C\W@o64tKl7SMAX>k&L,0V19b.YK_:P[DGZEPLHX-/`*-n\AO'CjV+&Sr/!>)8r>&diAJN)I0aWj4m=&&&=Q^Wp#pU_-3Y13Q/g#"EQMpBS_1YP&9\ABL6Z6d)f#arHa!Hj?*X+W0:'WWOGV/1All"Qa"Xk\Oc UC><m!$Q QcI>\AqRtCVbD)X<+9pXrR&`"Q<.0-M.Q'c(XYRF0^%>#TN-V1g>Gik I@1.R.4@"HA;hWF@Kh+X4kt$8*&N`;m7UefMH)W6p]ap'J)fQ;)<`BCa@iqjGCDNlQ&-A@^+H)q0CEYsfBs^9+$b!A?ifH`4e`5YcN4WR$q?i;hOgQTJZ%^tZ^3bV>;+FGPDO;Vd^"l,TA/^&.UI-A]979L&'/4bQ&Y==om?pZAVf8;@A(B.FUY4Ao3rcKMJi8&V6pjTW9 1L-$M^@?Xo=Efmp,!#2B0 iT?%d\^Ze<?OLM$=4q,Jd9`,g$-2P%Dl4<\P<p:K8Eb"BP=f;\Pad(f-.<l47oN.`qNYt@'4b`m1TV3qR Q,a/)Aae<tjA(.XNGbL1Kc:V9_64fTk9J&*4pD9sC_AG<m9bAT)oq7D#)f\eq''K8F^iJi?l9674<-\&bVa)WqfHFgkpT'])k*';jIS! 
3FJ?+YEJps;5DOl>ej-I+,Etdlo;GeXtq5Ei;N+QHKEa>:p70G6Ah0t$DC[:IgmKiSg7/3hl#Vp0B[;2dKL'SW%pG1PVQ>U_N)0^D[ALc/QTOaA])brkR5&Kk)K8WAUkhpSEj5nB]MLc#(T9Je#LOR$=gn>EdiskF\p `roU>)7C<G$L2gPH`rmc-AE$3=D]UZ@:UQ`q`=sO)18I9L!pDZ&nNoiOftb^1l(KBi9k#Ek`SmF&_d0(@t$Ybj^jnnXCVAOft4TD_s\%K_IcT'C","lpJjjtl7fGG1;J*@]BZUl54j6VFW=+QjTO[1ne*GO@RsX=j4F /^nFZWSPS=VE=RJ[?V$21hQYLS[c<1)jMEQSj3p;s)_?D_+s9I!jokC8[<TOg=b&N+S=[4\@r\%Q^BnB(M&$EA<dhbk;Xb!\14%X[>3Npbd+kW fo)kBat,qpW_YspA_A!fE/7g@YVTODlV?f+9LKMVI mW-7Ueks6oe4AF4]Qe6I6W!J8M+?IGr$&/r@EA,d4-hU$]bU-bAapk6/*_ 9k(1)<,P']K-_?KtcGg>rm/YDhjO!3lpL,t*D4j"Z63)kB1_A17LJ+B?#(?9*:q=X)VQc\pR,o:J7O"gRi#Dk18$\Mr!:"m*_dh9,8g0YIX`Zd0XjB!S+-Pft(n.?'kD%%`K>'",Oi<Ob'@=ppp(9^E:UL6cS(bc"p!:/6.%Fnkbo)gXSBA[WL7MsD^1 dCSt<8'0e&=p!<='NV*M@HI1qKX=C^&qR6_>rcg1L^\+iJE]!QEf$J5l&_nGWLGI8[B:k\qpJV1N3(/'/H<B^'A>RLb3G57Hfn25*[:*\E`^< ,<D "mmD;h>rr/oRh ##dl`7>i5"= K&;;2-8gsF"=(*FkCoFTd=_,7\^R]SKoRlkF8]sLMXG%Y[+im6_^`7fLgk;lC-)BpohMn0?p"6`ebR\'h"%U</1/.7K-Gt'l66?L>qBC:?<a?XG\7>A0-YHU%[^K.=pPMhB2K'q'oYgfnhsdH3:(LTqpV6;'q9pBsQR?X9^6W:F<5IQW#Sih2J=D"( A0CT9W_nKmV2,EH0$JWV*6D\s?!:8`4$)Q_366,!c(ga+Jd;lpk5gIE\k+&f]o"^m-R<TBOjEUfYIBEVMPRsg4Gr(ABa<UcV6^n-+<F%'R tL<YG=;T)D7FaVR^,#jEh^T9!=Ii$TXXst#8on/lmnnq)!"dZ@OtZ,M4rLbfW)t*aiO8D;)X+KT1 tY\j eX5lbP=?'`k<Z-9[eBi"cT" #%P1'&Kt2\E\/LBIX"\eQE8Mj@,9X[D*,9o;A'Lh#A/bHXI PdL?r8FcZAFXJP&iN%'g@Pa?!_%t_P6S'5V[H'TP9_[fFteS^oGc&bQl$H1?%4'S\o^dSABisI(]L7Z?<*1+YIZ*kcB[o2%og!N3BSI,UE)JGAlg;I5@E)G'AOS$Usn-BQ^\\kW-i?:6q%9".p^l2U-H@X78;WnF:dAcY`sO;9rhKlS^?Sp_dC:#H>)A(M#8%rj"NWKf"NcE$%l"!<OGEVKk[ftkdipA]>;&lkQ nB!cpgJ5hBh@Be7go+o8fd= ZelUGrT)o@.,EQ8Z>@=elER?'\DEN'iJrTYV*7&EQXk2kLI`CJDZ^,!dK!U1o1 N=*qq8[>t"sJn9 r].E0b*<rIe4""-Y:W1B%XbmC,n^S6Gb$9a@U+NK?k$3oJF=[-,@>>+S'Y`s;hpAZTDZHX+.DA6^0L;cA=B!12p#P=Nk8V@[7e7"9+YFsD[$F/TKo=>C8K<[,T$JZYs(k+cK?ZXgNtQ/H*9'h9@f%Ih>G%/U]V]VYXE_^o1h=a:lkC4Pb.L^eA4CXn>st4+jakA@NS4O#gD1V?5kOHNC he<B!b#CLtrP0/i !IjY>P&X6<>2UkP5J0Z&J?/[8ZElsB@[TMI^N3-85UZ 
/mUYZfSjGL]0@si:nj6;9;I)C20)BO?W;F$!G?<Y.:s%e5Ze3=7b8I#TbITfnK8nOrLs;*(GqG8P"HX(67Q&0I?8`DD;N9k<_(r0p#=P:FV^W,#& [H!tVq#g)rbj69M[;_aQiSN%r0.Q]$jAiIU7Eb\PEJK[(:CHjkg!U'>\?b #k`'>t"C-C'+YJ`@#Y 61KEb K?Z)5FsC>IIfH+RpL2>W"Q$JZg/2Bk;%Hs6(#a,_S%9qQ6_\;N_Dh"BGrOGN049+fbDSANG=*P"RoDQk91S'nQCIBeng42;,>&GA**j?edh,:>Aq\]5q)c[$E?('Wd-Z:TC5DS$BO PTUELG`jLS7_N>  bGiU 76%\QC)5FQSdDf\D>T#"^"9J] n/$aA^8n#t$a]<A]&ogN[l`U3P:r$1bTLFn`nJAP!ZD'ic_^6.8r ZYBmH0L[RC&890t]LcXM._c ld2+1ac3NW(nZp#&3AtsBeMFQn+'dd TAW"9.')G#W1>%-fW_j#%fH:JTF0=l3?oHUVZIpJoHlnM:ponE:,;">)l:Hnhg:@bUq's#DbK`L(,MU$,N+% r@@4W?cc[I,_)daC_?T"tSQd9^%`N__pZC:O[gWgdhG'b\^0ltg'O"koJB)J-V!iI3(1fjq7H;BrMlc0Vc"YKo(C/Wco2YTaU?s:<qL$?C905!*OIB,)kc;;\MhlhaP&bTV"o=7aN+$K%pTAT8rbZm#<h6f=O\*ioKh#_^FS?9keEe<[+N7l7]MYl4S\IaRfi_rYb$9s' 7ae%14p@=H)M@`5Wsf3_0Y.99]=1p'e2pC$l[?%-+)EkskqF@9]fj-eq8@,AG=[(/pLj;&2%b9iX=-9-J`s/f3taq*NMTi4P0QX/qCEDesdSTG*U[CC%3KfY%I48km+?$ bQIhReLjnkkKZ-P1bt:N<LCE!=WbbmR$a(P:hT[Fb$+V s9Ze>YE2rF<qqX5a_;M,fb/"PJs=0/iN<VVn$r.KP@A>qB81/df5\W3[#]'0IYYp/8qQ0[_*tN!3KeAI5-4`d'G=&Z)hpqPfLVJUG_>>>-AeHW&9Mmt,l1T^hPoML&N@T[[deLkgBa^pGoh3rE66BW"?%K))Nn2X*=5FA_6^bT2hnC7/H%cMkZ-&MV'0JFf#_HRNV* .@j6eU`&l2"W=R)_EE0h[-,="/m2='J2>KF[Nk['@K_,\%BHY4]P"<A\0k$Et=+=?/RX9rL"*K*q*-NP&pRrD^cGbFUm#@kTA&54^-+796 F0j]DaI/s[Ct9-1C5;?KTQZqaI*LKSB[OAA"@PY6Z5K(`-hLlRZ;5;X;jAOHhUr0@UY=erF$<JD'$NBpTn)25kF@^P+h=@E,K-gtF3r.(R;J:A[=Arfhq,=6d!p=?!b=lMokbLYc\iYOY6#,QB%Y;CVX+%h(7R9Al3+SJ*WNNBLQUhmIK&/?l34gci5GPUh#kF2nXB-P<,*&"O+ar(Fnhd3*A<P!?@!iAJ#fBVo;0RF#g+$N[#]=UO\T%K#r71-#l][9Hn/,%,e-;H++MA@L&[c3Aknh%j7o524%[N,Alt89rOQt``#S3G;Pf$#D"Q9FZlhM&NlA0>STZ@-Aea<&'BE;]t8No8RVf',M@3<Goiioi9QS;]Aph[M!n_YH""+jSSW#kWNL9AGsk';OjV4*'T*_=rfgP^6hobU]QaqlW9I=fjA%H2"fah./D3+'ccO+qXKLhY\!7 5FC';*aeMU;i<si"raA&0>4\/G<3jI`A(T0"@b,Fc0;,AfqSb0=o:a"I*5`'>ep4#4D&nQ6\=Lloe5Qn-ed&$4W.KN"!i03QP9DW+:smc2_$gO*T2MV6;';sd-06W:'ne-fpapE'j1,f2ieqam>mJVD0Km\24 _\ApdeB&@gq4%fJ3oq`.&tiR-(A bYiC([QPaT6].XVe'[Z-6j14_K3is 
p!1t_l^;?@SMJC;pJ=_6LaUer7ZpDO"@3R:lqT_(ac:bSd!EK*,sD^s8R0]as0lG17)r.(W%lA(q!b=J:E!rM8^dA1?Zk^ab^"c,Zd(@tD8 sM.[E96p.DKRG?Xl)oWB tD)(><V7M$Gc2l:Wj@^UY"jnW6j#)Yrnm)HYTQl\Eb2^$0 n_SYph,Wls_`UP`i)gT3FF(@FB2kIX@;aDOpI8Gr?hEPJTcn@2]7b0hk>REQCj(OlNi(gC]=!1ajYA0AHo[%2cpmW#=YIga?,f1E*W>9Vk(0DS[spBF,]+a8NWn2f3A)kOs$eG;5 *LGGH:3ZDK!B5BZ+bX][I,Jc47T7;#i2Re7tJK&]_N(e;qSY2l#.bU9mebC:8EE"U F+B.-o3GRnEtA%NM]7ICk(+h;KO`I&\LDE. B\VO+0T!-OiiOkh+.]RE3DDda0Mj:L^id\d#PdKc!Qt/W`:m-aH+8*gq\Zd)0HoGG(T:'8LH\Oj0`oJ(YU^_6ZKrjZ*t(S2d!8qMTUn 8Wi38sSCkKcZ"/_5n%fUZ!HRiaMYF'g^EXj93.-=(/oHe'!B,LGG9?cF[?X[WcE5\?=7% ]OQs6nJnRJ/':A6UGZGm7g/Ke%F[7p]Bs"J+>CCigJ(T%bZ"jnPIU@G$(++1S_@b5%mREh&2TbH5II@A,jS.[MH\1-b<"?Ak'6,(CjbRG94=qV%8 TJ6Z6&3(o?hYfKn95Wb=(=m8.IF?JS`Qh*R?@IEp,o7U-J4qZGjLnW aaf[VF(AD>YDU>0@ZreJ0L)(&F)-U$F@j5msJn)t6K-SGr:Td+9$PJ2j-s9EU6\:N'$=7BfT)bcHS)5AeDqbeVO6r@":i09:':jY_-2'ZY5['(PYtgge0LB(n/fK"i6?9]4)C9(;g= G;" AZlZW0t3bPYs^?t$OtdRVn:nV/m[@A"p.%9!p&AP@G&S^)g="qA>AVTb^=6T-HA<b[1/H0gPDPg'B.?A:,pqQhWUplAG?F6U"`3!'As@#Al\"W!1T7$2\PD]eM8:Ce&$=mbjn;#Bfj AJ3^BH?Q5i1@[omAolU/eBD"oAT09s_PRjt]g[%'Q$.p%lW`'g+l7Ae<;n-kc0G77fUln7;Y#YjEL#btB\5^nW9n 48 @a.#Y8/I)f;qn12Sd+]TgM1ktq8ZoPOk@/dOSq@<PLfFPi+FafSXchYLS`XJfj2e,Jc+(krLgg.M?bH,2S?/QSBg66$T)D>J@j+Tc;GU 3pDNba3iS3[$*FE<$UE(pF&@9a>)k&1MOK1MTY#1?df*dn2C>hH$;dK5>1b)=j"CNaV-atWE_"-\'\OPdRk;b1U^eIA4e=7k)U9rDASSEhs;H[*,Vef3PhO@&?,Ri,>BAqmEZ^tm._A#[@[9%jb!G#<EJnaYcX_[%On;EjoiX8O$4Ss_WZbc57 *ZmY"5gr8s"BTan,Y/t%f/8IT_-gAAcHdh#aWPNA`P>ft"Bn?:^0G4-Y`fMSLdelJrVb3:"KDAlPH#@m1!MX#.VDA6ZLIdnF(6Flqgd[GUtN:YDReU`>pUB<P"PV6VVWd)&1Q_ZajmA)+>BQlGhQbC:`b)A]:'AfM86]'K[A1G3[Yb"4oD.V=NhSl3JH)qA7TTW?)`2A[(.i=S0N0%!J1D:#";lcAWp>,#d+7MgM&[8%'>% P* kIrG4,JZh6ArHLiW<30`[U[`n0X":Z57 [N*HgZ$I7otWr54/Ok\NEQTXVZKt8D6,q.R3mp>&_IPAI(%?e!5?A/Qbe@ 
g5aGG(ni3aQ%8;=R7B4TgE=cNBq+FW\An6k@VSnE<(EC-dYOXi5P[Tii;s=l&'p1UFmJO[#pNbt8/q8Mq:I.Kl7<G+ATp>[5+g8)\XkA7=1"7GT.4kFYob70WpJIM/7`^?rOW"*C\'\[Ot%I@a\dc?'cjQPlh\h5R/M@DqMap%_<SEh2l<RBjZc[og?E2cJ3KjI_GL>lgJ\f!Fn(UQJHGTlr?>6mA,VAb3793AjHbXFg`E"d-kr59p,"q.g1FgFVB?%[V&V2T:?02JO!OEQ8?_GKU!:6^lAL7C;Mb@2b o_s\1j:e]df*LQ79TSjDhM;AOd/9Amf--]#c?4Z4DA*h&/?MH0(=W[H>U,j'AQ5=Z84qS_]io5sdC/Y!)Ro/#1-8eahG<=VGleBq$GtMgJ'iaA&!0>IGh<<beUlCkm;<l_n*Dqgl=]6g/%^DQR8]>Ar[[r(DdF1mRYX(:Ah^8=<_HA:&X:*+ra(p/_Dh_,i)B7 WFq+ZfD1/-rZ#rM-i\)%3a@Ud]n*+<H&>O;b0j3c7:&A,T=<273+Ib2EmD[`=N&`99S8??o&/@qoP?$"H#EL ER[h^aK(iFGP%TAeefT$Eib^ "eH`n!A5\Tl.DjW:'CC!8/rt;Xi`S_Cj$Z=H490!0#9:$A!52\G@WG =]X`K9CI4)gYNKW0]=OP;HJ/%pakp c>":0rPlQsSSUKaEQY2iD/7.]oCP"omV(OO5WnrAI[&&A`EL>R_/"r8.O/6cV Z$JsGlfKOZ:6t!G\[[;\Yjb42^6ROJ[TA=jbf9AG>t9_6pnNXioVqA=2<]-;[t7LX58k`<*K?\p8IQ0At;LEW0+H6+*fd3SaI'$<mmh5JkDm\`3-p2\Xo[?JbcO"Q8ZtmF4UY(A\'hD;>abi7rAfNh/,V!@M^H@_i&]O,^,hNt"(mg!7jji^R\ 5WC=]M[q_[Mb9.nl!"BTOBj&Zkp:0<kG4mm]&>X],&nJ$\]]S1?Ra^#V#X3](Q9qUlM2F<sTP^AAr+3@30*3lm:_'2#>qQefOt`oLUd=2`-ILq'kl';P-$m&dbNq4_Xmcam,pA6ZI^Z&eX3jLk'W0^hf('1^&b:oT=nOhOTQA!oh'^#R>P?3+L:XM@anPUHL8pEW+.d*,t`Q423Ea'3*,\8?[_7i8SQ,Z8-EeA(or+LtA^)r,Qdf!13HT>+l>jp'qK'!5\S+\1R[T(lAWG@*@WXWjiJ(-C,:$Q*g;ZAs$K+Hf3f,l'(.`2+Ltg=LlUj]A)h_.D4AVThb_:o"E,Ac[$r=`6CM WM85D/Sa&]RgDY7Q%1/8AP\-`Lla-5N?aXpkcrB7VV`E!E8ClT8gsmJb'3W`?C9rUF<AJ2d?P^P2,Nn@*_Q1]e5mKAA_r[WH#S/p#t$nf9A$YOS-qc2,qT[,8FnN7';lCZ(8Gd+p*V"Jt)$Q^JX q+O$+@fhHVdDal#5?h0&cI2<pmRW/g$/gePX)WObhkE[S@j>ac[X^gUQ9TDjZ"(jmU54^H*-E?/q?8cIS:dL0@ b]9#DoU)^p5b8X$*I51q^WsA'd%2O*N$0gEfr(ZTj5]CgAC2n@"p0tBEHN1mYX"?InEGR7A0T>VMD`$`9?cmc+SA!/^SHKc6PB@7-/P'NLQ)s%@ (7\Eh$-;DE*hDtr/PG[]J69FRXG2K(MJRMkOrO(Z`KnNC<)o[<;`1hDl#((g:W$CrPUn(FVC =a9A-p&SQ-.Z9I o(1,pIadRJHa3ZLTVh].bH;#MkK<-en/H^=(/"fsg&B*b<lI$;X8AHo]-HB&fP0Bt0MmD1,&Q1&9XU"? 
d^M.:^e&!t*9KTC++i)p=_=!)"`6Kb_T_F22tg,AW?8Q"E[DAK]%'(58-$+<?(.#__QLLP&+SVEmF@Jkft'"^L`O:a4TT3e!A6F)5g%>OAQE_X[??,*N/ ,7=',iAYKN OnML3[P^e;$Xq/Y-%i@[)P9cim"-8t<=^bh!srU3cB\&^^>O8IjQm5HNC/dnKsYf:Bj0dN_)i26qR\k8k@Q0]_tO[dGBL4Prgl'n@/>Uq32\"I/pFTG&X`QZDM/,O t]LC\nSP'mjQ:'NA=&ln^;D\nlQL&M;kAniTD](c28,8,18:6!M7n$&RVZce8TeHW#o\G^F%2_XM-/U5UZ9q4lg"St_'MSKisb"#e#NfZ,Ce(=nU] h/$U.Spq*hc+e[@1ZC^=2J\P3bp8$c58j3_O:0k6WC##3=jW!& .f!B,H^\e0S;adAUOo;6ekIP$>Rh[\-nN+,ph ]3C1"AntH(F?`s_SB6CLBsh7D@7 ]SAJd$j'.-aoYTsN)r:=>*Gf:U/! -03/6n&B$JBQ'70?@&G8eDUk:-tPpsa=-<W?3bf%d&Koh4A,@GN_<?4W^k`RGggM&'QM>lYS'G6,^t+WNDdb06]"EpeqtZ"fq=P'"pUgVY*r#D"JNmJXs1t$;P0;%$ORIhWYeAbL@ZSB&G'n4QD*L/nS/^GS_9?9>N]3fFnAGr!neR5=_cYH7$)Pcf<Vf[rh+C"(2A?.m,SQ+:A-I-BU-Kk6prA&+><'ls^\6#l5#gEl0o5l784M_U3.=W_qGAmQ*%%^^]6j$/S,O0"1e5/YmQMqj?<-TR!*0lOrtSkQA99kNQh.%r"dOm+eGSXYY8- )FeVXEH:P@Sa##kM\2&7EY$<s5^KhQmnQG,DrF^G,=/OC%2_K%0OL3=-eV/5>WsDC30'l"h>%Ar#"]CrI`Y2\8o#-\A+qnB[3Nqq!a+'Cab&_3WC@]2<t&.4h L<;A+'+s$<'rKGDI6KJZe]Yh^QZP4!4]NEjN/n9hb#kW#XXpnS2-dn=1gm/N`c6#02TWYWfT`/>?kr66f$mXoJHVQb1L;/"E/[_i"2A#O_@8!_onZ:KfIm25I;qhK"A3L@W2?$7>_J%UAKVto3lMX$-(b7s#nZRWJ C8^J.dqq^X+f;:nS.j'7',)boQ07P:be3Jja&_SjlSF/K<nWp'@7Z)=SVg/!4.7eTVUSKEJSNqCjY5=7sYiN6r"O1$L<O#M^$cd$0chZ.Z8CDp\qUP6q)9RpR&A,o,^jtm/N0n=9'pB6M1ao?B9\mC_6^i.ms ?+tqPJLB/XT6*(AYbq6*OOW"R+<:*A;"?$$!&::e&_Uc[%3.]iNY-B 4GU W\WB9*hW4>Cf]MlAj\q8MYh@X?E9PZEAhdOI9`bB0)DN3M[)i!`5iE&/6cP,K8fd$Z;6g0ZR&MA+m"ikA`$A2.0e5!2g==^N`#'j#rqt:\ _q.1dcbKhVo5QdUjl=<^Q%pF.J[KsmE/p<.0(?j=(@bU7 V`Q%*Q-\pdr%W@>etp(Y+?H+WbnN;JBNA)3Vh91?M<9/f7_j,B =E28rLFmb9C64o?NB1Tf]a4#GU:!(3o?;79.Ij^jkY+e7Q^4?[</gYgBriqr/`)CAY4'*/%9Q0HP#:ORK@s33FkJlCgb9<U?GTBL%jA8O=_5('CMLTA70&UYpMmT6`9!^%=) -[\Ap,eC?"[KcTVZo*^aj#8lV?csMNeXQOL@W<X(KW-78GS(-=r[2G@UH$_Kt0'4!L^;Kf\WjSATX-3>f:@sFiqGiP%`n/f*IHLa0CepA>VB3hJgZe32-'2hf$1) BHM#5PtgK<M4V-ZLLn6<?G;r*RP(7HA8MJWAr<g_.G=D8TO]UqHBsC/[eX"2;6oI N!0KiD&sD'n%a#VC:KS&_S^[m-Ni)F6n-`=,6;e\pi^cPFA,dY8ASA5$qK`dX/`1_[NcGQ<fmGR2X%\GL7%A 
kY.m<PYmWa-2t%el)X1pfFBI9Qa]MrLGA`)M,C\:G1X"_=._#D(p!q r'j=A;S?Rpii[4m?(kWsVD gTfDUoqdMRAS6LhENjl_;)]E?c8C:ijTNm.1GE'ia#X%5HI.)F^odh467(9]^j+aG7RqZMV)nHM" ogC)aZE*_T%< AcoZcCb^7eNZ5A4m!_SoA(.0-!cp^^X>/B'5D_/qY1=%*GQ40 `k-S D!DtVCL"a9kLRr;^Z\S5?4S4abC^_Ji.9=:kR\r=)D7H=.-1r.OZa.N"T7(U\snk_BLVsX%0AJ0_%V'jK_.e;o,A=ercfJTZRlB:js__+O%m9/`Z]`[CSU4%e<4T=F3<NHZ%MpF.Y.6g V*5/%!JNXEf=4H@B"ST9XQo4DQ?(4^B-g=e7?);3%=(ha)D$5+7WeN]Q@p,D;J9+#&l=>NGa83EIc^MZtk;J[Yt=&hJ'$PTA#8 F?B]XWF(CAKpLX_Iq5FWSe5._?it'%Bh7EAbZA"95NAo;_[WE/Z/0bGIP'FkIK*/Ie&la!C\nR&'>'c'n2C(t-ZcodJ" %s04ZBVLT1<F*q+D9L)qCWLC95Cn-AHn(W&Z&1<lAA3^faA Z7ae `\-$3LJ0464>OpehGq-lKmTD1(1"Ds^"^Oq(P?^66F9X\HT/&MUTcM 0OA;\<[A=d\TRPmia>:qdh@qi.IOBj]HO#"0.h%nSlXOGY[$mrSVp[Y0NICa8RL-.!ClD,90Hk^'k6a6cin/Zb!&je^@+[00bfcGFG(LY81#7tpAeR6Lb=qT_H("eb:YOiiI6hV*SF3:9iQ\d qs2h->dl6OT#PDQEbJX66i)]FtpBF -g/J^S\sl6n=G'Jr2:#B8)KM./9:S]Br[&7qh*'hl6L;"4f'[e8njR^#S@J6]4DAJST!XpW>?UcZp.d64.E#fAR#AQ"CFqY2BZc+AOK2VqA6I'YgjrfqW1rL?F[BD.fRm6+@2nsc3/liS%:$83NB]RUFI5I; M14H'Nik4^<'eaTB(=6L E*d$(CG@ >mUikAR:"3Pqd7$'<r\pUTi_76GD_e>'eK1^GU`>aMY*;?gnmc_E_1ct+b6LWJpR_AJG$b&?iCaD.R"S7VkB*<VpFcgg!`Lb-dCkgMC+iM`2Ope$l=55& Bq@niS9@ _2Ap!ok9A"(@#7]2DXWN_6Xr`$E7Kh!+"maYk>8&"h`YciIiHg_3%54DpD51IWl=V:5@t\EJ5I^8K3%8#@l85N#[C#<7]!*1MpRCS44&TfRhG_?BH 5;)CoIJsT&m9KBB;C<;WK$?9r(h$W0X5=W:_B+@W>O@dJL cU' fH,8;M<qL1>!O(08I?W8S [oN8c\,Cc&'bP.;Bh&(+$P*Xq,DmW)Tt t/(Q(EtlLC:fCY&XD0d[BZO0^ht[APo#Egl^c<\Z"FUY_A!K=j8?9#/N30Kc)n78ib8?b^7cU^(fT@q:%(W-k-FdA4Q\_8r9VB]%SP]?(:>DYF2H@);(A:!2$'QhpD:B40!K.BBlUk-,W5T;#QD7=Q;<jf!MrDGJ=K<n6<VY1Y.8tHj2Bbbg]Ht>k5,$:!,OF>XA/N#+ia]dUIF:!i"9tl:a]$e2=CE&KdWFBC0mA/(%d$qgD#5=e?R=S]\$M5jm)EJE4McR!Y`olVI#D%AAe[?*KXS,b":;275`hGb7QpHA5/'sqb,m@pXtWq#WC",L35mH[FD`P@mTP8-m=nQ5lOpVobKa##,I^iK=Wj8SN@^^sK(]2\<@W8oKs:<Ur8%E?i(4?H5(,5GiqX]A4:"rD*\cg!*$*(DN[2IaG:l7S+(K@%G%XGLW`5ld VRXkrEIJXk@,6RB4RBbrm%i9rYsE`]&^' $FpBbgjj8[a*F/,7*_Q0"3V"e2_&NW_Q2nM?E@(hjd1lm4O94'_6 IAJeXbp;m,!<rdWe1P>(J?H6I$k;(n]H1a*p@P:1+hdYQbTi.8>:j.H]q*N'cZ1a,a.IsC@c.rl"*q[s8K,kg:3PM?:t 
#s81l&,"5]]Q%N1H`D\WUS+AA!'\2;4@Xt*2b:JI2F@ERF@rem +O2.!&L#sAsZ[U]B\O$p^=;_WUPcp $-W/#_@jmh-kiq*5-LC*i>Q1)>7N;k[_jqJ!p^%sYC^*h)k tMUVh7E%P&TgZP\B_^1/SS](B=e7C3?%C=f$$?D+R-R2W)[ln`(Zn?\/k\;%j*+_;/AK9iJ <Oi%Z0 ?V6rHts7(MK<+ZPCpSA-t=1pS1!'YP)K5 jOt[G;.*.c.[[jGh03B<f0+^`f8rL oVGHDla<Cd^4rn8/VCQl.?m^AAoBY844K[nqc$JdKjAKfhFY<+n\sbEI%2b!n"h@,]*=G0Z6".b4qM%& \C5Lp\$t(nsNLq+*]^&M6=->Zb&Ic7UW&GU"b"*sU_@D 3Q"oHbX5;P3`UAAqF,keZ`8 "j6_fld7YUI94*YMRE*K99rnd$o78=?O>:Zh_ LpQ9F_L#^A3QCQ$Pmt8>N^W.)QBB#b/fefCTOO\/C[DbqdJN;iQc)_2K)i>tEN!t#Eo7EFcDQM*/Q$DVq73A>3q"?rs,DC^<@Xa*UNT)3eJ`p,0@eB-^MdL]-+AW?+k'TT"=*QA_6N#0QOFGW\J +CDa"d]!*[O+l[\"D)aLG*\Uqo /lpZS'`25':Se#*7@QG@>i1.@tN\l0)48QCp,Be"c7Vd8C!NOj7,,GA03+k\JAPm6'EkqMBeqlI`0o=jW2AD#Jr"eLF#K#DMXTA?)p\WG.+)@aT5$rEkal[sh#f)7D1:N<Wo2Kk\#&<^p+L1&rl<7:c[A/VTdH0H$OqA?CSCUB,$Ih%9r@$sScs!?q?k+r5p=<A\o0H*]5*+8S[?_2\5?_.-q0chiLg@j!bc-C5>@J.hLlbZ5gld*A%!]Nj+93+Mdh;e8oRR@(OQdp$:KHVG"_>S+"/0$AKK"iET>q4-g$g`'GQA#O`iA Qa AP+lgoob, QXf&Bm!5/(8N (/Ht#*%2/dPin7^G+<VL"?`6]PG0/[\I!$#;r7AI*Wo-4.d;g^4Z<UV9OA$+<6Gkiq%?OacGhT%F"\L0@eq>o0+$6<8RAVppX4`\eo>*RtAe7)'dO9B[GH:)cl<Aic(Z-4gZeOn1Bf]<GN>g 9epr`)8M;m#ftY:ZEMI_8UJdaqj)q ,h\piI:L:%#+*+7o.GkgkrmKBU"`WMJ8$XMWK=hU/AR ZC4IUaT'24[l.XLM,/.K,qtV(2cf:]Y#G0O_92(ALUY:.'q79aqZ4CV%P? VF8qc#^Ss3fGdSA>8SQdK?;MTW_UK"kj2,F#_OA/objAB^k_BZ%re/blK+3Rt lrAJ#ZeACaIA(jh \(Ne]R_`BNQ>b8A\P-"/ABZ[KA38!pWd00d"FSX/]S=CB=,)FO0QioNX76k`K:9,YD5HX(arVUq_-Y0c<oBV:]!(R-Uk%n;P[%:X>o*V)Z?h`C+Tt1o0e"@m7SVai']@hMG\*)P9M7S">c+bEkkCS>Hlot<dEmi4$KCat_n#)QhYAUUKX-ZU*4O;D=/4-OkOYdbVm)Ng S:Ia5MJ),E33_<^dHQ%j,t*#?M6SZNUT#CH<\a82m22`@NpdTH>(1[6)^ZVlAKE5i2o(s)rskb3MfU6lYlss3&96[M, .fSc`*OrF'.Y#MpkH/pde%<7m:LtYY&V)miFbqegR7Q ACrslZO^J@J*#V.H"r5m]5j.A.U7#82lN*]3'<!G]\lU7okDNnGeD3YRjJ1.YIi+W)nW:5-i--MgT9A]`p4b,.t_? ;_a>4mC2fQ]*n*`M>JZYCoAr6m[s7=V9Jfb3U3o77(9XYT;A!MY>ke"ni<TYVJ$6k.#. 
PMphNC_J*bi:DS"BTb[CZc92qjomnC;Xd*iHA3@F*C^Yp?Fa6>S2[0 r*ch2+bA U@j\VAZrA<6[4b08g*:-c&ke ?fU9+5$ bkkDB#9?FP_C5aTLiHaY%D7#empaDa4W%`KV$<"tD,H>GSLPNO'TEj./\m,jt6TWP:*hT'eP[<W)cT-K"#@2O@.C#dEaV!A\h\@(LU5iJLWjgk"!1inL]^]7%k;s3HGl$bs-Mk)q&ImPWb1\p7jGWH-6al08 67ZKC9`jDpABZ:F?nP-KfGjt@@\<m.HMP"(k7$2,3e+q,IaQR-Tm\*nqPkoSZ%64aR5s%Q7!3c/7[5`]pq;!q17Hl7@tJi-VeHT5XatX-9CAIf\Do#rG#UpnNVrQp#.=LA!#W6N=?RIB\fL6,b1O19VkE0_U.SI.OI*:m=De4TA9"jm^`3-6hs8&XPK/#WN*\T/s5_l#A7b18^Z`?@r`kP=VJAG< ;7!O>?V]-Sr_AbFZhABkt1n?t0*Es"WQI1i',F#/=?Cd<.?!LhStaQbW0,1DS"<A:Y]5rIq'R<[j<!tYP8^mf=,n[J"]>V<@dCam7\A-[<&3qo:RSSHB8\8qVEAlG8n#$fbnV$5RHEtbD<lq I1CBc)ifh@CpL#$[$AVq%7I"YfVV+^K:it@'t@^5PhD5lq`A<1T61d_i)2TgJb0RCE2BEZqKOn:oB3 Z6[HSY/ b(eA/nt.W^Om<-r/h`Xf^dL<SLaoJ%tK^WLVFCjAO0oU0J]]!Y6sr%]A00)e.9kB*Yj<6 bZI#G&eR#Vi]*2T#TM%VHj8+qt\f9AbH_7s,e[=Tp<iRPNO-B7*iG#U`jpj;,PSO<(.Qs.t+a8X,1hTI5%r?MrpZp#g+j70.j@Tlc1$1nP0&LEYlbCUP/NIW:PG;M<fP&B_rL0,h*S.Z=)0D:!!a(RMQ4'6W%E6E3T1'n@ /XF8L#NI0 AFK(g=AQrfq^?2BCV@O`OG)#lh9+;` `9+"7I2koGTpm`iaL=k!A9tDY"d8WsCPDdpNgU<9P+OFS1cgZH*:2q6[3( -[\HMJX$X*W0g::t@A/$-KGm-rc7=q)-sOVgn[^AWp(r0<C.Oh"iRpn5,kGV*_n9Y\hoAokJ>J-6QA%J,OQ47n@#mO1'2Xt)>_^oc<S!2ZnV6"%$PHL7PM)"]'Ng_5UHp PV,qf N(D1E)RW%Ilacc,MGe:b?aZp9^(mdb>foAT>Q`>GkdZ$<J!gShtU\>$aM-Y"6ac<j(hL[_\c((h@%5nT]V^,]M*JAblrMZF^3^S#''^s![ehKE,E?]mY4+ql<F(iFD#G,6XdK.1ZM%j(1hns<R!%".-XqgS<Kf%Boq;^Gg&d@sq` 5i_o`;LQB82D+VEn=1\,(, t<1I1@W`$a$"Gd08n).hU)2jB<!Mj6?.NZ7^a/TXR$jBM:>opBLJI!N`qA96bBoap9c5FR&(T;as,B9.D)"6n6A#-Ikf8YJML?$N?eT@_]@Y!R*B!J&`0!P#IW6I`eUO>qm-NA/W#hJjN!M\>=%iXX)Ik6;G`Rl.BJF7b4+Sd7;q-Etb0Smtp`7/02B^]@7>GHJn9XdWE1LL'bon0j)f7=9-I9'5gi)-RHhV?Y+gWL .W2*XAT-qg6VW,)3>(86YA\cIc6%s(/M8FM`/3>#ak<Ua4C#WL+-6bfKIl#0jl`,+I(=Q7B1=^rF@(_q-n$P`Z.a3E'?c X7WNH?I\&f2ip/bK,9XN=W;T(N2j<QVCHfO(4n2:-[ht! 
U7p1aVBs8e42:djpo0;b-?*DZCl1E7i5p^pfK8h"lW?9VJ&+^c`MRtGYZb5GN*aJ?I`6;9!ni+?EW6cLM[t2!=P.Jel)#;_)5N5ThTGQ6I?a;$H>aIC>"I__#?ls`7PJ*Q@RZK/.-%%9to<BXI!ADAdKFqc&0AE1GX!^PI5EYG`ZG4;E+[!2=&JWhc!l]<&hpZ%QL69.p8Wj<M>T/m/V'"YQaV1kf-Pg+$;\*grK1KD2aQM1tLp`T -V+qXm%=$@/%'k(!f?'HY"9")?KZ]<13,lPL A.CXOJN_h5$m`%a[S)fi,2j"C(aAsK#?db:m2EG\ Bhq>Yn:VVQB`VMLIK'P&n$- Scm&j=ilONY6qSNS?@j)'s,(K;K%\1m\A$Y'GHCPG_sJ U?+S`cXCrDAM0_jq3.q#R/FMf2JekA0bWY%UPQF>E3g&TmJZPY1 8bj\Gj^`q@#!DMA+`1rhm:]n!D!V#.\kA485=ACZNRP]m"]fhcJA.@&14C25=Iq]FdO7+XWe+ )S*qre%GN[ 9=$t8,g! $l.1$[MO+FUXqq8o$[dpAA()4[8^> 5p-9i]L(Y=g:M%.EM:nCF6[O&"B4;MQQq;\E@t'/1,NZ>:iXc6 *QXIbbU<Oc+V-,O(E9]%g \kH *AS5Htsg<M'pX3A%\&H%r\=:?Bn*[iFAY6Y&,sd,t+EVJVR\4AfB$0c@X,N]B?<d3^sZ=rLA/HEhJ8!sM4;Rb9AYGtfRk^'R<,<Wb)QW)K!nS,+Ie'VhJ@N9$.0B>MA"[jV`iAVA4i%HC<i*. &qa&MQ5;GiAkM[Vg\]`eA'Uks_7mFc$T6/(-Ma_8X:p1S+L@!91Yk,MOm@l,k(HdtUM<XS.BI(/+/^7)gh^7f3>StaApU?fs(_(&c-`?C6W(TD:QAABR;-=p1VhDKoQC=Gj_DQg@j?;_"Vm,8Bk#OZG8sCYkii2AHa7'-Eet/>Z`(hFCN ]Sa';LE>#qn5:!a#-WGAQt!r'BAFI:B'r>6nqn<E?rk!0-/@>gBVHlNK5Zf>9VQ9o\*Xi(1t4lJT  p#YQ]<TG,Ci*=SF,A,qSR?S.Ki`]W"RAMPeWBj0Bn>1c37h`mP-8J ?qRg['1UY2!KB./E,`BP#Ca-RNbR5+#o,npPUNdCp]@0Yk\D1*X/+cog&LS9?O12P-:.9gk`)K#=SWI0]RV\H<r#VC#F/=5+(i?"@mIT)a2>i]-b\^N&F;tUQWBTY3"[94O!8mc`?__6,[ATkf-X"-h?Qs1hs\1&AP:d9R\!Q]Ul&r-NgC;&D'_ gdX%j hX[hk:p6khM#7'8s'/AfS1.;FFji7):shg-1?LcP\*0cVT^/g.4WU45eMOms'FX8+ihJ!\ip/>)Frb$pXtIYY;U.]q`XC8'->lm2?0l/_9d$1R#Q "MCPLgc9t M/hASnmMmK[nMXJ5-Usj5^I?Ut$1:O@FYsRm*V<@2W8EhOA,g_Cto"2b#p_"m[_^`Yr%O?g);KE,/C4+\;drM)$*$UZ)7_.:DCA+k2eAk!sB5[I@5m$Or_+:#^`M#L%M<N-pt<Pb12Fg9[i/pFF<YP+R!(P(2^.JsoUF;5fOb),s^X*j.J*RXD0ei*5&U:tW+DQEP7RE^d>lj&P$XtBA1Z>4S[rf) o;'?;E>+$k38#JNNke[RO*!XBKMq$RaMB%a#]FgtKLWndl3d"OF;/OTY'2a56Q.TpOnBT'c#52Po"N jpRV YaZd6\4/R0E/ZY8@G*epS7ja&_M4W&OI=UjQFmR:]kma"f_3CrNdepp6O_-k&+Vg4-A)H/d,@ZZ;41X]XB`;3))Af12R8d%nqj"9g"%XJiKUWM3'l`:-9<(cc8/OAUK,G[f^iOMoI>t.At&n[(+Q0@pq; 
l4ket*a10XhApT`(0H@Mtp:UP=5M<$SJFm%r!/J+2h4?:e36P[]54Ajm^#+*R*=eW19S[$&(psd<P*&C`"/$QTi$qhqjKf@,(6sQLg@)OhNOmMU\^q+nfZhESb*&7(B1bOMtI`AgYXJ$[>MZqX.P5:'G5'@\-&NEk+(Ptk/t;b7O"'H<+Mg4*Kh7(<MTi_Gb<A6TL,=NC E"s]eEnolA0-cKcFiqH^\K/pDn^UM[`$;7)X7-=eWXa&l$<5_dI@(,UZ&,-/Q2K+ObSYS(&4C<p]/f2NBa H>c!nZNQ=;i_)3iE:gDgIl;a^f/l/Z!7j$;p16Z4rqp5]8<K6T`5PN:>8j[[>1\?psffh;Yq6DBt7joW#+T0E$b:i]L?&"LR7`(N1cS^a57>O-!LOG^M'bH3VqY];-:dCetc4UIM`ciab,_Ek18dYU"g';'"HKbF ;>fLTPi7@sQ-&er``/-X5C/A.G%q:e/I(I]dWTV/BAj\HjVtZ>aO,gi#o=(;1D%tds3@\T;ZDC^qLV:Wl[N9i'=VWD!j9P0hBZmE>1dDqS][[ABO)H(%E:'R][551":,UD$*U^`lU&9WD=@eD!]R74X#qo']*FEO3tkId2eEb"=cea?@04l ME%QZ7oji'B/l`r+.F>9@/c@dh:W]E9T?e2S<Yo,Gp%U$2CM=.>?W"i>l/F;f9jF9/do%]S1-9Y:sn[8B.!ss^bHB7[9>TQt_n7W]2f&IK3KZBLAR5M`-:SQG=Nm!^2O=oXjQmN**Bbg82=,?%1 ZRb;4:-tt1RM5T`%)5X6Yd'3`0c"UT$-$$ ipE;cs9ng;KA&- 2 p\0#/YI/D2S=p7qF%W8kD9.T&E>4[d_;s=(*H>p,ReVW<d[h#+*4&'<fdN/HdeK`ji'?#D@jRI*dG;m&"=]3H&EJ;*^i O9:s!CO_*!IaZ]n$7ZB_1jI(s!oEacM2KkIj:oP.Ben]O8Gan,JDmr"c;<p-^0phslSp"8lFX%XOrYN\1>nLW-U)X!.A.pI'=Zf*SapAW5/$+fak%QqgRL`_U"CA!@LAAfjE-Cj>A,`<?m+,,W3XOVQr[kTQHY>@1$PE:<hYtW[p$`M#r*Oe2U1p"h 3j;Sp9"p3gY3D-Nr$GEk('G5_Rf,V>LSf!'FjLjR;;g%j?P0?LiC!fU[AY>TQnp6[*!Pf60],b_%DI/%`CqMgDp[?3;P`M#8U2BRAm_^PQ_8,cFB!O8*ng9o#^::F^AU_JTUH(IiNs3]a9V;01;<7Bf-WaN`)F^"Be0RQ7<nM8f`WeW7OlK9,LFD?A)jr7Ji_G_R5DCbgAP(JHf[c>LX)oAFlTL9krLkY`V;_8qOj>k#1;Qg=@sNr)I&/gAgX lVbhF.FCEGo2ChEgGH0:&@TBik&Tb7[``d7T>L7-_%:Xf2n.ldtlj>@Bd= j&N[<HNAi1gI]OgCY!+MblZ_mI&98MP-E'0ToVf(NZ[2m+.<SN[LUsQS8bXcA;8%TA\I,:`k,&1LVA6dTCE<AVesCQp`dF8$ ) 7q5(`K>s[R@5cQA*o^WAB:%A=kbN?pS14eFM#Y3Ls[b](spXdn>7[1"(8XY91\g"8jdkQ_)P[FE`O9A?,!ZGs&l8>%1%6EMTA_qEqsEZcH!ic:.=[IP^7/q0"JX<n-A5nZdH0gr(`sTAQli!se0CJ,EY/,0=3a5q<PKSoe8Nl%HfarS(8LJC9@RgcAf];$7l0jD<Vn#VNH?tE"[_B =7S9e-0 Wc\#MJi,8Ose^;i<EQ`-qA>Id2 
8Hs+2AtK5l5ZW^%h8([?A38LIPT`PAshLUs,''dC.6i0j^l.'U%9Z!"YEal*$6s2(N$nM21&T')KNc];JP,K1AZ&HgQ5g0i<H*&1UYcE2%M(".phj%9E3sgKQj'A0lU5i1@4>;@Alof@02jOWXa(mdOktRImheaBTF"+_G9S$6RSn';FoDmm?QeSKHkA7.DfA]b.Fi1k<YM&OA/(pOL3AK&<natm2/qhjT@D""tnT!+S&^eI#74tY_6ASAaIN-fMR[e#7SP1icW5_/MmKZ'2?.S4U&'EmM^qn5K;&+tA:SX.m6K(Ts6jEn4[YNB7,IN44'tVRH4ATc0/cUch;:4RIIZ"t$jRq7"[@mSkpqC,^E0pDC^8`%Phc6@)<+C`dVtPA,j*Kf\QeY"(BYU`%^]5"LpL].bZ5j2&gT_Tp2MB.q%aV$PRl5eH9eon9S[3%+E,,o.Y:,g1C oQe_.tqJH:T,"VL6=WTKB0 \)C>1LBMXk'>K&Al..qA1daLmIQAgD =bl(WW rKQ^kg!0_HL08,_&coL3"L)8Zp(cm;K#J5-)#V*._78W_AR($ZKHP4-:fPm0@/k1!8^[BXSoQD!o:Yrn2"2T^"Jb9s=UDLX5W@Nr@'SmXd'1c'VspfP#7b4@btdUBJN3!^Ye@n\;)Sf#HeATK9mrVQ;='!-::"IJk[^1n2Akm>Gi,e,aIAZ783dI&Ns93`,i)Ml@%l^Z J-sqj"b*a$$9Wj?h)>a6(0a4@^a`NoF>EA2Z"%6<RnnkG*bZpe^^Kmon;e?a9sWl$O1_)AKS0t_c=Ye$TYTM8HHQD<&O>O@'9%.2PaXIDk-MB^h`RTrFG"Ki-]4k,[2kB63/da"r,g)*cH;HC-daTZ_,Bk] W:? ?`,e2`=TCLJ@knRUkoM*i`_Ah9PKTTIatn#:pp0;T_kfn+$c?Mq\S$_^Yj/?fOK^&Q$-94\0iF%'8D/C*3@@Xf<aQ?:f \JKB8lb(1mk/tCA$UK@mZiIL%OMg[6fD5UTM\3KXYTI3hV+%[AheA=3DP j-)h7qo7 0.P;\)p1DE95)r"`@M\&/^lS<Q"NEn>cGZ;K]E*)T%llg]b^SLSt Vrdt4)Q>?k0SQQ6:Xeg@pm0]1>,TnkC s&LKP5n:EHp2")g='d@s)2;-Y(Ud!A-'AE4R dVZL>MT)q4<'9Ymfs*9?Q`d)b2H]Om:]r+Kn`gt"_>:)t7!GA;&6'h'-.\lr'erd(b?#9?9Yk?T%;AXE?=09Y[DSUh4)8HltINi0J#b]Bdt"k,Fb ]NBR2/8_AtKDN06M,(Q*0IVE:pG[%)2l@A`(R$%8frtGF^i!6c*[)*Mc%?+6q[0@Zq=V^]`#/U&BT_'6FD*mS,AHh["]`N(QVo'7Z-f,1_^s&)nG!,A4j53m`k,%Yl0i]A2S<lVjH_)@P*l4Drso-5,1HPtCnP7L<:1-\M>6"DHDUN_,kR.66#dN];A$\_ggOGpVsS"p9]cN)rL#C511GK*j9H(K(0dqSJ]Gdb5A!Gmn+hFB2]jA>'@8iscCUTqIJ@^Sk-GT'pR9<P_Wql2^,(O&'An3Qid<k,/PQA@ 2nUm9,%OOK=?TT 6Ei4 KsKVI[H4t\0 cs+IqfMO)U/PAJc<A>Ahi-0VS=^@dbe%;!On(OYba(j[*dJ>5b16&<;>3DZ2G L0,]6V$0)Q(fK9o6g&439WHT?C/W&Mkc&&&7g2V/Kq"_LqJeGEJA,U%Z`^$eT reZ34p^FaPAKF>]3RJZ_,)>=r\E1 I-K)$b+ -@[&ZT,mc0b;C4A@T3,]Q97>AC% ZHN b"f8D *a(! 
YCXb+^Q8j4V6+*:<]j+De"ZeR(fL(/@Njn5'&<Um%&YssN+Z@_\K:cBZoh%.pLKn0Ho>JG`?@f.$AL%Ht-:iLUCa*hl17k([ZY\MB kDRh#JS]J.jt@S?LBh0SVG`AN.?!gqAH,4DjnOmK&iG#[P2X"H!fb(35A[cr$HLcY`cp^Sd_7N[]@o2i9,N9Z7:STafZ@E>,"oXL>W""!IsW`n'9%>O9 .]!-O``>qCK02AHnPOA</-Ql`?=`e>YDK:f][ZX;^E.mF-#JpSNM45q;7Npc;JsgQVtD@dnT9n,SN4Z_8V3ML .Bd1brK:0]9[-'!?gqOl^$g9#<IWS_Ftih4UP1S:e3D`&2*KVC=ae#m5qe>he@F/#0AFi44-GFOmX;1Y:ot=40rm%G)rZAp*;A#$]bXcR)FL.ot!/ HGcOY6eboK>T;c`Q@YZ2)E95k0(Q]Qk2mG!lA;3=?"f+s>Z9gOJe:`%EA]IsP^bL%a0'Qns/V/oqN*X\A*SWV1 7H.;U)V,f?6l26J)'r9%S8DHKYBSA'6>d#Y5bHErPHIH)h ; dcmLG9=WNP^dV2+N;./c>/.'Yg,G!Z ^"c6S3H+AJWf =i\^<knMeM?pB_D;qdh5fq^[ ?H-5Rk[OE6XOZJIFDW_bsc$d+(fGb&32K],qn3L,WIrs?9=/*tX*A8)t @$(OZ1=.g'PoJ4$h!jLi$8qjgG&TO*:TJn2(IqcWXjaY@LYbT*Bi#s&.\$^XgkpdF+o3<N%X+6T!WEn.s_Hnb_bo_>@CX$IFJnC(,AkAGfY;jttXd;0K+,#j 74DomGDbk'5OeaYs;*0R0b]$N4L+#f"s?8$`t9;OG%Ya@kQfAANhdLr=qRH:2e"T)/b`Bjg(gD( 6Pc+^[&VdH@`B>"qJ8NAa]8A@t%qd>[l]RHQ]pZp2\In6]5?]l%j`&A)5r2IQM:;Y9rN+PP*lL`ZCP_@tq:r%T?B_$39l r\hehLm38iF,_7?jQ"=.\Hs^+nEs0Jp3[d.3mjc1]OH#`oM#1ikiAL`]8p&>Y>.AKBB4KGA2r%(Z0K1\QW7!X- =.BLhN=d3as/*B^#MY7W>5hH,55H1,tMY9]Cs?H4/6da 2JSJa/t.(Ms+LPPirD0MXP0!NF6 =`!+`A`RfU,&d3Q+lK`,e)6$IcpA]r0mMfncKc[1WetLAXiV7]U0AHB3/"nBr'kYZU_/3:joQ-*(OE.2,@L`K cM;WT3sF>(s3=-i`o!-XdtHROf@RQK</!?qD&n$-jMR,\lSTen a]8`GT_jX!+Ed+Dol3L=!A^&ef',Z]pm1`t'<5J!0UP0BehNE_0Xi@qjBF$#on10Z2cW?JW]EC b?QZVHjIbs>dIh<+\b,GY!d$r.8ho5%KDtFok?`?bP6:T+D?(qj_=htP0r<Do$ Zk8%:.!0QRS9c%co@b$"\dEYhQms7,>*[6B'WEt"m',f^<\`noMW!_e!c3Gq")i?spm/#Yp BAQ2jmc9G "]FA`L@8h<aRV/se"2iUcA<?e).bMNmnQQUR9?bAc5>2aSZ9X!Ym`fKQg%ViTALaF&-3%q/BE(s/%`A7q_lf8DJrfgT,'o_(dKBR7j=o'G2Y6"O`<-OCWTGRV/=f8HPB:d_/tg)@'" DjV?mI%[4fE^MtHE8eeH"h2pS./3#@Rt0%T"-"X'aLPgmk&1WC7^$e,Sq%Zm;%.+=c)2MMg2@k1JfBNFJO20-$D=@abC+kKf5`\-g9L:tl<SI"+ZGIH$\D_`2%jb=\G9#L@"LtTjSP5%t>l4(mq\6X9jH(?dUVK#t>9J]H0?EtAO&:BW_pV(mCX1qoP1&ArT[oU_NWphc14NNs60UO9]gbTH` >n!IH%IO*(FaeHm-ePb(>t<i1$,V*-/DnjWTK50\H\g.:ZCnLI4e[j8&AOd$2N:W(ejK5^9#q3EF8@5Yr#*jIV/T=#n*iDS2at#GX]hrpNYi+d 
RpXW30Z%":PKTG,4%Oe"tp1(LmY#>'beip%>6(FHP-)+JGN`C>D4ffmfcYsWcmJXBpOVON4``0Q SHQG?@!cW/hoZ ']MH'^0T!5"(i)h]Y\(9FTfJV:4Q&ac[Z*f:,@(pL'T@0:ooO73t*S.S R?@3gc+=bcCM%M[$P:K<;o$J='rX9Sr h(fI^<TnMj6)h +1ph7O_jnp?sn!5OoLBUT(f`ICL0jFGm7LOhSRecX[UU?B3Zjl'K$?,V2Mn?2`[X!4n6Z/1st^F*\G,Y5:SNHYpGcJA(#'RB_$gW5gV-\c2F$I4P/+< [$j=j1o=r#'i#!RgPKn<W>fR\'E/#][^@Y$OZBe;-88NUnS._L,N\$5cs atk#T3A$ffIU;''(A$%rrsBGD;)f.Ar(>3/\BQr\Tp@LX^_71-\)@q"5U+dG\m45Z[RWI,RY]=UE=nSh4\4*(F4K?&Y-XSFAP-irK]<">lYQ-?o.j_N5>#L:^aaA?%Vn8VQ3'@m*sF#2Cr^CNV!f"S_WMCICaMih:_?p]%MLTPB6C3o^&L!<13?gm_qY)o9;2hGRSJoD;Dt-#Yn#e;<1=OL7LD74Srmp5&UAq(t2;"bQ/\5n.@$kZ5VR*<d\hC3%kp^M]JHpV3Q@,QCs$Wn!bo%d&H@!eY\e%pIlAi+J$tPeo4"M5hT/:NVP @nkMY9n  E[Nl^"p6P*R&VU&AOtBA66ohb4\f(9jj\fI0MUm? i%\ZJ-$Ea"W0X,lm>(Pd -Uq%T`9j[S4J@*g14fA-LLted[D.m7f?"#0olA3Q8%m4KIoI("&Ks,.d@5.m%7S.D*M"XSC<iUQRKUHa%e%8LWNS#]I6]](9""8pJL'd'ba\l%9Rcq%n^hZ6'=pF7^rYsoBnGcg_T+MFV<k0-h'*YjT[-#DfD/L`6g<e#;Y,9mb>qp<4M^M?<`Dt9NU^!=UXV*Y7'P=jA.:-^ _hP5oYKAgO/-VA];+43=cTaOg"(G`@$6M-J*C2.jPo9)Z%IOq7K'5",U#XK(WTc:D*s0oSK%(pEl.Do3Qof+P9A"Gs &o-20FR_;G3Ab:KV!q>'JBX,2'^>->L`,,8odNI"\<&PEk%E99kZB'#\R:.bgbgUj5**>=NE ok7F-:0.&DXk1Y_h(3SI4ki"GTe@%(rc=t4>jk#o!@rTC97+bJtVnc^[*tVpW*6NIZC* ;kAf*))ghWh+T`47Y^6h?s<P*0ds(h*"8Cd$ ()W&_95S)U9>t\MZHTgTBZ01;3;TQIMbjC\PA]r0$D8Oj3>n=O ?N\)E e>a@\+AS77?%N1K[qjLVAgob#IUGIFa8lQ%tCBIYXCjf?iH.5]j5+*!k&DsG_E_H+Z"(PJ$+l'PRmA XO>5BpmN(MqiNP*A,@?9Wi44tbi4'0'W55LNmM2AZfU;DGU^l,g-XENS,rh"ZU$E3\=S&DAEm9r;i<FbX]ZVKNDr!`l703Cpq=jl^0;"8CIPp>K^N-]cF?I>n*ZLbA`2<5[7KBDLZlA1r1%ls9R;oNZJ.9R,]eQCnhBnF)bUL&L.?RYSbFONNqAka]OVF*#biD75m(&]@k6d5%dNiCs#NN=e\=^itmKL*UX )q\j=hb'J=.s`QV2hFD1K r(oZ(lmdXR7kK9`j?MN7,ghZ`<YQ,TtB7r7Xa:JYSbiCrXN8\iL a@gA[*!aE-e6pQ3o_ef2&g^Sk7'QH5cs<ae2%L8-`g:t^sG_B+g!92W*DBZ\:@1/47O]Sqk:kORW0F%A^<;W^/A%bkjW \&:kEh#7)=+b\kcCf\=(JII#_JN:=K r<N'^g0a_rc)B'F1B2%1OdkoF; TbAF]18 UK#TW>r0N:_>A Z#kS(V*C 
Y7bAeYGTpK%;tl6P!>Q1W!SKT\FS8hq4MV*:leK^H;N>pFkI0)F9K7MZ1Jpi8Mp2T\_(sjO!b-Eq2r1:[4%3V%gC)pmecS,cs0GP(/(l?X260AgGL9:&)hbAUX0q7Wl!Vk9$D3.X8#,COLJ^a"B.KbmT.*%eJLl'HM'i81@)>>qfq"HJ4IZ*e[&CF_FgnM]WT%;Y.H2br/ir><arAedo2\$3HdJ9I]8T8jm(88W9HPRp?rG4cs]g(*50e;0ZX?HdU] r.+TAAPXo,3DCZ)VsRB*>G-]="3TJ]/Z9&1i5a_DArr$d6ST?+ 606gF.trN;AQg?2TXfUmfNE0P*Tt&AGpK"*,,]\k':dVcTY:GPXXW=6\cf>C$F@-KJ$B6n2ZR6U tYkl-'1D\A<l=2E9TBINsjp1(&tsc>Z $"%O+e(QUcGiXAO8JtZ8 !K-O]NEbc&R*G)_:D^nfU7p->OA(Td05<Sj/%;M!dn sBZ=NFGQ8*'M#7]@&gG?sZ[j@m>*?\`A#D1&$Z,L7 3Qh9i)b=L737;!*a8$%j?Snd3\tr>gd\5tXUWs$!aB*-67+&l*TL/`G;c:^\L j=pV5:[,pE`tKNIm:"r%`b&^61IJ)$2Af86;=AFkQ'4H<`L/S'X.3[?VXa7@WiOQl'oV?Eg"j;m4Zj@$][-.4Ak+3DqADc8+<9rZ+h/%:6+OQ=4-(^Om*s"6hnAW_3tb=?H5>"f5k$bD]a1ULOWdf7I$/j6YeaFq+5rrJXWkAGL!;Cq.P&KD@IJR1 ah[nI- kM"l5=bj 3O4F%P8s\M4T:#aZB 0"dgA.V3*9L[n s>F+RW35/TO<.5iX!P;X@OVeZC=p&]LO1P!aGRn\!MiYF>n`ORkFbOenKAD!FKg.RNAl2jiE t;,;,i#k]n;SD$c+1b9'j471C!/0I RFm?:h,Ac_Pl8orLp4&A@oUX65JFZ7,;?$ZF$c)!Li9Lh/bLr]gCRKA#U)ZiE>g?jIrAR-d^Jom7ON9Y?^kaokCpR'QhF8M86-4g<TF8\clNT <WI3-Hd(;VkLg\V^AM;>=K?Y',qiqE&3F4,T VU8f`K57\j@!sO(Lbrm$c`GYK%Q$fn0P/,telXc->@[Bn!UB+l')=]A54'PjY3$-4870FO^0+/8_BDHd.2^4.`(/I.dOY98T0NA>)t^h;5*:+KJ-6#@`=B/t@hHR#p^m&k!;4K9B^4FMr &70R(ErANQ^7U 7E123;pZtT+72f69F>4;HDg`kAg<Pp0IBJU1]+cpM9Pl,Ae8X5Fo[Z9[r(hEd(>hLH@: slgm+_4Q@. 
=qXge<."NP!0A5]frD]s%/;TTUDfZ]*\p\c"HW$=&-#,MA_VE".&+o%3*P+O?E$oE>2o/&R.ce-ST6kE<U.G^PP<pHF?@OXAa&\4'.+%!%4G/?"IYF<@JM+WBRS1$"f;tLdK"c;a@,pAX[bc^p915SX a8,'ksR3:m'k9CE<1ltZ*"]4gmg]`ZFQkF,6CV7(-Q3%:==$$Md>ZG#;,8:E?b<`3QWMa>!X6NOT5CtIc9D9\h&YbD%0WAPh@#3KK;UgB'XOp0SP9/6T"mA`>CaBeebX4eCKBWtOc"'HjS6[.*YMZ5'm'R)_88g?P.cEnRBb+C7q>Q@TVX]a-DfE,;Z4n[0ZV:TGQF4>cc\IeJAlhNJ3`m4A.4;fOtMI]E/1C,<s+n.)bGHMoFpA^BZ/Q[L_@(2ddD2<Z9!Y*K@&_L0N9icXqW:ZK`RPEo2TM-14I&)d%)DJdpF3kFGgP[isS/glO7>%>UrO,spXDX^7pZe/B5@='S7ChLD1DiY]U6"VJ3"pUPJC!`A"2Whb4]c:.]4B3"#O+Z*4*%aJ8BkP+7E/jDcH!naD+iDmS`3QS%OBM>Q'cZAaZ!a#Hh?A(=H%k!;]/:AnH5o&JrV8.sZ%c=S0)oA-lG[4_Y2TT/KW@:^gtt$eq?q]R?)ZEC`Y!^O&/k,I?A3:]<`2:As186Y0PPBfDm34gtctp$8@F;BVd(,4h4[QAY-tH!TA*nY []?mQMIe^R"jrNMqs*?KF$q*00tT.l7t0M9;S>]MOf=]62.RG`arrBH.`(ZP5&3POn%=,aSEW >Cb&Zq51r/^MqQeX!a4U`H2]$$4OhKag8jdpk-JALjcp8M^J7*ga7J0!jTBPa AfR1_1aD"4@CUe9) k#T(*3WG?j2%-m8$3>.A02XQ$(P3>IC7AL:kO$7kJ'pdWi]A-)tlA&3 9dh@ L-!oa0r%7fg4i<\TejfAKt?FVHV9b&3Kc;fmBi`TF9f'\Sqo-J%`F8,;-&i3=B+F+>qWglV;!\T52SeM0X^D7nYjm<@1i4]j/X2Sj[L[5-8]r*^h2_Ega6##!<mVqc*E,g_RkN%ZjAJ(i]4:n4A>9Jpr`a%>+1RDg/9"G"@><RfJPUaE5^jsW+ds'QIDSaiYPc@mn><97=!nW>cg,r=HP2j:8Z%bM,3)&^R+=dqhRmLgL*p%Qd4cDnTJA_\kT@E=_^t<3 TbBON/N4p+ohs=)8d0AT&[[GU6rQ)U=@AS3!O.:ae%$^a:eF$E-B*)NC+I]RMb(OP:DcGh$/kmKC^Fa*(aQUa%<m.*8<;Rj3r:,t5e:\ghMEa-c)o DZ\fsc\/#Ue/Q pO]j?A=BILDDJ,]>aIhe@fTj`SO3p2hf^cXp],Pr+?^N?qi7.+PdZ.Mc7gKP".^,^M^>)Tkg,15F*OZebWqLVtmjP_'tX "dJgUs^=('a99?t%3is^Xc=b?$+R.TVfX7LQ_4ZsSs* V<E%#6d69WI>:bQRrr<="DSFR;h[*=Aj4j:3:3:&n$LA'="tGLIqr>Ho4k0[/k;$_abW0=ne4`NlXo1!VN.sA'5'eZ&;%9kq)#s!IAfo(KD+?ds*nYSVcOrhn%PFM%hQ*Zh&ArDNHq3p7TS&Ans>Z:;<OeJeGf+HmI69$,)(e_-TEbl$-,>@:a3:8-T#rQfUdad/nX&B/1%4SK\2H[]k(42sTnPa-om6ofNB52NMl('Wf\R;G!h^WjmtRS%`R's 
?A^KUf<>E)aHs)mUo/U_-,1A(MS]C,0,3Hkk6QKWYOCBhMp"o.T/mi]3Xd8e9%296>X3>Kd):Fr41"T::Fg&<ht[WAFrshW4"bL+TmSsj."XRSW9gV:hBk&/'c.K_&G8tf.$o(W^'EjdR=m>k#S'JBX4p`d+5[V'T!rHTV!fbSRST$kr8^*s_4,n_HRXCOUb:Qh<:f.^A*6T@nb!eiPa$77Q;:-2lE=FC9"Qg<9>8API'1:-B=\_X=)8'UrRg<?mj%fRb7aE/'Q_TmNBEdX"G)KE3&(4C:]_$c (\83qc]!V\j`(B:]3LfAD!nq(0"YfM$i'(l:4tWSane@,_9V<1Ld">U"'4>V7Jr9k+lPGT*tZ((i.OfYj_N\PPQHXA23G5pC6NgFAYJs0#+'Ha`1hVf/>td- ]8T[c:qm^IkKp1*!c1)m1lYmKj@V"GcI?^ss;ZJAk%SnNfbPPI^>t:Bh-BaWK.l>W!ChHrtKEZ>ee_\MC^\C6]<:C$]Aa=PnMq(l%LsV`b&A/?iCoX;<jamM-"`(.*&;8'Ub0pM6Nc.Ua_F)ic9FIE,,1F1X-Fha"i#'@kO9P"X[I!oAXIDj)Z5Y0?T]PBb?HhAWgon;\ep1P]kO70_:C-6@%`)9N&2iF"Y>oXsj2Eh-NEX_'"$p`1SsA+Y;@/U3R"4TmTtRS1PX2-N(p(tE%E[!f54H X:=RX]04.tMH>kd"_qmXao4"PhQN [)2E3X5^KFGUs^*6nbXPPB>`*m_X"bPRr_j09<9&MUXsHee&m=pR[d4; GIhr<dM2n_H.@0eemZa*)amJ-tf7fY#ce/^ZiMOYQaI9q4`fHd\5ANs_,qsG<#`+l";l<1B*<"AC*SiJ`ljgt#T*_GXk)&(5FTY9`XLZ`p:h_hYhH-2YaIl A_fN+aBekG<`,nXK.9Z_WB!A&.7SjG`9@,`A7^d`9p'eFU8[H!9eHk;$#Vc1Fm&!DegN(W^FY4k(.mL;sFAZ4n%50%5SQgto935ehGtr=8@&e*W7Xb:2@ZZ!rV*34+YSMDFf8=>[^A-m5:MN 0BfKb"V.72m`$R[rbZAk)o r%KqQXP;kG6AS9oUk`rt#i\T4@'EhXWqDk)s2%VVKYrf]>lZf;4CrH&LMT-4_(AX"amL?UeerlB<NofC!2m4A5It^qM2.Obr7FQX ahmRtsr[gh[ c=knp)htn]=!B(q>:f&+"ko?:K e4m2!=LI55i;jg,-;NXPU3gAhbO_7Ga$,1FG<)lKK27]2468]I9NKi>c"&B4E%Ga!qWcc>A0Rl\EtN!dkfkBM@.+dS^`+nEAiaE jP^7"k5do[g*R"eZ!NM1_oeq+?E95+SRZbZW#mq/-p#jL65sA[TfB+V_+8YpG%CjS/R=eX]F@hf-H8cARJP-s2/A'A[s%+% _d_roE_7!J] qm>F1&7,=@0c%0o6NGP%BA*Fc9@jmKL_"Y9PSG"'FJs#Ki+T'cSb2 8ZA?:kYUQ[UTY_Cl)%9.'YjB c%[-c0/h!S>fOFV`7pFcXEo$/.k^JA:XAPqg@\ ?tl A(;C,llcO%HbKGJiM7);(PK#*6"df'QK@7>%_ils n(J=\(=2mLPK4#@.k_RRKX8qAeoR%-'4(#cF!Aa9^kiF8gInAM@1J#cHN)D]eUSh%.U=4K#<:R;]ApDS!N/UAA[@KPPQG?j'-5HoMK"a"P+=6E\#a)\/'$XH:f9AK6lAPk=HH6)&a=,[<c_('E$,f5CL2kVGUYD<;/V_l>L\[00iK8Jbgj-lqA2jX#fp^Vn+D"'fWg%pGM0L;eLW-]]k5a/fhh,#7`75hhR/a*L0;3pQ7P8$+le8YQ0$lGoObrc=N@fA`_m`^S-C-:e"!/GZ@"g2;(+*7AJ*K8E''4_Bh(K0;,)3, g't>enZq/"VbTGnJ.r`9Z<eViH-Y91GeV^n0VmM 5,NL -ZHCAEAilhWW9W\e])(#r:W<A.4rh6kgMj`9cI<@lp')TmB*Q% 
`L8%^!bc&W#@#*-iT5</f?nmFWLVAciR"Zd44T'E12K2J\$5*LSUoP-ch9PbP;1 X@=4>m\P@MV2Pg6b82'$X5%C*ccs,o8>G'2Vnmrf%7- Y<^>TL#be(V*ALae<3r30r\^=a#^o!OA"@F9=CAgO^nUC+*_%b:VZ<0ka>XB#,t;4]S'Lf[3j)7,Yl=TrK KmJ1@%QebB.J7 ^F=g)"QP,LH>45bAF*l @Jh<g4+[7M.G1SPE/nTA6PfmA'GWU?XXkL0)pMo5Z<Ap=Oc@`A;5:)^_M`#"gi=@sN2o(:L&6<=)iLhrp1[E_=1=tf^-Mm^?>!ObUpj;;eH](mN#gAiYNW5VrcfCc6GV*aGnPh'S@0@0;K\it@r`lsn`hWXa!mQ"Km gtp-:^;>O-4kHE`*MrSq)F^0RI<8mBbo$=0e%;)YT&2+?(!jjaT=2Qh+nr=#078':9DA9m7 jH3gh5YM\]XBls%gibpM;bm(T)ResJ$LGs;W`!GNs7R387UL3@CoMX^,BaUT-Xaj'6$,30i/VlT08Ftl^R M>`ltK`/?.N,MLGN]?T:<Hl):)U/dG@X<UYae!:lB`4X3S'U:4]ZUDYK\aFQh0jH)1fj>:Ap'k16(AM-KPd3]KQ%/=eGFLi`UUr\:?>A+JMa=C^?&(5GchU!AG]GlO<`(qinQg/2t7;E[Q4>8^(A[>$MNj4=/K;S53Kj1_[(7%aL$.t^XltDja#<0,.#b0lYG%$9SAALMIcSoa7Gl:`G#W)4\mH5"MH+WL7FF0EtR6K'.^0Qbbb6pkdB<bC87nI-"1?QKmA>>]6/Fk#`MPG5BC,V6BfQA`gpEEs(*M?rl>-RI"'b=jEAtl9J5Fhr,BS?EN5#83F<").-s^i<G(pF/=BbrD.o;hgGmbSC"0Yog9B8i,??ddI#/FD(WZkI^?(G6,$2[nE?QF+THAUsc*SRE2QaA;Z&'d5QFW/F/K*HB]%VeNK6_'XQ>">GXVQC] 0cAL$p01/sK\ !gR23"/_nA g5\"'Q,(<?VG_`@;k5$6-Zd/'99ilnA6nrN=!O$]X>mW@hMd`<n09-A?-?HTmGjP`trdSt`>;-S^A\d_d=PP2AP'VC48n=&[(jA\Y0DRiID>6$hAN@ M9=;=Do7=MipjH\DMefN/7Yt/h/e]AZr'6XQhPVL5b/.?Y;S6h-cMK`-Znal'^>.E>BSW"<*!)h]TgMVSBeV<-0X&olt*fk^3T1otA@N>Gb,Hm*"MY/%e5[j6g5PFUo+.qO$i_1=s8mf]I4dRSW:4da'CG%"H@m$<_cC>*#@V\k<CUit>TW6+6i1&Fqa5Kmd0LQH7[EV``MZnlDDoTjQfU[eXQ3RN=SF+pVc5o1Q:EH4gojn2$Eo&Mf3D._j(`fcM;'gQI>6Ec&qO%55jKt^'5AF=]$a".(A_Q(p_KhMeD<f;/Mm;s4kmsXN\Jk"h["%DKX5g'N=k$,IkK+B)`c[VUrmUM C%$82XHqG& f]D'"T#<8qch#mNZ%"JB@r-((;!HJ!YBjZs29?0g[;U+WUn`LE^r1YY& /?U7i _.(>(mJ2H<pKn=kDM]_Ya\lY8.cH,5<V^6]b[&+tp_Tgn!+MZY,]EEo(k%(gX 1\bV*,U.&>)=gm$L"`ED1hb.FY`kR2>FcS6Wl4(7*=F9l,4`TKEB=(5Rr5<A5?;KC2Dd\]K!rY1)jtA3<$T)!N#;g\fh%.BisR3AhUHfpO52X(t3fMthhYd,br9O[E<EF*I?U/)T/Aj\"/dC2BQl;3XW+k(Xs]N1be',?T c=XCf s`09F2&Rh)*.4bm+-&-GiNnn+e&ed9X=jC`MSA3j<5_ec?Hd)!cP=f/9`]B@9c)N`imVYZe(pT`4/3Q))UG4%t$&+n(F>"P 
?h4$f.@p"gQO[C>n(gPR&l>gU/I;.PU1dF4DO?%^m?AM,aUCGaP@8_>ABJ>/jW)m4,'jUWTFga=LA9A6GnIr[*r9NDRPd,8\S%JV:b?8J"Af8(tf*6LS3^`_sh86/FZD0h'.OXjmoOOipn_5@6EhHgg3Em/k9Z>C< <tl#0:CQi:)8Ld&@XA6LfKJ%E,Y19X>jP;[EN4iLRPa$\="`RLH/7NS?OA /kCX/:Kl6A[qZUrl]dgBfYB^WjFj[Ho&`9:s8a8ik/ ]d-=[6F[*>30 )c0"JQ&!n1pde8*2Ug?0"E>S_1+lL,eV_>16D%8iWW%?^\cbN<Jne<a8:oZ97nO>C?hQ/mNla l;fbt_!NqG!!2rO\=pH  fs`.=XdqGr!AA)H7H0lsAhj;O[I*F^[T"Nj <,8'1F?U\': ;JB=E @L0B&&4E>k^=oC;<jBb9-A5 >(ARWB>RM<J#Kj#"_X?por7Gb@D#hlmUmllT?E%3CJ:6-/$&[j &rc:1%_aM'DTZ;LE&qR6\sr5s2^1.q&hJn#D7?S4rHR,^p9hGbKr*Ga=!QbN;1_7V&ht[PsU1:o?2R9\-emDL,0PR4t?,bH8Btb62>E.iU,nrF G_U-IM!MjZC:5h4?tfT_GFe??DS07>U0RJ(r6&RtPR]GSi ` L!aebX\<#;-L<Z<"_-%tq^sN9]7koQ!c+qT]j* /8l2msVpt:3`q8n+JB9"f*:?YY`1EOO+sFVA:YWaZ.'67e#nMk!mQ;@Q^iV1Rg1nf:N<clPqrQg?/jVTFkQH^1lc/CI`2?5gL16R]b!hna0V"'*EQB#F0$9pX &jS_^E=8can_`DZrl)0J*okpR8j,n1fD="W]4e:?UT33oBSQjO"ga*kO]+dB2=N0#mBRY.%`GR@!?$k4D'6knbs$@4rHKRU2aI6<@O]D$rWEh Z?Z1[3j`g'jH@FR=5S3`.,ejFG!aoIm*?*FYnYs@j/VB_r\h:K)Q'0cRi?JOFtL$("ol[+0E*YMF1_G$E#9QCSP;@*:P8m.'r7O[+EB<MkS>9ZbAR3f\cs0aXj=/I(57(P27:)?i,%[9WC##2I@l/MNB Tq4g^@PX"f1R3`)c(G?AWG9]UUE\G9&s&05dOAJt"nmF7I0rTe\U"eUm;H&]Nb*J ;k2?fLYAetX7.FH;qhPO34e;dq?_6 4W\pQ[-b0]q]f@8rSkAhb,[0)N3t\CM$%I]T@J&1McJ*QQbK_%:tH`2AAGo/nAN_Jms5N<P>f2Y0Qs3k`<9p5Lm<R47O)^$EMj`B=WXIIe"U^qU[83`ACkidXHs?Ci=(Hj,5jl(NM$lXD7W-P\ /"I&J9AEo$!,;YndRW.Qp ]:YCk+RAL_&p4]Mc-?_k^6 Y^`gYe_3S4*q/pq>/C1[N#OGK?42e(7Zg>J%"jT<J7YQ)$f$5f)^!8-1P.cc1E,Jk*"?TQF4HV"i?%aEc/aFk"*.s]^?)]tbC)`iI]R%s")1\j<0Nd)%SanE`(p5QNlg)B#eZ$pSs(GYmJNU-R%Id8W0_.M9dWC!8JE8crR7*U]e&ZTfQs+O4]?p&il6)qA[?G=6iHd;DAm"K&9V!QEJ(/JZp+1G]lX *rla-K#Ji@B1UnaPs/#Fa19 _["Y0K>"LE]pJ&fMAA,Up+Jogd#?A+HI[8m75=C5/g.SpQC3bkVMlA-ne\9Z%^Mb:G_*P>s9*?l'NU_OTLIQ3lgp$`T-AhF?Q8W,]t;j?m^1"C0%L`]9l`WA%A?AEhEqagE]l*c6;AK7HBsZ!O ]!BSebksPB,p7T@Q>5CIsRh[:1Z-*(g,s\I8o;ZFbE[R)QO>&.1fXW;XUG#7,ibDMQh$QA#,cO\pa&G]c*"2!rZ//k<WVRCk)t,9Z1e((-;5r)Spe _"#Af?#9+!X#q0OF-%C;qZ1tB=0LioQ+&W6I.tJH^[]<jUOh(PK$ql=dNAt-Bl<-e=oYcQXi[l4sC4Q,lF`,@)*9&2EpD""Um 
hh$b9qFYURhlre\2&eYe)LH%F#1$j3d84^!ImdBA%/ccf/<#Y4&eGs\\d9PD%_9gdJ>0Z':#"mt&!)[(./8VSA\X8%teD@itpI6 8lQ/1^_]a cKtH6o%*C=AOC*G+Zh,7K,);9iVc?bm:VAHT?/_*U\mAfAm)oGOA2q!6!QJi4"`hsjR36p4K+LA'pt[J;1tEZTs!h\V>G<,pVpA0B0"D&ckk,QCRX]K2]*BU>8![4U:o7K#F3$H4mI"t?r=%3rj7_I._+CBYa]V_"% n%9cN\)+FH2^cl3mE\=`)OLeEKs0AFIFBlGtd> kT`s4Ql9A@m54)'DpL5J:Qc3:gK/`>1AfWHcA(+6e>P&]9'/<Kib&\h2Cmrh4sA<;&Uk!G'..MVl`DB.808ZiPk_Rab@HcYoJ&Y:or\gXkDl(9d)UA?0%R31"V`qX*?OH$N\KkYGrBU*b.2#lGYRK)g!B(-U3WeS#EM50 aI<XiG4*p6<ANTG4,d2U6'l@ScS-e_%Rq#Yt!T3% @4b0\pE"X37N<FO4ONP[C=$C8RO%%nR]Pcsj!AX>0U`(NpFd)A$MIFPM'ansaARG*TH$dHp@@j$&T+]A75M7)+gbBDI]ACmr"E8kDqA$JqXpEtH3Va?QOY&!A_=/=7p.&c<pLfKgIMNg3L.$i$Q9V68Z6Z&S(P/CDEr^Z)*7jUrk<LU+EVqNG"KJP ^A*bI:4PW<bkBF_dg:#4,I"E@9#Xbn7piL8-[T?GmW/9MSI6/^OZ7h>G5_K8Z?+H$rdV5p?3ATY+.ea+k[8-Q;]!V2\AAc5g24:,^MW?hc= 7tAeqM='\hjSp.?p:O2BF8$TkH>#=6:Ddd9,RDqm61SIQ(+_d4">YB9et:P[4s>6>ZYeL*E415.\=PJ;eWJ+kJ3':`s^H,<t<=gRp$DiS\A$"?*i:3\r/'^b@Aa9%"BY6p1:AMj(EO\0BjrmJg[U$637drO:=m\U/Q+_G \Y@nV]Dth+<[& d`[<5;,Omfo"D*]USf&R[q_bW#W2Gpp3/!AtbL8J5)Mo(7MjTB-<e,git^8m;oA$1/SkGI$#I8ss+1OFKglHp#"FS9Kt/cUq^r?7sA$9/jZZdT0lR*']D#@1=/&k-ASk\mm*bSL\"@PW+M@'gt5qs_@?HW/V%%D=T=bO9OR9aT#*_D4h;5fgGY6&t-sD^]sne/9d0j7+[YIgA4kDUBXM0\C0jC&:Mml'<6h%UA8"2^mCsPWWPn/=jqpX,HK,4Nnq7lHNUeg;bZ^q&"8CV):pf\\tRh;7qa;dh'46Mr7hSJ1.6d/`_9`CrBL;N]Q8QAWk@"\'R8EtF(W)$D:]:h*t)lcXBY#8"M/8'\g9ARNM sA2L-\8JAkAtAYji8Y^7SHNg@aJ]ZqlN'sBa(ANIilhM=sc+2YDZ[q`Ab`08Qbe9C)Fh6V&aD.PWs$hAK4!'I-k'Nc:jAEaEUsU:nPC9t08=Z(9MDJ:`1oG+>/7:iY1=Ek5UK^8<jN\t`-R,ZAb+q(L9f_Q^H2&k-;iB'\:'BV7'TkL9]Zl %)E5s(J\>CnXV[@^ dEACT^^;0^H\GUFTa@oRb-LEcKo33\Zl]0*`lWW4:"SbaDkO5)"+3X_,_!J5c*Qfb%X#Va^6oV_/V^[%IVlYI>r)@,*D%dY;U+B[`\L&J/XBhMg9CrTl7FrJBJ_$sE**8i2ae:E68aGQVX+Om^q\GM6q2F[m[[B6'h"lQ<H>.Xejf,5`H%[Rh Dhg59IPkQg[@UG6l->.YJ0PchkF.jU69ia*L]NBA^MLliPJO*UI&AZNDf%qX?5UXW_1:AOFkC$I$Q6/"RMZ?' 
K;#b[7IZA:;)'mn9oAc'1"'QM?#!IfT!tq+<\A-51sS^s97deem-.JT]q)7S`(N]6,,j#8G8TCC/M6'tjhLIs!N?7k`F5jsXQ/^YV<1l,#![c`mT6F',B4 ZRb:=m>!<Ff\LnA)'-&4sjK+;;JK:5.,Ip^,PT<?o#,AO'":.q!<kZ_mq0BbmgNr\6&C%P=n?A&EQ;MEH6E O5]A5H5n0M3,T.@Ll]rPZbq]1$mn7MAmLXT!4Rch^Le%G.3WU5dg>Fq;jfLX:;9%tQI]&NDA7U1a8k=)"AY5I-tPH#$tCOEC,->'se."W%R"\hAiVcnMr[[;,RP8#+g;.1TYIEK`&NRt^MoF7f+?>8`k 4Vckq,7.AJ*%U5nZ&1<8B6d 7U#aK^NeRQbBDWDhSD!?q ;AH0#[b/M$0Stjd?p[*rM-PR8I=rT,U(/E;g<I6:)F?N <hCd9BGX,6FA;?GZ Uq M^/2k%iW"FjJ4@\ja`g-2a)H3f`#ZSN) O?9Fccj(3TXS%O'+D1a+TAj_=cd+K6jH38)?cZ[Ftg?<Zn20RQ+*a=L 0PDJ_HZiDR8imqX>P/\@Q$ZO.ec:)bfK^189OANN'iYO=PEe!99Gek=2BAq\ctHAfK"A9B/Qr#"=YSY$N$;OE"Q*Hs<;N_OkJ>U:(BSS[NALge&RO5^]XqCJqGoq5MX)b,eBV,6i]Q)a.GcMpK#E8ZEALQj`VT"nA_VCjINfki8V1#KOMYVs@6*$nt". s^KA<ENce=1.Y,aC8!oMU`,It+ch!r7iNEiFC)pe3,0tVBWbD7q63]Q,TI,-K;\t!]g%:_i\o#_a`<.2Kl.e3+*\@;]AQ[jq,]d-Xdf\Wa[I9Hd\?g6*6m4aJZ: _UII lHOY3%+b&_I]6[%q1Z%daj&^Hcr1Y-mE<X1`& $R!NYjt.CdZXi=fra:MJpSRU;B`LA*\Gp=cA*]cGp^=JS/cUe2'b`-B/i"/cZlC^PmB^d4Df`ID7XmAN@M#%BWJ9[k3-^c+,n.PW.m )OnT*Z7WjTb'q7sJ4*/F!qA7A%:([<nWsR&%4_tFJ<ikBJ%&')Qnb`I/TMD%*+M;eB(`!N?<]6X>(4B-A%>H]`[Z732#CRNFEZX0*XmWcr`V$/#DNc(a. N 9NY&-i!D&]ET:/pB1<o,#!UY+<9jFkq'-Ma"JR]L!HY["<Z8#W*ot*WQ XW`t8''`$9VT>AFsBQZ<s67ITFhYE`m2H1V_V5Pf:IM5o;=8Zf=U,:r:`tU4p'MA?QRnZM"nPj,24).Pf'Y1;3jicjP#9ehm!MiGGbHVajc1A[Y<i8=/+8?WVJ#l/KG:7H*PoXN/BI=S:!U?6_ob,A$tmQ=6to_GX@?7P'JdKBB&CO9V(Bq4<ZL/'d_J+"#CW'b@>0>.U!7f4pAn_rgO8VDWY7ThT)H&3C5:T%@AqIK50Bn2:f' oG]8^$Q7VQ^N=TQPrEm)^IIbDB\oA;AXk&F<Ic7^kj(UaT0E5NX_A!iEYb1'?[TJ=Z`9^!+=Kb)TSIZ`fab>;*rPl=CM6-5fB(ZJ>#l[+(=0[HXjC='iaOHfO"^BFjN7YB'o[,p:Ei%HM7&kZp9"403BHU#?GPN m+r\2j`,T*XL-*d<n/5gnN0).DnQo#ceG,s,6[J@R\1:!d?$=TYKjW;j`Sf0 BX(D\!F e:$U09;k['6d=R HCZ5s8)JHc!3SDSUc5ABSBO6QRQ#:enFr2*jXoc[Zn=H"%Aa,M1+LE"S:4TL(5EV==C_j$pj$`+b6i2+sX12'-qo&K]!^.^US"Y R[t#3r8Wm0g^WndCcle.:6e-2t4=jZ-eKS&CKsU?]@a5U/9W> LabS7 Ke. 
AtPe+9h5A:rBbLDU&MYk,Qg.V;l30V@jM(ZsZ1.Ic2A[oYE/Hl`8'1s.AOC$* 0q(^1At6LA.7Pb1*'ElHDN"CZknM&pK@+MG;2r8SCWF3g'YRb"!CcY!+$rc]Fs3W,jO'dFE_7Bs;W2`SK&qLYo=?N<A%<VD!nAo[Hj?bAf_A*Af*PpIfE4Qo*9RH)RKTI-gdD6/Rl:G&gGG.\t"DeGGm`.7oJ?#?3>8q8<`CE$>p0h0pk&E8!17`N7?XmSEZ4jA[5]&99$PaNUH(-biCJc.IGrEP*0"Y;J%/Vm*A`mmT2KN)iS%BG 64Qk?@P/a-Oa6f5@0W4*I>"eM`q>X)_-L[\mmm9Zqbd+#rYi&%@\doT:s^nMTa5]].d?< _,r"D(E5HYdk4GdYYbN]h%QZDi7]oc+l&b`Ld>o;]40]V"V.FtPF`(E`nE.]c)=NfAN0<&+cRhi?>[e1/i9m4=RM&<;GW:VYO,$8gb.M;4>_7<+DM+.9/C_[F`DC8iQ<qL#7+sVV[kt$mTKXCNYK5pb'LL)n?r09Ch".]j:A^"=sFOcNA*;d<`K<j,CQpXHLqTUS+?SC(A (CHbJdIl71 -L2)JSJ]8)`*)jZD2apUJAgS@CqF10K=A"d'k^=BYU_rR8DR_H+Q8R`8:I6AD &F@V$ZZMY'KSldU4F[%lL7BagH 1`\,A:BnAMsJq:,Xb9I&Ls #GR":fPSG6)gmj@BU&amZh6Z\q'(]aPDq>@p;:ji-n94iBF-D8o'pO%9%ANb>4($nEgT@k2bGKXUNp=V"Y!ZKfPr3BjT&cOLLA!/Ne`o^+W02Hl3J]_9q)AEqE)MdYf;1tQoA#N`jb"*A2FiY!-E#Y(OArJI+8?#$?Jre6gmP=R#bc>m"/[U0N+l# k.5IN%FrnJ`qIerjU(:D/\ZF5l4`3qYppHj?q9A9M rVrM5M!ng9>Z .$^2Fs Y:tr/AgR_]^6^FRi+8#kss[PBAcW#KGe@m8?PVRq1A#nkFNiT`_QfJtM'M-sBDdDS*D9b57SBX/e4hR+b45<5<+X6i22k3LP`.X1:gOVH>#m<k5'Y2g0010lM%"Cb=]CV70mEA@qCE'ImY:.$JEn=pn`m@CsI8K<KVp 80GVqkLeCNeBE[>jAcq-jU&f$30$II3Jpl76qKf42O7t,s_%NRVQhmQ2Zfh/?bX!S+f6:A2O1OLM4-o+KJrA1d>2B*\F<Jgh24C4*b8P&)[aIZWCo5,%<W)T>@o2g25^LYfP>r,!gB.DbCs>dfAn?p:LZ2V2o+&$XaGhB&MBZhZUH)oXgD6:pn*(0j6K=rH;6??\Qn7Qd-XmqlA.$YE]`54cOA3(["9-*\#t&jd,6eaDA6"D,\2D8"mCM9$S*!YB8A! 
\<1P9MVXgVjp$V*ELGEY*>2%EXTrDO]^.9m:mmh^nV#,o5)$2.!["@N5PF`:(ljL[^ES^"1V5g,*%\UfYJAlssgLlT-+b%kqJ hifHU<e(8nVIaH1Je4MZh,Ie5lhm7Q=3PcX`q3395hmU;X<<f5An^TseI[C4bZ@g(!a!dcBl<d!+,-2[i*Obo4Xk2b_]sg\">KKTc1`hB&3@Xf/Sj;((5HM%j hd-i>k\4(d\@8t1gY!Q2SF>t*O&Z[?QYdo<mNp.om6&rT>I_K%NG3"-dLTco&-,A\LY9s8SX(K_S86Q[I E0*;!HU-)Nr* d1Dhll_G_O[c6^"3U_6^`O#]8.6lF[1AZ=8&lF-TSp9N^9Cm'rR_dfblH%KZlFl!Q1ZM]`oBi*YA>"dJSO2AU[5/R+`-QI+cl(08btkfVY^$)TL'7W`#,[*Xg%.=sgDW:D-$!97NarD.k"Ya@XWOA:Bjb<B%6I4p7cb6q(*D]0+\Fk%g-2Jfif1pkcs(!ahc.+EQE_e"?=H<`[Tk<ts^=K4bX;5lMB.D%jIGc6AHBc\gUP#:f!89g-%ato` h>TT;Ne>bo=?cSkQ/ n^lSUMn6k^+;@]n`0&"V9tqlOi>9ZCjNr#bof6(12\^@fEqP?F3U!W;6D%WUd[>.SQN@,*j:g;g.hL==7YMI4$[g\8 cW<.:8gQgI$n$=+kT F 0`"e+-mp9Nk52!"srFN-hh9ft2 =[q7YDdknBcb(_??0o] K\+35LFQ+SKUl*f^8G`b1 R4NKb\4Rfj4HN8V7>"Fp-9t`<R*H-*O^2QTEh6ScZ#o<!/D`C>B AnS(7Y/40ohoYI`+!Q[2(s5aSOOQ;0gA5-M_SEF=0O/4FE\!C")m:SC"]@^6=%6,BPGNM2mc'Qf\8<=.sKAM)Cqp="[(7`U(QL*+JjMnVo-d-J"'0,gWsff6[3/<;=j@^Sp? 47p3P<&HU;2G0;G[QhJV.(m8Bi!DA&GXQdZ4>kr9$Q?+%4ho=WMFE&B4Q5=AT6Yr9%?[4]&6<elV0,,p<?H]f5MQ+gkTIDhq$^5PnMgb>[GMo.1o(gj#>W>]m6g:`_DC`B3!kD\-/FO_5/iI+5Ls[p2A_b,8g_aXt^g7ST!"U\<kJ1CS.c*XUDphPBonh4>a&AfN=UF CG^X7I^5/VL&T7pD]A:E0PQg;0R\Hg m&jW:= ;tQ.A6=F=<D-A\V-6Q$\17LMJ6mi<Zb_3btq%QOR>%:02SI"9 ^EZg11\e0=2eF^/ZSZ#h&bje9%.H4q@'p4*YrB4+ .ZbA-CdN\30^(JNk6cKJ\Y"EKH+STS;Z:UVSA1mtd M<Dk@:T<%flQ.\I:2He=A[p'I"?tSSO9.*ded%I]>Xd>*hfa'(A]?p3'?ZNC4p82 fn*eZLC8Kp:)0@mIj2)";9SeqtBB^+42=Nd3Gr^Vns3<-"+WF%7$ge94WS"+%[;^#8Ug%R&spIBgUX/HJ(^K@BJTeFYGI:jdSNX1Qo&a.OgOt70s])D ?S`\<#-[S[W"aQkA0m,FaN+4:_%F +m)a%BHkXB@Y61/0 YsS2>.NlnYZm3&n:rQEVZ'h<4^/M[@$qaXM#X(O7j/_iekr,*7M#><$o:f-3D:mUO2gTK82A5,a.IP&ScI+tIdM$)E%T]WnEooPL&$JqoPY fb^D^&.?\.c:_tm0AE+MB_B.'pnltbInSb[.o8!2)I(XA3_+a?#DA4o[k7]2%,B]I['0eoZ]t,,sXd?D+AGN@-9c/2Q9'p(e:,hh=5JZjZMg6\_.n$#mr9f[-A^kY%@s(@Z [8m24'C!,q)=9*Wk$,,,W:#\?HmrlG];sRATTY!29P`MR3B0Di&b+_h_,jrA0:E54>js>bs%8).Al9_(?6Zn)JLa]S9JIApaomLeo0$sDEe0V0,WVe(WR>Gb5#^J$n+7_c=V$:l([4G*eX=2b3pJ<*j.l<gA2kc.Ai-#A8?.A'\cSZ#&WbG+-l\qPg8m#@CT(dLiHD?P?ONMt8G/P 
W.V'J:$J/B-4MOL<_eq]r9(/05Pbrmg*,X[KX!\U$600]RTq0Lc8!"f*h7C?[l_q)b2>='>A5OhP\;D1nY%91q)!>*^J0G&1&k#*3Spm^]3r.rHZEki),`@4CI,2>mJ"QXYP&k^nI4K8:G/2Lc!36TC$%:0 AAgP_D']j.)sQ =80_XNqYt?E<K:e`]L6im%"8O6,*RkW?F`U?1(9?'smBAW=5mE( &AA5*V+>#S59#CR`B]rG';Q#tj^LJi'SQ&od9WnN]CrNf&h?n1lMD[nHA>3FV%=J<e8,IY1SfB59^%qcqe`]#@>rh\kj'$&$"YrQ0k$2I:8#.!07cL1#Y8a)K[r:eAXSOJK/aW8WAd8Xd>hWLfrDk_BPN XW4<D/F]R5:Bo2A[X<$(Bf>4E]h:#ai*4)k%*638_nj^;grgpV&H%"4Wti8lG"J@>J+M)#hdtZ0E7<SZ6BWe.^G7Q#mm&l/l2Xm^6F2I2C#<i!pg @Jt+XbXj*l%_h<sT]fm-RTM.Q!%(pI`[5ZV%bbR))5"h!"eMJKRsjYb=T8Al^1ZXioWg]H4_b"-!ttnZWtS@(\%TB:1->^?7`Pac<lPq9!%&5d=LNtAi+qAVt 5?n!g0GgmAtS7Z4Id:g1 AG3("<s8R&i1e_gge1HNEl<_qM_LZNA4L8t2ssC?''$i"QMgQk?Fa><D!0+<).r7Gfm\A%>X48dPYEk<#TPH)$_Q#,d8qi`IA$!ZmYB/rXIS];mFSI6VUAr`om't&oa2g`th1F]?4\G,o%>t3b:4UgqbtAqHbS$#cD8I8k([]Me3Gm@)Fq\H"$V(s1h?>_^Vc;a9]kg+5\)^+MjVj07]=JY6mlk9qZ_:g+Jre%+T fQ.cJ9TK%YG,71(:kV74(Yir%i$,`g#?sM2Q1a<]VZ07FX/A=qb[?1KH!\nSq:kLTXVBmW?WY"c])sTX<\6rGL3FiRU3.sB<60=/,h's3N=>qs5Jit;O?nEam,7757l=6LA-.mGbk3:q]=#oC8S_$c>mGLEio1K^'/h1.=9pBB!9a(-&VgPglM A8c14j1O/'8M\M3UoBUccC8roZbLOep$ND=bjQ(2%.1C"M$L?m$3W\n6K4&7S!g !4,`pEJ4F5jAY$E),dU&jUiiC4><&!rs<)#?JoWcc6IY0qtb9%NF\/p89FWK>WH.Jq&HTdgddl3*T%N]0p !;"2GWLf[O9KWhs(h@Ac[oj5t@$P2M.:A>PfFi;3*Pq?%MX0Lt)hL<j&a/9A3(!>h5 N?D!si%hn_Btp2Qagn>T\hU4gbMiqO1.'gaJ"ioTbn_T+M%hgGPmU$<0Q*4rnt3bqR3)d3kTnN0m\;a^W<#@MtdscIR'HZjl)01*Zae(Viob5FC?O[#UKK^Z']$CjtT8B[O/<Y%6KG-/g6QBZGBd`;;.S;b7s#>p)h4Jm`k=2c*B=R$6EX*8I*[jgd,>)sp:AT\fAZL?G7!fBimJ6+:O(j/4o+CX A")c4B:8=Qk1V-WJ.kDOYK8th[#Rmk&=f4c_f-R#K*?U9%,pNKDeL+SHmbcL(6]Ti<S4;i5NKcJ2&lm$C%?QgT141 iPob=k&RYraoEj [n_gdRDGnc^qW2srXg"s")HY AAD_97N+XYql<$@6]1<?!N*Ys=;?03m</hA@`d?(3[W4>?Ksb@$='?!RMK;IAHH_k R`lWbJP\B;XAH%gWptjHrN['tc6h!$b@ l,?-JU3M\WNQ/M\Z.k"c3m#R@]gP ,WAa/Aj=JCg2egdf cA`_K1nrn40JM+[OFk:<LFV:_r#'XU[aAN#P)S!cLibepIU3[k)sTL8#]r?Y0WgmY+O=nhDZSf)7:s3[TETO(?@ARn&_5^dY$1iBAbW"n;o==]BGCEiRq>2.3J5A<71USti8Hg*%TtdXPj<i6]JVUhsUA,=%PKQ?8tIisP&FWqF^%ig7'i1bM'iI=i!7a*hgnC1tjptFh=A<Z2<C,H-*I*9@% T:@8V8 
ph:\1)corNLn'*XG6MZ <U(d/En>4"RH9m<inJGY?t;L:(-J9$iP=GRX%+h]m cdce[HRQ6q3,`nLG(!@RC6>]K?kR<p<ZY-GoH_i_>cZtGHA8ID/I8fRBQj]kB&\mU25<"DTp#G.1N?54i4bX;(kH17'r[7]VUrNnnN<W;*53T\T.qt99Om%KWT1qd*I`b[-f\7F!-[jntYYAMsntXA=>! H$&;>I#oD*&2EKF5.44<GfW@qjT]GoVC>;Y1j0[08Mp[n$+/eg;Xs\sfm\YU+.XcP>?sGfKd3QqC\JRS]ndgoko$'JYSq_/QDfAQK?9fT2BW83L*1E8Hf%"@(]Q Q)2=g47THjh*p(_#o29^/g7`/'==eRQ0[Af0`?&^D]DM\!?:+J X2bG>AiJL#kjd0_1"IB%)P'5o2Vb!s`6ASOobS=GadjR1q:MJAAdtIV:lL8eXF4K;^\iS;f>5fF!TD.tGSJo9qp,]!)YsSn3VsiUtqRq!01`QG he7)E-sBRXgM67Y%rACGSM1EXn\R7$7I__*kr0A8"3F?17i%4?@(lQNCm)acGB;G3I+i3rO)+2"T`J>A8I_B=*Q2:Qk-mdJL^3]8Fj0t0COl^Ln )YtroDj?J_cJ*%".F=Wh$=tO-9p#="/Hr3n<`b)03;iOXGGdDNAEFA/s40<kZt[/dN*7XTj9\m'9>;X@9jrk4=Y>7^V4s4e/]-2%cL`/%H.#C/66h0aNi8W+6BPLi![ n_OcGFibhZ1c=sO^BRO8JrpE8n\15]l3;9Ra?;2X;c=dTZAkjJ`sc@SIAkoBIl$QaJS&,o"B#)(ec*[ehJ&?ksoSn0RGC#ad\@WCe,&pDS0fmJ`KE.iod_8cQYgO6p'-G4?"\-5`_bjs2V)t)3-Cf:SqBh+r_Sl[8FJ+sl:NIOL*E)/P0 9<M_UdfQUpo7=iDoj"?_#e3R^J?)%WMX:pUPZ(IG=UI7g^9AM6<ZA&c1'\k3=P>\H)Z<$c+2fk,<)$+=5UK 3_>]QWK,b3X_k9EI2Zn1.OAJ_+Kjc:.>iLGEUFV#U;DX";n!SG16@3fcP?[6JMlPs"-j0J/:i+1m":VsTss7E-M\Q(VR!;R3#SHSZl%JM Y[@,-(n /;"h]WIIPk+ UMD1P]gobmSKsdM.J4&#P3M9:ErT8%A/A+><`oO<"0=LK;c2Md""n]o_TbOoESkPN>Fr1PdB1j\?5*.V,)RAJWW`@csFaSWE>cT^PO3EL(=K%]=3ld;$fISk^0@4<t-A#,1*/j:V3q ^7j<Z-$)7+"ne#.)Hln2gn3O/rl6K@h[;AXAMM$jTtaT5YYVGCe7_9.Q*mhaKJ-+jNQ<=jeMU0i:a=UlM4q'__]H.PF)rVA"6JPP`!s$b-X5%R"1A&g;)ZAf7Hel]h@X ?JAT??H\XVA!kk;&Bs5c6QB3^BXo3gY'oNkOnOD3St2EZ0gp]!`.%J7CfJH7+pm0$Ns Ocs76g7?9#AnUQZSbR2ZV'+-m*qHS,:I[+?(%_1IRVW9&K_0W /9="$S:dYP_WoOK%tA8r`3hAFlT \:NpC(6]Je*T1QM\V0-J"6q.1AKrJGH-S<pqA00hDVj_\jVO9d$N(.rUsdN'<h]%PqmND(e;+P>0)o,P"W__$Z&Mhg6=.RDj?hB`''9""+8jeI5,O:s4.[ODpEbr7cL8YA#*0l[0orYp?I]ld!98YXkL)XQTDp:?Q"b<(gY-L*I#f,]4l_PO$&o29Jh3)5Bo^FLs`mbH[F@kNNAnIR:aR.E7 !2U9\1RiE+<qBh1bfpZgMXZ,:Y,0,?7jIq"69sP==ksNkrPK2A?)os_K@_ A^8`SYmX,aDr89h2KIrsi?SK0A++,3:=*lo/V]7*%$D^r3Cj>ml!J?ZTWK88Y=gAoqE$[iG^2&#SPp!+_)iRrjUBm&`!e@]b.d^iP2eiVIQgQSWJa]2Ik^mQMPJDO?q?7"ONnK*4$n*jG"/pK*bUBHWoM#\YQ1D1a[t3_s8N"$[!)'ifH 
Z.DAaiX7e_Ub`\l2a!Jnk0<D@*g/\!,b)s=S:!2odJkf!B_c8(V"]b:E+>'!Tl]1OpfsO]jdcX?PbXMN4#_DE=,V<]kWX\ba6#V:GRH,*>BYKS!0ome=EtQ^Q^#[kW]&%e5bkeSY:p&'=J+EW=iRQl%3# Ar36ir:$M0tNsm3O;i0LSj,@JcK@KQ5F$#?0;p N817&,m^s7aMrdbsCph_d\pEqLjrJ$\73G`Z_janham:U)L\f1O;^-= rS0pMF57Y-fXA$,-rp01<PLIpiQ;h``1rlTF.)t#Cf8*:Nb<#kTYfM..W&r<`pa>>)B?%3H`VmpcV^>TXRi9]mbnb5Z7mR=H_CbK[:aAbaNpJ2n\M40n>nOe' XG/Wk!-'+MU"d-:?kQYh0it<#n):+ Q+km?m1kCs,^Ui2\WlA*#ODMPfKHEG]("XQR)?Ohc4YPr?t?_>aP>T<hZpT,ha0`KB LK2eefW.C?i1_ek\NdA`(RpAM+Aji(3"aa\)9ttWk4_;(_OB/P_clEc:rcX99REgNR((+!q_WmIgV#__k<\1DLo3R+L>Fgo)c>`Q;jokC1t=Qq%#&=\6kp/sA8+"MrD>apNL%`Y@; ZLsPb4_"FkgoQ8*kNfU(l[8$= "er0G1Fd'B:AZfAlo$@$O\Gmnjm$P$=f.Y5Cep(jc1\:5@?9pnn9MmqSoN&WQ.L!5<c7SH30Ar)'jj;EAAZHOcNpF3IPk7a>FsR 3Wa#E!<LBBoml'A5LJq6MI0 N]E<8[LN>7_fD+mq?OK%#=JUaH0)BE&;]Bs780&ptA YMLt_ptb\8f)/AM of6QC?#hmUEem4MR"IE]0:CT]s5o[OZ$soO!:7I+!LAeCGH&?00pMA/Et)ip8&[%@Pa&^?a'F,*DEe:=i9)UlD] OH@+55;U_%VAPCQkVGk5;7' +4^W.=OP2SEB@?(.Bk)ii:+&^LB9cKgj8Btg8b@=B5K5idP;n%C;N)*:^'H_9aHLmU":r&d Ff_OWB5/g;M4L@X:D/2JNE0dAj"Ceg-WQ]*7I:D?BW+#?_FoSUs*9hCUWe`lK9P^fP KGGI%)"sNAQG-Xr:op<pp%(p[geW0Yot\+7e`AS3#]<I0kF=WD*H:DRP4C8 C*T7[/O'T4+#6f*=7AHC&@?RG;[]_i4)4rt&W?Gq[F>bQ7IO3^#GY\Xs@+:0qKBI2+isSbG+W7O8dm%7hbllcY sA(d]Q:io--%h*4Ge$cAgr`6bh0Z8siL`>Kb=BO_*&ld;k%B+5d@AlW_DBI" <':R#<K?^rEA6_mOk]gZ>Qci-7Mis#?=D#9S5^jn]EHXgp*H_HALM4!&L)N.IKDrXkQbNTYV)LaJot?O6htE%^OE+mIrGUKKGXL:R7Ji:g\UT(*dMpVf\:a0co7H2"6qUnb0o=.B3rI:@pKXMA+_UHaMp^>*>Rmt9m]La!GGAP%0,"%MEF\JXr<%n^OH!c_BdM?NOq[DKqdBL_WC'$D`\qa["]D'c_[AB9Q\AO]c4ad7P-_U"paLAQ#L"_"\-U5b4HnsjOsV8ARl4fi&q=q.(JO:iVZ!8sle$0D&_Y'dG/oO>\R>t4-?kUmH2- =N(;="ShBU9hGEVK P'b/T4-9:`'`cT2'_jTU(,)`p"=@)8"qg4C2hh"A*'3nJJ0]V*89-4t[SPOrkbN?KEJ&<mIhUm9BT1$5#UNnGV#&VjL\;c/%heI6 
ASV@*#"f*mWfN.PAT^m$fIYl>#4k3pUV=irI&8gAt]`4iM`r>A`3LiIA'E`'V<^!3mRn$sA*Ci.ah<V0&jLRp6DB0/_?A!QEGs`pkO']5@>to%Nd8rn<kG?\L?Yd&n3k0?_@gLKE@=2`<YIs>*gast/>8)GANspk69PoA5G&>;:9!,SO"0OMa8r`Vbl9B+M>!>1!f"+:9-ctYqdg1fW0d(_#/te4N/b%*DZ%b1OU9sr)G9Z9eGT*aMKiTGY65&8*N35siCZ=:W1GRK<Hpp'G<6!WU)9p`_d&Qf^q,-&8@dBhtI[Y].2eEeVCtijHCiB$Fh"pZK06*>7HmAXCUF*_`S4g 6Y43V>rf:rAEV<0[/,UZ\q5?E20A_OZ25!9T!@5 lm4S"[NjK lC:[(P?V/if]#;Ie^k7gdO$mTDH.m4[5>YIi$hgA#*!]k\rO1U5"J`$N1''Y(Kjdj>Za8.12gsSMk)XY99k8_f1GYUFfn!--Z'0I$hLUKQA_t@W@Un%C;NrqX;@%M"DZ(ZV3UB'B<#Y@,$.OZaKAsmQb]^Qep+OCC:Tj-<q<N\;ACo2Y9P9 /BLs>.W?P`Naoo\W&9T2IL$BH\*W_%Oo:G=Jlr:fiW+S^PhWmSog;i&lO0A0<M=D&D3>9jhl69Fb_,Ghn5TP^4Uh$i"%;Fa<D %`[A]b&8JM9hAU)-mN=f)=qbKYi*h";s6D\]&<a7#i>)P;>TPj;W#r\%=:H\q`*A)/EZ=eZ Y?'E75_`*pL6)H6#ed&3\r:Ug1t*E eT-_$<e!%E\_ oD^:C#kCm>IOsA8!#$b5m$(O$bO$(6kWG@\Y5jl4R-98lt\Y\[tA)IJ a`ALi%)Q*%j,!tWYi3[oSc+HO6oZ>;a>BH&[@=kh(g)?1%s36gf2;7DcC\9)n/([6BQIOB8UFV;K2Ad%8E+6MgNeCff4G3?cFN?-_joN,$<=X+m%YsA;OU8%?/.Ub$fFtQ:(AN9m70K2XEL%[XA-L0Q)>4(OIWAp]qO[cqL>N9&X&qk]]&)?r&UGZPQfj4["I,1Z#QPq\p%_J 2b'nn@p[<1M9K@Tc!<3D,ESNOYoK<+gV\<(ADKc#Z+:MJs8hj_=ae/2m5S5.9<#$hbTNo"^G6=]e&2J3F]$=hS;\Q"]Akt4hj+MlcfdCWL64MR@-^oj"HY=,FeRtbD\[S0P:fo 0R3O t<UkS;dkUN:h8+c`n^NmGXq/ NY@f>"*#k#&aq.5bbOf8Lfq\f^=[6AN)&oXctr\mNAJ.dt\F7a>GV pnj2+.Br\&:0:_S+Xr7IQ`bU?Vf[:<Qrqrp9F4j`^iI7.jllR-'+s7t!Qa,?!1V.&L 4'0<CA`WPVgeW.H9Kj+VrTPZ*? 
OLFdaE oGn@d;T]DN0cjaX^FiP8I\/OhF>asH.!W6`<_(?BUIGe014HDKZnIDsY/K6`hq1jA["gY^DH02R7#cXmWYh!s^_ <TVYPn;AA7?XVq[mA.tEb(5P+VY?]G<W0T]*C,mNL<]Z@FLmTn`p,mjh8l9">A?@E44Z.cEr*b;cgY4.FRT%28's"b/@6,7-c1.kHnb2$ZjHd'F#E-ir?g/Zf\_#AdeSl#0YD4C##4Gj>$D#dbF9;n/Q)a&cNlM;]&=Yk6k(DQk(=4@'RNh+=*e+kj1AcpC%N8=r2;G+<So 63NJON+Q*aX-+)PL!0%k(:j@.5n(/)2o][O]45X8-Ai_NQISKY$(&r!3:5MM6e"8tED@6FADKQP+JB!oFsAZ@D"39`glmmmR:UW@'8"[QH/g4tr'Uep[+Hg$l*,8,ZLn)s#I(3WPLa.1j(3\_DKW!\BIj#+1N[GAk'KZl0f#(cc=)?9Xc^FG*h[516XRZ3-bpie-0>$=9,4J$0XL!6D0)'Ob"dgmP[20BtQp>aB7D+-B\%4P%\_9Uc:bimEB4od&b!@7k@V\UXltEi<G&O0s!,(6/cb#+#:4et>s6.R,&'; U(]P>&XCp-;XQ\-cU5]9&n)qKZpfC0jEA^++9MA@%LEqS/7Es`Jco=Kf@,Xi.4_p-j+=5eqP\C#qp'rKebkWK6ij!b?V=4-dr#dgXF`9qYWA.nsn)Cf1/0Ag;VC<m6E*i )_)+X:Er\=dg$;L8m +WECs%3=O[+hH'h"Q>>e>E%m27a1"Q.#ng['MR+o;03mp_%#9QlAGKak/A/"Cf+5,Ic)+!*IA,h&jG9(A8"[YMVMCO6m)? V/hN]_c1BkFTF3Sso7pGQ19;m>iCqXEr8:Z@6!eiepA.8K^h2_?Q5@r_8H.5)*d_gH;41/__#gnT8%KE+dqqV cpKtJIF C9^Maq*,]P=Ct/Z-c&E^;M%rlA1"t8Ji42gR\c*EoR)J9.6?e3]_^G+K$:)[6!<Vro&s^H:n`j262HXj"Jp__)=)D N'.jE\A12ae1Z< #EZB41U-%Jg7t3,*h#"=iRo@sC^je%&h8g$BdL'K1L3 5D9.Og5h=&#:a&2q$OSNY>Rn>9\;O7(`8)jBj[+4>AH*Ei@fUD^A;gbla\d7MaQtH#Q"`qF:"04oiKV=^Q?PEI%9eq$7"-2pMH3<\X5L5LfhiojbbZWd?#7m4VESC&=K=5,6Zje@e(58Tl?mmh;Y)T%EPk1r/`H4j%.\p0s%X!Lp2m:L$7KO:B4F/qi0[Wp!O_lO$^*k-lY@QfIDV)sWY*5aD.&ctt)=l-s-l4KTHGR]<qb!T=.8%IBdBT85(krsg/BffhOHDkCM9[GBo6]0q(:eO@&<a$gVYX.HFn\o&ZS>FM?g?P3s"o2[lh[/Y_!K3gIUTiKfQ5^!pX`md#$hL*/S3=hVT/B(P^%8&l7l"'hprAqWCl]c[O=.2#8oJc#A4]F:%\_0K,>1MUd4=;I9k2/Dg.Fo3AJ\q"JZs%XdL7UXGPNmSO![L7acm_XG3cLP886lr]p0N]dI99IKZDEHU9:oMAj&Q/NH+3,28S`g<>RDo\CtZsGaF.m&d,H>*aA`l/.q6d3T3[r(Zi_EoVf?q"UfN\;S7n,Fm4es"OkbE3X71]T tMn(/iQEP>EBM'M@8pa2f`>@VODBO&66T>.3F;^2P?7B%!V%R3#1n$>a,PC5.s'me%T$igaenhZ/';-o&YA+7g]'?9R0 HP)q`g&.N"$%3a0d^B01<O23R"iY91[X*?@7V4`B4_kMCSqjHLDfK#hgMrkVqMh=fWP"" ak5729trq-Pr+/H 
:ohn_13aM'd52_=$U&()eda3^^8[e-rD[+]^t%<Ge6IX/AD^<;C%@]!ZHo7L+@qd6=+:-fK<:?8cS>.p*oAV*A#_9D2iY.Y:n#kT7<Pp8rWFgP1Ib@]V:BbW:!PT$q$I0a+g&R0UT9D_J_?/A;_0!9Uo,Tca04-c26e1.>N?XMp%6(HC@??ABt^H$%ABY(nZ!<"'Y#!$qC9>mr9/3fdl,8rl[ rD_fBHJ8?oKsd\3hbU7*dB7g:RHIg5lO(/Q?m1&Pjh#B_^i?ZC!3+W6O/PE$ZKKbnVHed4A9aePnRZ4_X02CShsUb`4W7CK4iB0,!2iE2(1 gHgQ(JAAn25(1T/g8d*I+*lY5YG?:.0j[5iUAjmA5>\U"&Y%>A47N8,M\)4B(iobO%XGHK^Sp$:o,BtCTsOp?%cnFK4Z'D-f'4&g;5r>GB1)HApnC?DOCYj*eF]N]1Grg[3+LTKZEj*3S,d]MUsrX+U>EEDJ"%K8lPT\hWM\VDa# 2m&VY&SMB)L2Ot0>2P)Mt-j,UYn"ei>WE%g30n*I@@Y@L.P/'&t">8Cp1',b4*h0pXQ-TQ[_!m>P&>H,<b30ASG88QGojiiZ>Z8V;-[OMTS.:O\+0<L"c)$$Rclt,Q@TWq,AmN=j1rsRmif]G^660B.dm$k$:0EPQnYD"Ag+7M4NfiS[W5F=qPM?It5sChA%g$j$@Ye]CIGBUg?qI+Fa/B(j>ipF%P3m]#k?%IJADme'c<9Li(0rHN76C&pg1kA7!G-,.+83dAE*MN1e57FCNkkN0Y"J69C\k_88)+NC`Al_o[G?XQ1VAiMS!0.]*39J&P^+:D"4VUN)A\Fg-0TpC6j))C2\>;/gP%;C_%NTk^Q^2EJ7Pod8Ia/758r)aF!V'i%Kb)MGGg-Zd#<QFIKOVi0o!q7e6-$Sig<IB-dd8e,7qJ23/`H:@hi(*7X@NPi+N W[pqI`XKX:I-1k^[eC*SREh1'*:ZrZp^b*?6D!C;L#+-Y\A#Rl6ab8Pq?oP)&B25SKYJAtC4=)X2ETM[)J5E1*)+j,-fBKHAH!Ym0ZncnMOHt@<oQVZ!8Xb3J9ESs?4$8B?:Ad*>.HHc6^h/^2nE #<P[*md@R,5:>WQDEKkJmP/7AE'pT?RMRN(>?jId//^M8\D5A-rDDNqn9m=Ok^%CSZ1V^6SZ<9=Wa?B_Q9>lJ#isoZ5-aS-BOdILmhWf*cih9fqn4n)/9V-le Uo-'=@Y%>p1qcQPPM63TP^nQ iZpQM;6kfIl&o[6_MWA$I3*V&nS.tt',OLKtnrCS7Lf.'p Gb1W3RJ[V]7^e8UV_T)_d\/d f+`a6t&dTjV,AC*c1E\[f(FM?B4[3IAWIH]=K;n&rK5_Ts7.T->+;2QEL^^Of`[mY<dVYA"?KmJtjm[YU[+M'NP9UcL=*P7d8km\Sjod4A>3\Gm<Z&F3<+Qi7$`0DaIVafL3a^8gbs+m]-FBseiZZlApU-Qcaj1DkEm&:/d(cLo/+H6nVcH=RQS8"C>o)kRD?`hFU:XkOL."K:a)<-qK+e3^`32^7^Ah>-V.?]07A&m['DG7Ssl<ApJo?6cmc^^!X'=+c(VH.R'XnNQD7/(/ZdkDcL=cqkX#Qh1]8##SEjcnY9LK@5=lYN91_SOSXA"T)/9B?eU8 PS8Fg`+#SpOLZ2SAR(]/IF'kM=4#0<HR7i54\@^\+JGSM/PO,dT:pe*+(C!%r^Vp._CSSd,R!2pVe^*t`@p[[Vk,cD=Io?rA>HD97<>@rPA^$fE;st ;UK1]8)<,qSAOS(jtr!sh&0MC0Bp8:dMq)7d<G;mJ&to"t"g;%)M'2dPe!)*2^mE-^AZK#Q&d!.R(LJW.^Q43d9e[O(^-G$oJAJL2%^4p>7tAO< 
&$=)]3r"#,XiA8T49/[C5?md_+Z9do<;Lb8s&4U5_PaJtN.,jVn^P$)<'q8QE.8V=r90QcNm@q%\kAIN!E^kCF*B0+'qMf'2>Ah(/^IkHc]fa"`JAS^$$fRX&o813E;hm'U-CS?LR54dTU82 %'^_M<d/Y3tgMl!NdS&&&Sp#?_6pU=I,/Uq8EYoCpF*;Z5N8S_d:Fomj9%E72fo@%X%S_2 1*M1q3*ClM2@3,OZE9$'(N[?tZ^7ClN`%]t-k$[f"#ZW/6DC$WF-&M6)^7[0?bi/!L Tgel;P APkH'R_snZ#YeQ=OD<X8?*+hoG1[,h4%T<'s/tF^Do-0++JA-0J'\0##="sbr]@(JLEHZ6n="m!kT!j4ebJrM3U=mVA.k;#$oD6s"E=Jc0ag19`+P@Set#o3<\BkoI@V+`+#^9NFs"i+Hp)r%U8@:%?9Zr;$kO*<l=%?,fE!8k3X#*aaod&4)O/_`l6.ec5`@O^&!%PD+-o%#'4NbD%rQOV$_;T?@\79rerL@q,V/s6M,AA</SNkg@T./[gb]GoD=Uj*/_1[U^B5d+PL$H,LC6O0Bjq7#ki<<Zjd3/I_pj2^h=<_Wq.)W[<m;i5"To[7!MJ7[Y?=2&;@`fCMM+^*T%dLUHqd1g+0;f>sNOMghCH'tQci%$WPfA,Tjkob\\,5=qAQ#&*#,h`.jDlqi&qXS#e%94D_Tfl&^R=:0he\FO!!ZLX!=d&,8oK64Ab*rT=9IQ.lHJ-gPb<D2lI27,OT(jY+<OG"C$!A=?1Ss*,,fp"=^#aT:BfY>\-K'njG7W$`moQnl'UsIlA7Y>h3[$.50@m\UFFAbKPekPsUg2?A0*@-cRT=mP7TDER0\1gj+WAQ`i\Ts3Z*c1?;jqQ"CQ?^L4=;./Wdf[`cEsFdr6=0QD a18&gs"AW7"nh$AH08?M:NkAGRYL=q_6".6[2Q14nKtMpP'C2I^8/OR!JfrU+'#q[*O3M4L986<pEkr $harIWNoA5\9-pb2@$]8<%7mMkkZ< &\rT3';I<R2 a]YsDieV,`GB#@tSkVA9CS\KAU?%p,\cTRk 9FOAahlL^XpBb$51CT]4'4sBbGt[`VBda5FPg`O!AP`H6:?aFhM2t=0J_'>/%E< Ao8'5X8 O[&@L._>$ZYe1PPY9-7C2# =YUqm+;/5p2_Ann]N-lL*l:tX6AjBiDc`e%!g4;SNaV)>99N!?(KPQ0Q:,]qL>;,.`EUo<&a$=PpGhXOm)t _&;8#HfSRMR? 
/>clW,HkdXYVdjr`:9k=<[WZ7EJ^2l$\=`lM%+_bAn<q9]\>d[?9>jn_n8RL6kpI=kCj6Qg_!6$;,27AAe#bV>$3gXJXD`>G&VB$#H9A lhGF-[4.2?-+;VV02.1c%2Ki3;W[I2>qa0J4WM`K$;[X73<j&dWeZ&sY\bc0DL3h6Nn X9t<aC=Z:tH,A:S^$?cR!(+Hs*)W41qLZ[N$KHetJD<cHXYCZ#gY`1<UeW4(4(CBNKT?H//F.)p)<G"<5gt<br,fBk,XE`7:TQ-A%d65.c-kUq4YA!LN,FBic$fa:NPS',lg):<8EQ7T`X<JBrHp4F1'U5Al5F_[8;MMl:Z9;]`4c]e GaAi e9cMc<<Do__t3cPDb0A%Lee)*@"c[,I3;BP-Mbi\SN;5.MF4.cD>`flB,t;e6GRh)NXkqRt*Z>f$fN%_o&6T@0QjL+8R1O)m?!>YI^K3tWc95Y\;5S*9?Qj>]h._Zs^EQTJk;A?YA^Q,c81pA2Kb C<*dEQ@PRtlj\A,l>W>GBA^=D/<9jZLV8pB^Qe8S#BiklO0EQdM]Y8Pq1^M1;rmMHLUP\$ Xbodoa%^'9XkE;MmRtT;!W/e/;`^&')H.sXQBQ*d<Wc>%EKgop`o!(mi.EsCk0+lONOnlM+8?W't^k)L\==d=l[P5GYAqgPS7TX^C2B:p8\6B9sZQh?j7jrTk[q@!$o/G&n@5E>^7&<U8&c; ^3B;n?,+Ea?sfTB_iD8%Ers@>%A=-h_Fpm&`\/)3?GSTBWjA/6W9p3bf5<A(O% g5ctVrEbd?K:=G'EpI"Zb,-o0/A;?q+AqRN\&^Zn)Z,ti#\5t'D_;NSt,Gd.EnRj0%JIbAY%fMmA_I?B04#>Pm)Wj 2M>5K(<gqi0/Mc9kg=5!aMcd\GkTA^1n.DBj;0DiY>'Y.V"rYkNNDVr?!G;qES[Q7)&_te dYVBJL^LUrQl"25['"`GQ[[k2>I02O^@0sOtNq#Y1:T__V('?=WI_0&TOOBe8f\Qn42^n)?8-YH7DEUq;lLLZoUh@ih'Og:-A"WAK6S8,'/[5/B TlH,%,dO@; A61b85dp[a*F-70:9d`pih`]V:_XcH\P)t k67:f)85Yh6C,pXq>#aC61*5r(agq4K=(,2btc 3CVe:>tKp&mp_;<8KCoG-I1-MgYfDArQRN@1`ds50<AFW70S;lZ:Ger$V43 Brg(]>@g_b:M\<?`,^@n6_1>!?(4U7AHQ3'LU\=l?CG$-,VU\ii_oUnOGSY;f%[mFB]G+e&q/P*V]h!abF`R?;DLen/K;&#e/@V>qZ=cK0ja4KAZ&WlmdCDeV)BA[Z?=4SmqV&lC\%g/6kR!!H)'k<(Sl33W')p_5ko_<OmtFT]%eF=C;lUEJp4NW;BI& A=&cXfbcXW(@XtAbGH8< ;Pfnc&o25si^!jHl+`XO(pUGK>,=q&nS7J7F.U[Phh+4f I$C'6FNtAn[@-&p"Lp(!q(r1FJYc$Zg!9FE.e0JnY"blEYo;r:'Asf(P63)8]7@!o &"?dVC\$n8@^N4GVL K6!:'E(GH E8#okq*n7>,kD%arjH:jgt@p_&6+pTO7;*=01q] Zd@D\\DN@/G3-<[Zp]#">q40K#E@LH/I(3ZQb!NA4cO0B'@+D#@@9naEr1EGW>/4bBj.`lablik#;?\Rc+":=$?MoF5Acg(TDi^ s#)R'lTA>89e.bmM;jKbJk5A(RRJN*i(`d&)5]$VcQos1/#2phBoXB_&VID` _+$V.@8AA`k@Ank9)V36/O.3Q1ikgAl$TZQl5X 83(?dTPs?F!Mo;9L?AX`Jbm[s*2bb`G45<-NM$Uc/ 'rV0!gHW7Bdb`2."U?5`[Zp%Y29@F[Qn_s&nnqKI3o@UH9:hVoH?/+eF(jk')AGn%F1dp/PA 1pstn8fRi:^AL^9sF-!")holtVOrB3f2p6^LEcG?!gmdh9tP<tt5ARprEeQmgB481HkU7i 
sJ?6+n^DtH1dJ!m8g1rkKCAIqEj$Nt8HnD<7k;]\^M(`0;55dqE[ALIqk"C\<XobsWsNBn (?LX-/6D(=sT+K_3fAi1/kn1&TX<d27G<:htfRBI9`.2<fmRVPF#h:Q11&/T n7fIPA6.gTlXONt'[KfT=MnG2(fV8Q`@KVMabk1t-nX`HJ(V+)#%%4Wo&^l\HD3dN8E^``; G0DpaRp0; fO@SqPHeUDdL34o"f_.&(\9YX*`)o3XtAYi5E&T?<]Y&S(`2=+ %f9Q4O%r*f\L%V.s8fc%W!iY1Q\8[PT.`nF#QO<D1,<]Ac&Q[P](:Qs..\E')<>\`YU0qc>_!$""8^)Ebf!dH#D74(H6K"e%+hNJIfqCh/3BYAPQ\Wj`-AI28K:3Y2S[]Y4B#3_N)X8C I.h!P)f84RgSPCq5B3^J@Y^E.WnAC\sFJ;!E!/bEhsFFSb^JsGAA7E;&%P]c+P:1N3`X5?:A,]%L1IPV]H@Lclptin_V&<fbHPR4O6JV>%3YCo_#q`k,*ZRgL PJ[UWm;C8gkl?c-N(qRHO/pSq>ZOjA8&'bm5e?Eh]9j-slrO.(WA0>"?pMKO`$3MdSGlj_kr4#0<#Mjd_bff'0]O?'gC-dAFK2YKrA%MUDn /=g>K%$mIpkWb-oTWY33@42n(*PNQ:rcKA./AOFI)o '-12j,gQj_&fAH42)VeIR#"]>^B::]DQgZDLTG_9s#`\4\s[BH6gM(k: +2n2QFZ1!clKpAjG7hLn.F$XE^L_SVO*bU<XM*\W&;-DXMjXaaU+*Og'-)=m5nH45B,@q'\..8E!E#;r4'Z@Sln-[[Am<.:CHB00Fa(0G1K(@dH:2tUNqGHpK$W(P- TgRXh*49m.=hAFsq/gS"4[bio !A]C:7(+$[bo,gY@8jQ-&=]3ohbJl#Q1e4#4<$Sd5Q$E_<sXD3glOm,WV]+j,%p4$G8m:Ofk%UmNdUk\6se0lqt?e19UAHb:4Chc;S10-UPET]bR'RpIja)8GZS]#95_COp!ec<1jkMS6,&5Q`gJFK@s[[#-*WqVm:Sssi_[7G#Wlb>+t ?[QX:-cgYH]Fk_?:/sKq22<6L!0hnU-m`la2"Se[s3TOG\U1i`>8]*,j+A^l 33gKkB,l)a );aoOlC<#nf?hf)6g[58_'A(N`> t2f2/X]ZAS6JhqllB!h2"Qe483 !ne;)2$:jbp0G0gC\t3/&b2nRoa;`lW@p']OIIBK&Y)'l@$I2kDEMD:*@:M* )tCf ^:JUa+9,r+FG<;;R*klWW=7O?J2"(4>UUJ:=ismVp!o!.Z6_4aM7\op$0YAK\34V-LR*2ISSKr*+Z/j^L4c!'>%mW%QU"PVbHEcE^=A@B:#L3g)N'e9*m]B8T%m#p[hCQO"k`8,B^j4&Z&<nX*RA#^HOR'M=,53`*RR,Z[,.MThq8BFpCA(sBh5*JOC*(9`_2-$QLUdc$WZP%&`!dg]),$s>qHa-EVlGbn,^J&hG9'9AI=Y3P$tE_4AW4Y^T.p@<9d;9*658d$aS'5f3lXJ'3!_U3M`8JBC-G(FeeG[Od%he_"%Zo(,qo(c33 .m;:psTHUiPU;^lGUshP%5%,l?(]Mff;n=[1Wgb^Jf6Q,<i.M"GO#@k'Op@GlX#>jIGqAGSTVHpWL5bLj'.sf.-UBjlJKfqd_d4rKEL,i2\/Xciq_f>44PRpROh<#^&H[$(RYW*^,/Ol I;l^9&fj<Llr`)PF4MGeQX0Q,rP]Sqo!k@5`+N]i'NE&kV@WE.KMn<m>'DtNl[(CdED]'QdET6dAei#G=#f,sqR@AQT-&1Pc)]Q_O&6#i7$iP@>lFZ4],&-MfTCg"*'`Rq\Hh5i 
,L#+6$:V6SW[Tsi^5iG-t3Bf^FahQCOd=q*[R45bc=?GfJMY[cR"R"j9,I=EK;bh:l/^S2b=[8+JK;tZp0p+V%:D<B,JVXfo6Wo5LQDls<%5^fJ5Vmqi^\,5X224ZK\%:;3H`8a^1&-Ao>/Ck,gN'E,_%DkBe<m>g)a$nf!!-2=! >d7j=:l\pIc!A'I$p/]c"n<L/NE-1Qo4Z:@ST1b6)dQG=NUfIq-,db`a^GSMm"._AY9GS?i0*mI0gOdfr`lX?Z^ZF[1-&l:5EV.!>P/),.nt@/,7;+KVsRO-acIj`g;m[^#BkIZq_?TA990PGc\.ACkK/orpm:'/jT9[jn=L8C^-f>DhN?YX:AF&q`t/)=g?2[Y%A-2+0:Z`Qo`fH81%/d[s9oqo\P)=Fh`Wr:ANZ$1\@RYHcJ<:'He lQsoo]rJONQJ"3(f\YqO@HF!(=<Um[kImM3-!U>U*]\1Y9a''A;,IIPeHPFJ%2=*1FsmnZ[ANmVXNPRgJ]B3=g)37Grhd_#mF15;O&8<#?(W96;ImlG6EaI pY8AASA02NtR^I!7^f9k#.=R=cV`-,_&QqIFaI%oTn7an-!pO;)N >JTTeO.)6'C"AoK9RnLJ@+hHRj=[Nb!(JY4*rrE'R0)qSWPb?C;Rl(F9l?7:XL[/DNS#+QI^B1Z7b);nIVsoOl9CAh-d-- \\PIN!S*>-JL/&-RGcC1/ aU([@nG*4AN<(.7Di6cLgX4qq\D?M6.@Fmod%de_DD)&'9-_m&?nrGVEFnk.M)UW$i%kPAKUZ%!^+,2@b'Fk?4skJGKNdUgZdokFE" -7']_m. YO>beJ?g[Y#A5ARiT(U.KVe4;<E#\EbO!iG`bakX:qjjD"p5Q0?4-\e!(2QZ>:QBgOM2^OZ(GK# li9G+rD>P*5o,H/O1I&+jtt\Hl:Ogn4p@@`-"Q/jcmS_at&C8lp-c`DMh!4JXYN$XKYP(Z#%gHI EV$qpVP_Q/Q:#5j\=bi]bbPUH6]sV::AK)HT^O)P P"+AR+S1OTDd$[Dl^!]if4?&HP ITD;-/KhdmBaANA#$Ll4<&rOIA#[H583h$0C##K"tCp'n^_>s4=lrM\5SED,/AF^Gn#Q,P38AD+([Ka);gF43.fV#2iYG]9m,Rn$t[;<4kC:7DAf,R*bZQE P;5MqT$M?_HfD9@];\?U5qsqL8-88l6d<C 0H7bW,,^c[#3;-=s(H-"],TQO?%<`L1Z pXPs$%Cs\-$'BX!PC)16#Qq_WDTaBN5F"U[;(SB,6`rH6o`9<]$K.>_8Y+ntTHYEc21Cdaa@Ak=]3$2?CN8Y)qY;H?XY p+*;IUdr]/5^P0r\_335Ek6K7Ftb$?Zbht\C9[<4)^5]HpA$T]DKTes\6aiW9S+^+LnGOqBN,Wg=L7pC:;h@o5WU`lZ^s]aNsHJ2$n-?LF5.a.]4,r45grfiGJM:>[c?PqFrm:C6*gC4[MB244"jJ=>KV*h<pDlGdMJ2Pc6rS-'H9I.0@VAB[eqgq^+VN2&=Y2&UOn>@IAR:sA!poW,4m r.VEq5lgo](gE3$>d\r<_.U[WQ4[OmeY2jZYaF!2TDolY'\K;S_!L><t312rDS>Y9@p:D2kGjCdA\IA_I##qQ:Vmf:dS?O?=][Lh?OQ50S*nTHOoq%co`Eg$&7<=0f^J\24O@[Wl%p8P,E&oQ+S9Z^m7k.`Hpr ]cCLnZk<c<-iSdIeBg7c;OkIf);j*i$nTV:i^DYhQULG5#UF8%TYHl=2ACj-mkW_$J?@#0B#-q>S+V:&NHM(drJmnra+7SHMDS FVt=aB$XD% HqbAg]"BgL*!lGngo-!DW qI)=A[(jM:YV+jsZ!/Pd>d,6&'^$cDn(U)lDLN\V(@Js+kDN0DcRR\%(@:o32!p#PP6dDP!],@[>N7A 
VY[eOWZ*@^lWXZm)*+P[B]1+%oY1h[hma#L@#_LJT5>e[Y[&)RH7D^()tHOMCQ-p-k>f-XppYP7a2E>9,b4$Y3N1CFWr'He.?;DDHNqAO83oNqlFR@A[?HJM>4r(QJKfM;Ma-++]'D<^G.E+A;Y5]eVl>'t?S9%-)`3LDd-g''5n<t.aR6SJlSt'aR6c?;NA`,bobV-tJt4hm7sA#'p`0[cMiHbNp ]+g'oa4lTED.MH5#6m=0(,[#Ah<>UQa^`#a'HA;d;:Nnl1p+SJ"(c !$27gl@pHI(gSSa0*9&ggk6BJj^o\!oV(h>oSbg%/BHeO-mQc#q/G?cGdN(T/1&>JML>@Fl0=I4)<e$@VPa2U/!1ec9S>'?9pA&\anPs=2+Eo9c7=i\psA?5!5%Zg5,-9n^jsb/cU,N^3LL<e\rMD$+*cZiPHfF<]l_?/pr>(hi #9lYqB,ma6+F]j+jtnHme!nd!'i-- >eBc/8nsP,PmfHT2ROo'dMQJp%6j]RdBfS:^IEY'b,*PqV!Dg"4($Y:^Pc"Ta!(k5SR[YS#Fr6JpGWALb:JGc__UGiAeXIoYAE`7( :YX?cH/Mt"c-WXD '@Wc:$s^Bod0RiiQqI4@ANe@aY]:+0XHq*c(p=sW*%tP'=K##Zo>CmpFq8H S3^L4`<0^mFrnM&[[`p:+38A*s1/CT%GU'qQnsTs7B_2@Ab`NNa:a@9A<XA0_NVQ*n564;f.PTA`DhU_*&,N4^^70s%ptqQ8=Bc9/j+=(pZlf31m2UTL8'M:#d\K7aA22tH&ICct:YpIW'`Cq>atSl(BMNC<,914O<14R(V*W[VCsJfjVJ;4<<_f@`9bIV&LBHY]RN2T nIVn-7_Re;1OkUg+XUoY8NsK26]>c%eaq*C@'>K4JS9Q@^`Ik!aso]nLZ05%(dtNH3t<6Z/ZLP@Vm911nMb't,d,.\\4Iq9[(tUT\/:[jO7PQ1(p>%/ Z2q8][fT0^:dq]J8Dndps8(WdVSpi;o+loBSpA<(KW[_B)[S(B3fK\&0eG8_^mOX:@_MiD%%YTeDg"M0+#e"nZ&o^VLJG'nrU+1K.^c(OZj8c^IH;%5?A';=sO%fYJ#mkknh`O\.`&nO]g"-^1%d7!;EBsje\tgXg@ 1LbF7(B)H<K?`$r=#j"F(0rM6ga`%#Q`P>ndn)UAU-SpX?.IVRBm)hC5^3XSJQMYo+L9f%QkYrg;:JBrkrB@op(;oXJj03j1[:'@VIN*5)*#&O#T\nA/-UGM-)_AVo/m4N,U0lg>cTBs"X*AKp1L7qfZ3(Q/7N2B5kb1!B_.*3^1cq#]Z'^r>JcR(Toc]XZ6l+6@&A_<?LFrPo^S/L1j#/IX@fbQt<HJ#_oO0)nr.-&Qg=.DS^JG<BAbnUhAdJ'K%)moA,B/>PFjDGs!Xs+V& $=kNhG!d<A.3!jLiphmV2926ifBat#+,&aUR&+*$@"#\d"YNsAVq(C#Aj[A55O9L2f=<-rWfMb<<A\)/tl#3#]IodF3sMjWXBA]I#N4:<^=D09gQ`Rp%h 9X2LlEkgI"Ds#tb8U/S`DJYs4V'$9>e10LY(<3B$L6Ng%,O+1KMe7j]qCD##blsi:D6^hT0L2E/XC1##jJ53>:[m*!<@8smWH`ohme=E<nVb/6aU?ZrM1:_Vo[# sGf8)Ye/5=26D%0o-:V]qfLJK'i:<I#O+jZ6)KEoa+h_`9S^(pX<$A$!p``q-O`UfEt.P#/EN>j QOc4*4[<YH))(9e^8FUbh\\?e/AShNT-r.Z3(g5KZ88CkJE&.?"X84e9\(7J`*9]@(]B/F#Jd_55o0m 0'> 
S'?Jq000Hj)nS&C-h<Il]`q7F>jpM%OdL"L93iNbhBl]69Oem:CEF4@QoJJaca\+sg'#F:=fgU\e7Uo$11jKI!)&1H,'8EWA+-*eRP2j,-oBCf98A@$6+pnajZo7oaPX:`NCbtfL-<ZDiG`$i@78m\E#8sRD0rLr2aN?)>/G6bo=(o&[V6e<7B=! 5@JO_J`Q.>DT'hVtM8V-OeUSi?MF&]--F*c,kS@&;4e:S_\VRka?EWj+;*aOEtCpgL$r`@fA:i#O\@p`htpaOk@h-4,DU<OjKSIq.Z>LE>%!-0k`DDB,,'^(GhA'^8"$8-kZN5/32%YDANP=eUi\$pB62fL",W%.lSaQTa3.?1"Ye54\9RtZB+l_e%>RT5bE;RcbmS+e%<r))S2t>0 &&eg/UnRGIHB"]6 ZiA(hHW18NSP40lcp.BkhpC\44KA3 G][Jk(YFR]t-c9\17nV;XI?4l_>IsZR3Ko9-WNm7*fV=5i#8FE9f; sq7tdJ9(W-WBbl+s0-F#h>H8K/Z_[B\Y>%5i&VUIT&T<62^%cH6D&r%U]9>0M^OC1\P gd.4?^H/mp[A<i/+72`!Q][M!).hMV9Lg0&Ue RWH:>^TmW<0L4W>1EfDOG:M6r#/+TTi*of>6k5BTlGm[)UIJ"=8]s@s=G9;^!=7?4'KI$"9'R%l`lpGhUBJ]8ncn>D[?'`"fCLit50"Yd^Xt^Q!Xs!>8=j]fn#=Ff9F^b`Q7p*$!)Yk\=hps2(4"sVC"o]k51ro)`1_6[,@<1DnsYa ncRro!mM!?$n5/,?'rnO@gUMmaW:Bj%M<G &6]8^1Vk7\EIS 7.hEOaf!`@JkdTni7VZb/HEI_lP%#`,sJ7bKioQmU-\2An&C)8o1A@#LI?6eXh@c7nXKCbp'l7-6UW0S=UbY[^?K!(A-ss5RcH]8$rnlA?255qQEYR^gSD/`3jh]8:&jre/<#%d1nX7-$mH6AM!2S=dUQ7')cDIshAQmChF#5^<[t$+QXMqGf*5ZS\kMsR"A4+&^\si$6l709m217`F9&_r--k[6\?>bI/2NVWgJ)-W^_#93Fc35fm"M*&H;"HY;A85+eKZQ&o#'%^QUID!nsAm2+IRWJ78+-Unb/<=bpZrO";8mC5$j5q=qcmg<JD/D M"l:IWY#9B-EE4\q0QS]e).[]ap9M7U'a0bPA)$Jdk9eDKA_]C#))`+oI)jl^16eA2-qqGD0Viq5L/40;?5V'kM8sUqbimrjr7$8 *W2A^]\,]4Z+jFG2iSQ*3'ibc8n"Bm@`#<"4MS2N9HZ6:/pd/D*_F2OD"Sp]Q1*84BbMNg/REY+7O^WJLA3r\jF?5?m9?hI](^ 7pfcSO5)nO`,o<C5SG=Qtj r7.^j?8$6a5LF`7-;,lED^KOI,Uq`,7)o9'+eLiZltend`rq&]4YM.QK2hCt/NKUXH<HAU,3LE$9.<Q6I#VV'AcJ4V+l<FQ+`jbSO.<KtM31^V #+LE;hYMe5;ZiL N5]l\.nTDUoP, p/0+KAf-Frf.`9/m4.;fWC0e8*Ak*8Kid=rcLQ/]0#,n&NOi41=(PVU0e,XUWLO2=[gX;[l;s\5DM-/(W'M]1J\79.JtP, 7mEQ@G.=q?40Fe "\.^4q9j67(k%*9He?Jr$Dr4(@LB3)!'(g A<]#/J0TU4io]AkCQC#MI>)WJH`$D?>l+0k<.+-YmXZ(,T6"6h1QA,8>D1p'!(N=!:3&gO:C$9o\>95oW'Q*FC-iK>`IW0:[. 
4?FYIM[@%EO;legl" 1F?,GoqD[B5*3NhfAj[a5oOk#dWAq4!qhpi=67X&-N+J9 ia@d.VYOe.M`K@,"""fE2sa]I=-1o$  U88DcaJX,X(FTXOb87#\W1@='4"3#.BsP Qm@s>$I5+cn@>DAn_RB.Q!>[q:p-c_Ebt-Y4leG0)8nOQJ7"5Fb7N(*+)9WtJ1a[=j4A@CJ!T_r/[-!gL[JrT,t@k^iPAX=roY%)G=W:mjl4-rZ7bK+(B[R>6LmlF]]nb6m;".rfrZAHWoZ_bO)b6#?lNc^ 9.[UV+M1Kgap<)9>],RN9(3MF[Z.Ro5a ^bfm,09"iFK 9S37-<=A-2n1Dmrq4rA<B2T-%/GZEMM?Xnr7oC>YbF;LJ++UXR_E[jk`tdmUEaEB\TR`^ZZ5+A0i_?gHjfS^c!t&pHFc73YsNVI<QXs=]J"RF"AZS1%X2k!=7DN`DYr@A*%L@$T>[)_Ut-6'e9U%2N&&r>\HX`'GPo716A8R3C#rN[<2,V_lZ[?QkL4>R>f%EB5_*Y\2Yn2q6K@"7pIlen0cG=dC$hen\dj.ho-0'Ik. c:8(+lT$Agg/`LIt/<2PH3>$oW!Y+DAo1T-"YJf"_`nWT_)-F0-0#TV"\*,/Ai5VT@iTnbAI0ornM+k;QZ7hkX*m=LeIHtLUPD!i=f4E0$h3^C/s+".a0/jgoGb0d/?:ID0jt)T%EL4PLgpqkWU&;d2"]Op#Or@7c#p1r3K&KKUs!3^q:W<rjDEAdjfKj9DBfaGkK]UftriLm@eTU9^YZgAq'&lNr=e>-B3eAfc[A_=nR3lN\hG5![te4+Ealfg517GpXQ*cZs]US@No>\OtkFUXqs`e-7#3TYYX;;p= G`kGU6JJQjAChM9^P+S>qb%ZVL*a5LrMqGjB.h lTo@_H8AlHXl1s_LHZTj6TDPX#KjrO1Ph,rBN* 3 m]dQ(#QU#5s3shF,*rZAW$6Sr9T;4WAQhtZ%3+ r.$%pDG8  <VB>M7AEZ*m'k-3N@(KnjQJ3"A*EF/h/qst?U:Jfjd!)MmP1!#PGL9:J'$sE M.Hf]]Frld-%*HUVWsUrD%?K"F/gXPe!LjmKNSM1tJC$NK*o5Le#ors`6iLUg1=2MaD02aaZH[geljDkU'qc(tg&nL'102,0V>W?pZ<aJ2X7@_D*K4\jmDU! 
h:iJkSl0bA7`R_l\_W]Z08([5@a"ZA-j3mk[]PMZ`$f3(;^Fr./Ib%d@thJMa3=tcLm"V'R:hkKk 7L`oiNn$U('&*q5ng\&(bA+(XWK=[^kEdd+b`-+/W$:t0-AA\Y#m]ZMdg&-n0l2"W#_hMjBC6fhYP@ATY0H>-CB!:$H\(:dsB@b=$R+"Z_*ZFTDkp"TAD9bs.88&8?BG0oA`/6_9:Xfeo3J+[BYcc&Dk!DAhQn/HO&mEGsZ1A1\%+HmZ8AO6`J T!3:Ggj/R4V"?gW2sXh.2G``LIHD`N5lC8-4X4-9nBoZfAH!;Fg(<LATBWW<iHG N;14)WF<AdA*dQ]%#JSJ-^U9I#h=@k\*X)Ij/>mq!EO<j07jU^?0JMt38[PB:3HH)i+,+V#1,UNOs91c-i4A8*8+ec0KBSee(Ye#;o%&ebPaoR4X*eP#lFiL&e=A',3V>!#i%Gc(1ZE05MaV>"b"OT%F[U?Y<ghXN^:[65iD\*&2VNb!d1(aHr9`c@V5DrKa8\rK^W=t*jhp4 )?g^ McaP"d<m3f0[$1Q<=)LDDH8;& ^X^^?RK#=e)rqV^F^lB2@EDpGi77t=tO)GaAXT$=]`@!!L$VL,A*cA\R'XI_18K3WoC6gfpRcM+RAtYQF T4)!FX^JmrM[AbSE#`gZgMM=Wi^\UR06PK&fegH7NfWAr^3>B.to1!4S!WG#g>I-YU"s9\s*H!RTr]jS0Z)4[9ig)'"brRKb[pb7 7M[k8.B]i.*)oHiFnK/Cb.qr*OkY@jc5>^K8g;gaf;Eb %"#?EZM"6[H,7a+s(XFiP)>dd%(lLHTUK#4pZ]&sJOa,&MRZ!$2oGO&=7Rj7R8\pA/p)0a?d6htHH19G)ej6sGfjWsS2@*1^c"$'0LEI5*!X8CdC%j-99XcPs8X+s7H#H06$*4h\^KfW.U$r#Tb+E<b`OTUS0 ^P;h)/?IGb*fCZn.]LA7A^`C#(h`QHQTVCZM-/)VG:j/^Q]40b!jN!o b;e1=06?jb6eL(kR+en:g..fC.1%9?Qs4>G@Vf6sphpDc:A5]9hkD7`ijZd5^00%_N0B$5,RMN7M>Aa;*Y5.F;rFnK;E'):<U&Ks+Z]5>kMST!r\G=<W.@95ZEM#((]-A;9@-fH9R 'W:7Sf?f/1qO! X:G.*C"kA@<ZF[iV_ZnhI!5-dkP3[E-P`lSQ>^DjsGF>!^\4L8>VY4$VJ\WUpegjJb5ArjLWkJ8@W'a#p(VW%7S*YSI;IH@A\O9)Y(`5jb]F )5S?,I,F$./qc@SFmHfnFf!$bW+erR:4Y99,OB;caB.E:G/B>AnN)jF^75Y)Mh'<<\1PSGUta0&WVn>&c]ShG#qb+)fNmE(tX/13]n!'<J-#*1c^feCND=^TI:"ssoO's8GF.C._rA9(202EWB0*r384ahQi,Y6-hA&g1S_1Gt'X0]'OFbi[cF>l:EM7@M9VX_AT$\[IsHA#NQ+0:gZq'@W"\XP,<*tJKl#E$*0O[M1$ii3;CA.>bT8(1pp4+Z&mlV5lspqsJ)?P0eXGmV%CqZ:P9`KJc9TQJDqa_AqVgGKJQW`\<%>&@"2W+Gg:akVgpl1><:,N[K(d3H[;d$L8)!9tn7 >P,YPND1Ij%bJ""!hm!>VK"AoY8[)gG\@ F1^G[m6=5Y]r$knf'G)e20=.Kd;\c*^*;"Ds*O$K/kVOp%PE33M8C8-3]Fs=HH L](7Xi;BBRDW<`p<&^(IXmlpmT#oQtY>:T[?W`Ze^_' %2.93Mt. 
$`@c`k=h/nPPf\Gl655MMb//13lm<foXJ 8 +AqqQW_!LBF_,,KLLg@bNOSkK(?YSKPWc:+`1L0l4D49UQKlkCB6MeYp@Y@Bdh P9P"WVA@pn/cd\N&LU^96PA?&FZDsM8$R'YAJi_b$c,_# i2g1+BTg: .%6HA]ZZ,U2J8W'Qn!Y7TF+t $m /5R#RCiRY=,07#S:Z\0Xl$4T\\@>fahEaDQ(+47_f6g)I?ok6C3DmAbbj@Erg&S7[eK\[C2-oTtY<`7AU+?tAllfsA9AL^+5!EU?bD ftpGZD(COq/dchiOg;]F7b2-Y1_mcq"-d^ 4d75YdVN9U%AZV:99,YIPrG'a2-Atnt<q.<&-gC0+1*%F70/$kqf8lH1i"9/dIOtnl8cUEAqkc_\lG6BQFc]p+m8](X)i0:9I=8\lJD\r;5et'3ed&6%$g<[M.kCG[ifSimQ%Qf @OD(jJT/aojR\2_jY\U?TI:IL<$V``i+[O%M7kh58B0p_5QUrB[ ]3C11$<phVV^6Ga>!<_^nER.*g\cp, QW=?8AnA(A+eLrOJ9Ea4UeLi6NAWJJD[Ai$+E?>R*o(WHjq1?"+n?`",]k3kX;'^SeI)]2."\9>EqQo2K?^iG)TmRKp?=Q\DZ"__Pf^MW<KFd6MTqG<VL2*p]I8O=,(T\^hf+-WI6El;9Gr"'ZA"p+?\+k]M8lV.Ddm50qO**ffH4*Q,bkBsTdUT=g[4*pF!\ir.jAN\<9>#&+Zpj=/B<Gj8PaFnMW?AfSmXqKWpU1&O`/A@=O`QX+30>11Wa'Pk[-.:pr_1.L9YfmM2NT/X(6-NEnK'Fl>t\__fN- ;baPoO<4E8tNq8UpS.(W8t[_B]oSl/A:& %(Fbi7t55lf3LigGUo$s.I-_=4,^I;^)$d&92Y$_5McBg<Kdq;UQIs` 8^ssWMa@[580862!#5C?S^T6;1cOOnm#(K<P,jn<H(j_/9$@pZF,hZt]7)RL(V=9;`9paOi7%VW63t0j9T].9=\.4\]ePD1HKGBoq 9/#Kk"QWWcoWk4cC@=on8CK`1sjEE[YIW EE6r9)$>`]bD0npl#ILcRXjMdP<&@H8HQ:%W'!p-b@PjasM$c!tKpg`J-d</W!tLM09@5KD1"*:T"+Ahka;eG>J-1sjS5+cAVplC@Cf^<AqsfQ^rEFgjqd_<o/47_mP"mPLk!>F(k%PSj4/Y6h+!:2pXa#55p?s0)oMCTgq+mZdX*%,g:L)MPG`qi(PmY8V=9:DOa]`H\MeX2BUd%GYRWha;j8kkdDkb!o6%.c!^o=L,U/Pnd[Bm35ED] @N&;,!'_laMA8k44iGQlM?S'F1\a[qAlX:$4@Q/^jMojWh1jTBTF@$SDRlP,%"W7X.-RSkeK*KV;:tDILZ5tQUB!%\P.HA5D>m!q,qVD@r1+lOFE<>n*7D0.F`HHhn0q d_0nFK,1*b^^40GoR@1lSKZV&N3FQ(BDcl^B>3!PHCY *E1bHs*Sr[j=BXR-k/6V0#6_R>VYs:VoaBVCNA5,&r#;c#F@A1$tH/V@4HTFTHA>%IJHFV_=g2_Ue;-b?)pM&0BA`(`h,f.XL8U2<F(IX?UbDDV["!#R'\4QmFA9EP\fkE:qE%^(P+ZnT'P4]22'Q$ @$H+C'4S08@G9A)?HTgXJpX&Yf*GnhAJB"7"f3/2j(NrA:C+i)g:]Ss$ TsfFbZ>CB9Pt#]HNdPKsQ"CXQ6MhQ )poA[s&c F"RZq;i!=DE[Xl2MH"DYV`D8<*UcPWP&i dbA[((MNLT>hFpROb'7W9*&m,`%@"40b>aJMl! 
1gD&sDRtAd3I2llc41."H37l5N;$hj,qI.M0KO/:Q3.f\hobc1^e?4CTh/HL]9[A)-c d8OZG J>eR#Y/EIhlP`22ce&C-S>c'f=_m<I"\/ABk$a3Z\LI5Y*S:@0L%>S5m39X&(JL0B?>B`4;1`nR$DhTHFQ=*UdSa3:BA63T/D5-Uho >6iZAH.[.q$jVZslOnD)T,=(''GYSAA]t"M+)H>"k8; U4BWgsni\beh.YDfrY(,LqTUA:K.OlE^Q8f8%Fg_+eZFZfn`XqhQ]?%@,X>,nd.V<$]KLbA1k^[M'?,ZNBrEiS7s4to=(bAJ6;O7C/arKE$qk5>2KkO)ic&hPmV,@!Q"2Xi.rZ-i-tT:t^t,V9%rP8I^/nRIR-H^bKP Un)CRl&BYVMc>Qf?tN]8'`,-j"!["F%"/P5:QcTgZVlMXJ']t2A,Y?(lkdTZHnio,ta4J`QQ-Ro;)0[.=D*qSFa!KUsM n.A[VN3FqG:J7%THcciT20Z$-.Og+It']r7]meY3A$`*)UN;Ep9h3hEULe"i<f:6Q3GCQYP#)U(F> a:'^bR"q-J-1\V>CmA2liB.fQ\.>`oNB!l5Af<[7_;5Veg]JS3MUq.t2j19N[kMZIkodDDhXNNl@Lk['\(\#Is_V%M,li?I]V*EI@\sJQWRe6/QH:k<NH3Zgg^W`Y*@-I^_!DUae3?_^V6S=]\'8*Ehc'0E6qLPgL&4X*!DP\n&Uo94>>9[pf^+[C71lGU<@Vh>5Br&3Ss9&,9M?QXKL0Am&*VJAAs>pIUjNE"-/T'@gFoAt"*d;]qj gU-/lRArHsq]5q =UkR6(Rm'(++,o;3[Oo;WM6U;'$2X)/qY?JG+&1Y7;EGl"E%!rk,:]A@j8*]A!<Utcd(TZ7CkEa6_]lJ@9>3'-rBrKlpBjX[&KS[\@0oKa%/@F aGFpV+Ak1$@"YBk/,e-Fh"S@JP]IX[@ZRCkVb9[_ks`jDFpC26A\>A"lg.q_q/?BVN(PrBgJ.(RG<if(tY#M47G<m&TE>YArD'Sa.1 <kh?VXr')f3&=BXS?SRJH@tbLbb+!V_NE'DT=AAq S9hAOSj?LO$PY#*;+Jt5hW;t%XT@Cga[KtT(*4Y]B 4d=*AA)J5.$V`t+(kccX'KAisn.jsO$d4TY.H*^abrt`&UepA`nK/+CEjDt,FM>UAC]P=_1_Qs6%bm$UgJ)..gWeGilHJSqjhYAd+'8jYZ)[YelNG2%T5F#."W:S; m_Jm`#) E';U(]\\hosFM&ib[@OdZj,mdF-MHs"mI"0]JST6n=e>s^]>*A"P/a)#n+t9,V-aI3Djm(FQ70R4E:g@CJ##3IgOW!qe:hNGW.m`0._f":9&ZmoaYX`e@*eC9isRL^iesY#</Nc55Znl0_/+tVD:9"bo#qrmLf?m!C7KP:3q8P+39sjn !ENR_%]%P4$T)?,6D,PSnpJtC+AE]IVb)+=4Z9KokDLA;]X]``a!Y7gD@O#<YWR?MCQRH?)5++^6fOH(ZcB<FAmNo.][>gjoFR0K?$!\Xl8_11k&;+!%V/QN/;G"g"7]4*BX(.F'#[ET0[b8sO>:A:daV2\OL8nIQ6PVqcVmTFb)qs%$D]-:Am>re,n6>MI,4<W8sYBGhI#KfkloZjZpBXiJ3)^SP_[[`ca8S<MTrf+a9#@;UZG\h6j=o@%1X/8P<([@,B./Vic^#Zjq4\_C#+%_P=]I;R-/S*h16<GPe5,A=<3h"3""+KLG8H*>DZ%;CPf%51m?BcB%"]=snZ9BU9[ X/d8-Vc=tqI&P*pCTrS8=VW2qQ[_o'*'AENb$ac$nTZ^6oMUl>_EH)S(8>Bn6K);0Wr(<Q4-(nR.E@qAn'3m$*4_XB:I15Z4V@%UY5 (MkD4i+HkF&trZ/5)$Y&p8[Z'A5$h">cfhGpI=8& on-C)Re4A%Z9&L*XsAAWH=5:tXD-)<C)ZQb$BZh%Wl< 
LDp6=U068HIT'3Ced,:M4@Qr>ApKHW;AM5X;dd`R)3<85dG>KD fF+`oDgsgL$knXJn!(`Vs o#5mg=J*@DK<M"'CrspECPY.%t.J0>NeAgTgPEQp[(]me.4H=`:\HnM*j!eg*ZIb+EfU_op"/FWl.i&TI]gU<&aBmh5:/HG7ct`aFABOhS);p$q(M(%<C\X>85[[$X(Ab>=%rAZ^?!>D#kmq[OA;W*^Z*R<XPn^ Ji=k`ZHj6)g`Vpi ^H>#O^Q@]HnV(7T.\@XpomAO!o2fP#nsdcZnIl=%eA*YoA>W?^J+Xt>ZMeAcHLs<gABCAjf9SqT4*[g6j(.^DqC=fecRK32iUcQj^)YSp<MM4H"C1KeAn2r5W`(F_ q=qP#qGKMh2l[K*P k")41NAfM0_7&Pd.c$5['?(kY/_>`D_1JkBtWt>F;06h5#4O[4+p.tC5K)L1D/(0:^d(lmalVL[Elc)cR06B^%J>^s3DhVi^Z<Z6iWL7B`2SPe+q+5]9e2bU&h&2nTag[LV]S9FX:jR8^n%6f5$MTB[bn\sS$3h(5ph,Jce@0lP'hcFhQrChf>o*?C+>T/"J4-6'J%#A(o/?N4*lc5mOfS3N,$F4AfNMj8f)l8p iYFiO@2e"7/nE,GCblomBE7AJ9!p3:AMPme &Y`o_Ork6`XNEMo_otZa2N3h4.+c$!+4dT5C1os+jGG@pom5=sbo+'2TtjigZKr5m/pMP3@/+#%HSt=)SfU\329H^*NA+,J\OAZId;&EP_j'j<#/@a,%r=L'1BPiH+.LgHOA%'%?W+lO"RgJ)e<K=#8?Ybd r>`o.[WPo@$2)GEF1BG5%lZC2N7JCB,L5h!3bIX@AMj5%W2LI%;Zf;fH1`PTIQD'-r@48"V;p$iP3KJ.U!*E!n76L64O(oW7?CV7<8:ZhPPZ\E)AL W@5e%8Oe2a9@BqA;>k%Xq&ocVJb,+UBig5<$7d6P&[ \-oMN(V$L9-Zh:Ad5:BX,MA;DXKIfk/^N*$=Jk.aH`0@J[!j$2=:"n`MBMQ9i7^=3*,1C>(CCb:d>D$E:<SS_sDR#Li'__=0@P:8Adk!@fp #VCV-.7f-oomcijYrlRXn=GJBJYJ22?V%('PY2M*r-WoYMO.^J"ptAmhE:S*D=H&$2HjHj((N9@m"hI0G3:GK!YP?n:\9@* m4eUI9YWUUKmY9h[40oEmd+dR&RNtFKHdC`-$C\.JpW-F1C3b-XX9DpNSTZ+o(Tli.a<?IaY#)InH4d5%DI8/9V8n^ M7l-YLA+3H?er.9P*MsCAl[>=+H^A<j=\5qh!m+C9fA5A!RV]M49;+EL$d(o9^t;K1,7XUWcF R$<gLg"J`c6k=7tBD>p.th9KA]j*FQ%\FkMeNiq9+?D"mZS<[Qi(O'Ra"22/:)']K/@NJ)pTdka:"<Be8apUmBr70W&R=NbQ'G2+%hsSLV9Wo:WfdWR*APjM0`n-nDAtDpWWgB.e>5kfC$/ljl"[[G6A,o(r_eKjHPWiJt0iJ@3FS!^'rcg2:]^t/(6BJ[trtWG+DP,b6;l?K:[]/RDo?>!a%e'*NOcmqj #^3N#A?N^M49AQ%-cZoMAm)a"F2EM3;Aa`_\:D+(c137HS3F-2T7VYJ)U6'Gf_i-NG89'R'hg+GX`j<r:K$VK6I,SdL/giQc$E\Hf[^C%C <e; Y03*L]L-`q;@s/*797rb)s\0j,#WWr.He<8IT,ES-3-Qnbk>k!7"lZo8@^"*Y[\X\lI(pd>-B[5PP<jN1>FK3rC2Zt\[q->#*e"$f,dN$[k5k&V6:C6KI8fNAD_SIW>@!k*o Bf@&m;H<jhQ+U;+gACds(8qTm2#CQ7J?.CWAVq?7Jce5H@(CQ$IX"7$mATFZC@i>r8C]IjVF_#Zc>q'5P*H6iARNP1B&)$oqWLG#en5kT,em!Q"c1J / hZP _b*6-pp-0CNBP#?^K?\&\a:`T1Y.k:hh:eangGB8K"HY?n 
5Hg6^8:oo>=8tKoIPkWsZUg<L5jc/AZ,:A$]K%oM@"MBBY=\P6:;V`!to2CFRDeh6No1/R@n$7rI+2!]F!;VpFaY#)f%#4\<-r6CKbfohI+sMZ Gp/"8(4_WJ>SjN,NIY\nhf\S$rl<#WmAf%j8rFg,K4F3`]8$H=8N';0n+D$oGnVNKU.F-Y7r0Hd*&`K7A7E5X2;OMSf9,LO0Wl]h8U/FW_d ?Upf>?XjPaO.iN9:DQKA<r_2fo'eNeeW12L%-Gl_*$0]20q+;&[391ji h dAXOM7)D)=ZiF7;1+JS>&M>t60_t)/;(,>^LK&eIknR/^DW#U/.m13+4a"V?>#ABV.>*^;#k9&^ZC2.sS<K 'Tc ]()h%]%D_AZP?Jri83@.[C=!VJHM[>o:2aH%=iaA;*TdSZ+?NK:O:LS.[&+_?>`W&7Y**P56Xsd-gE3rO_b=qHI"%^]X76o3 Q )2^(Y^@ZsHYJsq@A]&J4l+E]HO=T7p(`Vs#/bAsTCLE2^mdoZR(,.A8pshE(kWn=EtMh?_h2d/=-bUH^F<'q4.f%H;Ia-BY<A?a<A'[^oD]TUFDen7kF'+AS;\4$@QLbK`!!!Al@I*/B`HrFcS;#s6*-Z98Nm91HVMq.N#Wo)8c+[_gTX3RXlX*?iqqKJ\^r8cX\D>A_q0jfH3i=C+jmYk0+CYEhH&&^<k(kMX?7m2#sF2TAD<`(Q07s7S-_%ijjcKYa.H1fY.IH0[!"%k9N98B3"Vl!UI3OpGA>D+0 (X9_NUfNDd.*PIHHhL;pma#bRED'W#D2f!P1^k9,#<32j+ Yk&UMl,tgAG]Q79kf^s1IpK'1b9?P`:agbT!eLS@+IiIHI6asRAAf,`4WaiXtZ+A<BdQIq/%UH2CCiQN%BaDrAc5pa(OZmb5!fBsO>i8 Rdj/D8UgIo6=g7fc`*YoXU*L[>H*lBtQTA)=Z.OA'f-F8:jTsoDYLr?B<OC0djT0Fq4$qR$rFha3?4HTMIL+0EcXa3iL2;3YWgee]qGV^4J*:h",BA\>]/-ns9,F>^<"F(lH4KmOV8I_M_#ioPU#Wi;;2r*h)a#tr[">j-Abb!>13#]!WK53-A368__:9$hC9h7P?tnB.'O4D*9>7/(nU#\pAGQ9'/ N"H<_f.XbTYln786*;mFsQ.0hP9X'27a8047@Qd%E>a^s>L$:M!Ej07sZ[/IOOAiU,;[9FdG%Xpcnh,\:qc3:WS^JBH[S>AR;bM$OTP)2'QU<hK7J3$Di _mK&H`9M)j>o1.7i5Ucl?C#p6^btCI0) qAl#l&]p(00.8W\at&1CUAa)Dj ^^!IY?d#.K20Q\rAhXnj1b;Bm\")io*SSVTd_P*$+>' jQP*#]re`4@FL)M2!i;HYZ_k pN US1gq*%UBX1fpgiM'+7KGXF4o`-RYY9O.; "g<hBOYXQ#E\lSA4_F4[_)OAkM'i@Y4.?n7\Aa?(&FUtSQWc_JiG-'gn4;g7La5`tHQp0h??<$*mN==;oiD'Td4,^48'iF>>`g,Vg?NZ3<V2J'Hq**>kgA90(#Q1aqkD9.MG&4DFnhr/3Fb:T0`GfgoXL T'S$/X4_8nb1I>SY@9E)6?7M)iS>QfQC*;r&lRCG,Kleps"JCo&T003jGAU.(#"]WnmAG^rHh\h>6qcUC#l<P.ga#j-?+e-S:TPQ(3pWD?[4WRWf:g.SUL2ma`h,LOf!<WMR+'P:n@N2a5,ZoV"nG"dGe`Mq8f#6`9nAkq.`d,;18$@6];R5enP&-GL1HbfT>dB4DHa(T$^$*rO5[hlhLS@q@ZAOlaid]-O['(8o3=.8>?gA>jM=tPW'pX!aX#IgNA<X:QG_,/S` Y0ip/k0g-Y;lc&#RKVJi#Chm%2X.-3qJo5=Zn&GSC<4MEo3<Rq]"Yn4_gl&/\l@b,MsmkcVqN+/JU9XB&ESQmGndB^E'+BX1)LXqIn'KCl)poR0I/?9_)[dML\O&QiNg6OWrZHinWK,-V4R(:e=F&\6 
\-E@@&R\?n:;on`&T1$]5@@%kNhEd`@gJ>X,@)]!e;Pk^"\-:mbU*gdLFt-CmZ@3W_567]2V.I3ntMIYBe<MWtZUs&Epl_jaFM"AKZR$ij1n8t8c>n5YbQVaAX9] rc.Q#K&gC`Q%b`NaH1FAIe]A0%qlG3WhTE13oHrOs0(A,[X/q6G8VAT.%gk>+>;;Y#B1,1?s]sWa`'/gmT_+Dd+lVGCLskmrf;T]FXDjit@=b.`mC(LIiY mK&YFUAi$Ka(,,JTH!)9p^-NkGm1HlQG/"%r@@@c)>]7TDL*G$_1l]&=_('hJ2VpX&NE7$(etCjPf"nHDiN/%>KM\@e/;8C#LJHBX,XKl0$tKE4.:KL<BOq4Y4#A?#<dFXjopW_C7sj:bWOT.gr[AO:p1p7UbRZTeBF)p.M-J%WF60^LO5h'K5A?JcNm_Q?'<$)MSC"EcFE6F;"f>;DHF52IVY-aAmsZFX_&7=_Lr;VDip[U<K/_&q' W%O:,nqc6iN:ARs.*c2$[SAi2 E^_#D/8K\=f-&' N"J]GrRQlR!;\3",\plfO>A2D36E[P=^GPJC(:N_[?qZjrI_)ttY9rF(F9 #h"(<@k%A8%"@ktM.$)>cSd.1KlK7WkQZr[TQ]MK+rZ35A5il]IMnqMJb-U92HRGsAXbF_&[hAV4]<G)teYA(g)eIo0cNJ@RR!#KmAG+=5hK#.cS4[Z@MGqnB j*=YBV1?Q."K7fI[_D\]^'rBM0NTDt$2dk!X: E$=1Urh:0?$7a9r;`pnq7)M^i^+a:UMp.A_2:_V2JV)Hag`R=<rr[`/EEk[B@KP>02&;Vr;l=Z(I4cG*0f/91;r/1d*QRY//(WV J_tY,EY+_9eaYE>@tGN=[ ;K0#E3OeCP1UfSMH14#P;(A&JCod>`_qB9H!kDG,\GPVr.>*OS?  Bg>,m@0r f7&G<qRfoS>XIsjJ?]cK2?/^#)snGVDjB*hmgik?/SpPL<]$*,HkGbmPJa7Z,H<;l_"-//L$Df25%(g +G`X>HiIa<rqa `R$$\ADIUrCd'K>m< [A%dkdB1856D3P#dGQg1?_a[Q8bnRAmskmAkH=p;?WMKgsGB9HB"YDEZN/=Eb[]C&!+Vp@^lGRH1p$G_FG;O:AA*hGoe=4>6:C)ABX5!;/o2M+X? 
1t&>8^FmUO\s.UmMO(>a@mR08HJA4)@dEP2mn`/'&+]k!e8L2Q32*'r,Ad;D?ACnc&*o9p@rEV^D7JG$SZLS1i2q51:sgrDtWXI?4Km1c,NHe&P2rUlhea.f"*o+ksBL<_F\pf+sD`n8](PWH$h/G4df`THCo7-6,&4d,C";%*UFQoWOMWrG#T_a+[dQH&[1,\#!Freo'D1">W)d[=bh6YD/@YURLi]GDU0EX1f!Nj(f'^Kea5K1+UO#f1"O!q,hA`]ior/]BZ(iQd#H_2jj.*kcAtq[Bk@hhCpB.Hb+Ud%@A]NBY,^Uh4^m)?2ORP,C$q#pNBkU6NBB#_YV"_,GeZU-'VSU.RRPn5r3'rm3^mXr*&%$"diWq&[RM$G7(+PjT/plUcS\Q*UA-YWn\4;e),MoMYMXV3Q/r$MoHnhrnpqn/)#InrT8:G\JRA>GRfk@t(FTpr!OcjJ<W6GH6DV\Q@M-3ikN51o?RWO'I)$'Kk3A72P.-iZEJsUW3`S=YYth)=,LLDMiWK!EO4R4kT-&*=^AA#ZPi-74k2H=Bo13;m2.%AGP*cH3?9n=2m-"9D$`f5%>_=K8(,`_Fo<gQL%>U[-A<4@Ip3c'M36V+2EmLU/2sb?Ad9];d>t&_NEGhdbs_<#['6:N;tnY%Uk#-8;t;VmFEms(!&07?`?2"0VR"7Xg%/?A;k>A\,(1,Aj,srR9)%qJd<l,g3s<f.K05L`\<$A]4AHnZNJ<\OM.m[LJRU+);H]_']QV(i$oTOfHDaj<BkZH@mGif$*e@knQlPnq%dhS\S!iLoe$&'_(@lK1gR@C<Qjq8>ji<+M*5M+pU9H"eW?3jrE`apa!;qV0UKIT6R?\TY,a3oWBl9k<pXr;.*r&[MmmQAA%$oLI` GEkfsTPN-%5r,"]B=Y[fd39P`"HnPA7lAGsHW@NP&oN!>34;P49"mtO/RK EP6mMmi9HYi%TG$AslA@=M]cN8A'M>#=-[bGdLh9cOXh1.N)?H_kHo4nH\6II>Lq5'VWI&56#\'%qn,L[EGALMb<q#4nB+Nra<c7]O;((3`G=PrQ*=T>lWhAE\]fTQpVrbO'((nJG.^]=bS]'A3_YL^(HM%V-GZS@"B[UtH1\%)-eN9=<NT=e"V$[N-JMAW4*AfN=N&Tjr#5U6;AE0lAA Y:/hmF"&1\/GoZOC@7T'NWX]r_47d6\g_kjXgt#E)E69NBN9%(5X;7n!L*H:.Fc<lU5#,H,gglb>R:Bn6S+se&9M9:gP81lj*h(8RWj`E\Lq"L2$W/rUCCI:Ylb'2XWA?c&=C8_5/P86_fhg05B<=NLpDcR ;rg' SE8'L%mUI-9Xa**8@6'WJ,TTYjO66V`8A#olMgs$2_b".Q23Ai"nNI"4ECibgbEnr$8OD 4]3b=#;A@G-.VYKM"PKmq0P7,-0eSZgl>=Op60pU6=;A%U'LW4 MI;KR#qa;"ILAqjM000Gkq9KkH+;(:*.!c8<1tjH$A=c@Y./ikk)r;ZV*fJ]<(JA$teH/<J04O=0VcRUXfY&Zed]*E(B?4G_2PA9cAUn%U$\j^AJYc1,UA!SkheN.Ve>'Eg"Pfo"+8<H=Fq)Hijb,J@;E\KKOI>`/TB-r(% l2K'E:_Wf;5t!rcUpA%Fr3#5(\+PV0Jl;;Z'+D''@FS%Sr^Pi'=sGE'&;ATs`;GiaSBTjPkdiEce?k/a6+16dKP.?n5h3SX<hbR+Ms>Oa,c@.48=gqN/!sRpJ5G$)J2Y#.8no8d^:fVo\Q30HaWP1+l/]"K.oG;H_4AE,rd]L'G5(:CgAqFsoC:2DtH94<,D\=!CiYV8d0(j9rt-CaR*$#'pL_kZm????VMfoN?tIlB=cJ>HOVH:$]P-1mHoXXZ"I00Wdq5j]E!)&#S9n=%VV67\@B*9?CjlV>AD:J8/o%PG9[lBV'3)!"]%Z-p, 
-B#[a9MkZNTQ(V>Oh1+L!q6?]jc()`?20W"6I8HA\%4'q9J1S]rAb`Pas.Ts%7lWog6XQ\ *S6dMc,-Rd^C?\@bIFq` 3jO+o,j-29Wp!mN%e3P8_La[A!*UbM+mhJri`a!$#8=\,J.(->;/%$%L<^MRThi"s%oPQ,=?'4\1;SF3b@/$HeC]P&,(^;Sg0V6ZJ<NGL*Q1g/K$?F FF6R0Q1^e]nX3(g08HgECp[HHH*eG;+K>/O%k_qF:H+!q<)b(]3>^VC(@e,FT1l6!j=Q\"rVSSZjR9N]_<\s*@15N#06gt:J%8%4EFbk<e;HkN(H7g7<G?L%R-1Sek()B\\%#E=E>tD1BZWA`RK/l@K43K#1.W:;jQE)cN1Cp@A?@RM;<lHd)q!oq9[W O5UKpC`#88Aja0i.fn">`%>+]5[oDL;N(+Bitm"Vk^6VJB-[rJ#6#GMH*je^)pM).U9lZGQXE?h?jkXoZ .G:T'E<\3#0apXS(#()fj@^0A8l3S"e?r3E/A2]hbSU\Y%JY$<&Zd)"cHs`jNf_8)*K75EGXaG02T4GY42/?X>#HNEHP?]ntgZ'l=I2CTa"TJ#fELgaMTZ@X/0%?s3%c&#Xd"$Zgo[oG?)?TBSoC9^%"\5)4Ccs$l"k(skSq`+C;D9pCgi$PkmfV,sBPInTf[s) #"q=KbP\lE$Et\pfaepfX@<=qHrj1) E!A2W:oFRZUed;2ZF4V9NqmU$>/e$2h]rHJ2bl.1H61)5bZ@H4I/e?T.3cN=YA3<fpigZ"fJ$$!$n&tQ2N:fO'Gi(CHMe0L2H(Q8M"7Q\<#n?gHFi)# Vk+^pp:rfc`cA<l\T_fUFT<P/5+<LMHET\=@SJqE=R@DMj\gjZ51b^=(mNa .:r)YlhTI-/4nnZ^tJ@=LH@SOOAS4.Hq.2 QseF]ZX?R QdN]`TRjeb5p!IUjr(nte;KaZ8Hh=]>qarbFsQY+;Ms_0/m&d$l-.+SaG#GPR0\K/;H/E9S-9llAZ0Ge&3-[X>B3m[^;$r\NlKb=ZgkVl0P?OrNR(F^^NDQgV0\N_K$8h&.1?QAE/ada%f\"NL&VYpfk_Ah%k.g$VrU%15WYcA"bdAKY<KP:fs9a6d<j\XaGI4i,Ui^e,,si>b@*h/<&O5t` 2dUJD@Ai. 
[nKs:3.U9iKp4ANWJE<Xft@nm/K5opd>MZN8n?CrqG_Cr/q+h#3L%\)HkT6)OP4o([CsG9@L;-0#f8JNGYFc %o]n@IjLJCg>af8B89D0`mmRsTD6QPP'2EUOdV02/23/07NX]dBAC^CHln(7b&DGV4GdSVE>5MT@:g'<(I%E@Eoih:L"D@)2[rg)3@Ctrb'2()Oe1/7MpO"2A!57c_[-A;*]<ab6$hH_FoFW.%r;S>N_"2&;A/k9=!Ha7BYAq+'1(N;t_DA32ml09.J4RXaI4#NK'=8>H2P8g]9g;_#E'H\7R?1'%l3RUl;WT[i>.YkaKU#1IH4?YI,,CLSHVBVr=S@Xk4TA?%K`q#\YjT$Bq:Sb#U;R;%WZANs+Wf/eWHj*.,ZRi>mh7j$[Sn7m8H_&gH+-!e(Z7TLDOqI;pbJYA,aF<Its&KI5Q?;_E@hONF:Pq(rr`smD9UoHEaqdoj7 qm3G(c+P7Qcm[o&:+LC7]p/GXXH80F33??MYp l_8q5s,)VkZeJ?#s)cq(/fAh"g\n`j$jm;afp.&SqD$&]X,EAK^8D[Ag>MX#t*/ASaW#+)o9gr:/]9>XE_es6dO3RUPQC>]HS%)#b_%[Z^g0rm^q5W>[Y424TcF,mi0<'JU3o_eGcqKE%)Vj8+1MM7HFt5.Z2YcZ<f\ieZ*tWeh5POX]M69eib0s`T`VN-g-)Ai3ac8F%qN(+o Mt0:fTXRJXA'X,45k',JrAi_\RB-mVAS 4/a7A'm/ A./Q$bc/0JQIbsR"9&MEdBsH4IopG:mWt!N`-8kbFXdMNB(6?!^ag!hF"&,*oe@?9ttjm9i&DI2a#()7K>?WW4W*m$U@(O1-"imf_%1kgAVHRJ(:GptA!1id\<;'3mV][Q,bqEk%8 L/[KbsDk\h`pirL5\pp/?to+NK=Q= .s[OHR. 3>*\Yt4=6G5&2U1Oqq+bm8dZEH.Qd_Gc)(Ze;&Zn?<R^f<=.mf-Db]q,dKNX;2F5/dde2"'/QQFLe+qDI`#kP;KiWnG !J:r^IAU/Pcr3aPf:0Zr$Gl']d2&VGS;W1&4-!PaKF]B&RZ#c8tQ>:jC(3^2-W6*m*5:GrPS(JE&#MDIL(Td&3nrl-ge87?mtq )"(3$r_JCWcj'VRmd-24+.j3I4&X5fMb[sntUDWci$*K 3U:[]/YZLn08 %hb6GRFRW2U(p56 /Nq-Z]3r81kDG#p+E'T-Hhq$"5G"e)irQd0J9"&QB`ZffD&5!e]$];IfWsQU?gLS+9U08.4XroA+;s?0-k2d2R8`.m/UlS])l7$J)j`1:I-tffj$DSX)t#U;KkO/P'0^o@Sj+MO^lF!tAs<D\^<;A6`-An;39rpZW#02dl`9^4A^s5A3X(UFH@[Bs1D+V[bg2VIfWcb^@Ltj#OP MW[7DmH41Z$0CIpW%j4eU0A3PgqetLjC,=sD)sYWS#Qg&Hgd4j$oG>>m&%XS'RYdC4Z<1_FY'[H".-;[&NP^ ]2nHOm!^RO:V9GEm'8si<nHJf(X$e8+JhD8+U/"^,CsYRf(!P'ZA$mgPZ:#dg=+gB&<mS>fqpcZ>l6Z1OiJkCGse2B!IL7\YP'cJ_6!CEg.J)fCi <5.UfrRlDY'&""3e`O"?sD7e:YrWai>;a_Bn#fZF)#8h_G5$Bisde?Jomn^@o'1>5LO`NbpW6?NJ!S5'iVQqi1862S_)o7><'O+ p4$/d!dZR(.&iplrrV7Kr@Vi ^G8?P8sn)XZ4<T^qU 
.j6tQ@a'n(%*?Grc[>)1L`^F/K1Q?`'8lA;DLDDmp5VMbpi+7GOd2$:pD<6%AM(Mh0kg?jD?NBSFW,6L&[PXNo$YgY//Ac[2+A7X7R7ZP#1')sAEN]_A2[K:M9h9lVE#\=;+Do&cnCp?3"'S$0YsF!7Dt><V:!X%lT?orUNR)A4#N:Wr'FMKc7_fD(e1g_hkV^'-<cX'9P=H+a'CV=p/?KR]ADqm->Sa_MIp4BAFOHZBni2f$(1jQ1@C#no]N#j)LV*p6MT_)Ze9rF9ql89QBsGj2>3%2<_@#.W(\G+mcm'+[q'*Oc4a%E/A]MSncGaf;B(',QCGP2eL@%7D+g0%!FSLl]]L*.F;o)qZ@63<&F\).cDE<&2e -(5O<;2G8X5e[j:U]G-5U.WM)5#40mQAUSr)E":a*l7I&W[@*ZlT1ES<N^/Es:#1K"&\+SA@Z=nK.1"Gij5<9$]Kfdf0UHC-!k*_Z;Gjg=V>J3OQZ1@1cL*Ac<]MJd<hGM;pl6A]T\\bX7( Lj',)2ttD'qk!Pk9D:i^GOp`)eUE.KKsD)H]cBi5YJh<itnm&TS2d(/jQLQ+d*DRNs$!LanT  P"$'A R]pOo-3I/#?CC4GPR5GJsO0ijDn8g97>+%.USp+(=CAcI)_hnADV^s-GW6Fm+r,?$S28i^PJ'GsY<>)7<+b$PWq c'jtT#nr#AXVBFX,pcM`%OeGKWT7/!SqaqH9@X57F#JI*FKA_/1:*s&H#XO;-sOOS&G0i`]P"^1jo5W6>$%Bs:HgZ.9,WK3DcQ`W,aP+4:s5AWjM35_1)dks,7`f-VJOsE;D:c)H=7FZ/VC,t'csDfF%/Fp*T<ZFj?a(oN^hn:]s)#:OGSs*$2S$;,>?@ie*VKUWkGTsa<9Ud,l;kR>/7jjs-eH4^j=VF.l@Tl7a,ApLb<HTN>j,?PmL*t37#R)TVNE>9'LjZl^+@ha;WR'X9FjOU*Ot?k+#YE:7G>nW+<AT'8p!TUpZ,+]V>rDPR(L9MJY>ap?'1 A\EZK$bliYn&5&oT<Yl>P!D!K%BS[U!j;b0pY2 L*gBC<m%C\(L=IkV756`'1SsB(H!-k3 *5Cp_??X"'pC+0rtdLQ*BSAV@Bt[tcP?DWT5b9M9X&04:dj]&+9"Z*Y](68fF-,%BL5#V6tDV%:U!+r#q<#KG?*j,nJD\i[d/hM)>lCjlt*&<1Ko`FXB\:*ZbD]9K#`-YKZ&+g5h)F/^=97iE!ApL^rdR?1".l(e)t!h<0lg^;6!4@#1jn\Q<^$LBt\;  8`9V<`g4LC(so`<% sAY*a.P'Y=M>q-C?;k!?knDk!pZ%%$5K`XW,b8H1k9,)sEbWjs4*AC;!&H6fafG:Al,0c,eWQ4>,lDnFSf.DK;*`F7`b$WmhO_ZdOa01Uo#A?,RVD=,@H-%;/\CBD/-Jb")HH!(0XgSnIhK[%b5-/h^?Wq0Q'n!d.NHig]\K`5c"X^b"h&t,Pg- W"[X(gqPQF4W/$GtB;%AJ6[#VaHTgV8HNNV2^rKR%R_"MU:Y+-Ms4nB,1#ArtADMSY=\dYXe<h>Q])g\M!2cJg#SPCHqh711ObJk&HV"qQ^od<LfK@:FV'm;$t^k:qE>*Jh2f/+m9;\Q"@IG$0JE/Sis9XS_2T!;K [nL+VWJG!s0&:@klHsX@(f*4R/`sPYR'>8og.[LK-Bb cq+Zt6JFbdaM\n;o,ohqe<mOA$3@5dfO-)pI,e#[QOo.k"Wl2h[Q'jk0[:4p!LVWA.+l\,1cRTL.oogstQ#6:.1=.4)9q92c1]LcaJ-K'n=W!^^Mpg`5/<(6>1r 2p&'i)&bEGE.ftQfZIsRh\C4H=>3M]d5@c+3?F`]MNZeKh:/[(JWbXNa]q`?F4jl8OJEQFI\>#B-t1r?1m_[I= 
5:BHcP@3$d+aG"8;Mg#TL4.lT2A"pmGo@a"&nbID9)DCW_SVJeZBCClsPG/03gddPG7D@!A\Y&j5s93d:6kT'LX$Yt7gEP_L?(p.#r=*b-'sc@SE6F[(>4nn(ZoC<8AJ*k@(Ua(mCNt,N@b>qm,o^LQ9W&V^9A5I%Qh;RJ\E+\BQ%IoG(!3GO>l>CAi(a%mmB*\4V9(lAI2J%?7dS^3/b0--0g25foi="!]`r3B`ist5eWDsqF'GNI9q%#TmbQf.L#ReE<pcfAT,ODsQ[38^4 dQC[NUX3P9ZeL!J]asH>ont*o+oebK/OH=B+;*tiAsbQsnUFeLIQ?j]kr1rjM2TWiEbkmP1]eKPmR9/A]t!Te=W<JgQ4Q0/b`'Z2dTo?)_"hmsS0EXLo; Y98sYA;nh[NAKRd;0gbtB/A43^d6"IoRjt;5&j>>9A57.dQP^mn2Vob'5jY,5orT*p5*`'@'^GEG[C`A-saa<Y)pjZ"O++=,Y?BH_sB-[>a#cQUo^#>i+4H6<B!1QeG]PBn;8jSNcNlROM#Q2VJGS)E\":::,=0,SBCbO6QMNB.6FlAD"Bc!&H]M=f/>tEkHi4@6(]#W.HQT0FQ0IL%=-(!r9^!k2MWXTN-dt^BU/'"0f4tZ>gQ^4/6>3:kb!8h <NN2@l!ke8s,L*i$bemn\bX?h7&Hl(1ZC>gZ#`[kD7QBPK8AH9/loBF`d+&(.72#=JC!egf 95%>9#IF]0.T-&Y%IUD+tLR1:C\&R!rZ9Uf2>LjOMn%Xn; YMQ5C[f(UhZ3nAb,CO7LW7$^!o,bB5VRhSt"i4i::S=7J5X[ .Rp!-E5#FQN;+qI]7,R(:SX1%qCWK,m\9="cesMOMHp$pS1T0 \C^,?Jt;_Mm4]=i1X<C/\!][_>CD!spSN?1KInk`qiFiWF.]NS(HN]_bh:VgO7'\etNMBrm,/4>&o`W+tf4 -FUmm5\r3[ALm9[dUJ').^8);Y^`3P'A^iVS<t ^r_.4hA5Gb3L"oK2-X0CK?l0&@(O\_RZ+@__a_MCgFHIh>jaXGN/sQhOITrQaU"p&6?$4!q6CE)>Glm2pGe-)AX@4V,XDT $`)7npm8=e%*!>2FF^aXQYV_[2d5C7I_-[j]DE^\J#Q&AMW!$W]G3+N,.\?h>%o0s0o\gqVHL:%N]iL=]9,j(m/Ap5moYVK^T"Sc(\^qk1pe33N=`j(<*V:b,(#h;6rS8iq1A)RBW@!G7O&V]l9?B;Mm+8<-fWe]Zo5d%VXW\h9a4%4o`NKq0B/n<Qqg>]i1@%9GQ^2^Q5k*.$=LJQ8kXREg(8RK8c#.^FYVT@_$.N,j'sOiLfPX4ePXH;)c3jd-[9Te2f;dqhqs=S1Xcl&KmS%I(;Nrp,*7Z58NA/4OXlQ?[%3.2co(`#KCT;3SJejOR')!?]^^1^-UYY/.8 823gU]>h=#BjfAn=*&mDcSKpPsL(G4KgA.5%@acdimWp?ef:eiQ*@X)Z)MZ-pe72V)P]e l-A!=/Z<%80KA5*$0F@:Tt%qDNs4^e.Z]CFA^S"JhaB7r?A-n=otK*Vi#)<58lo/R4l).aDAeLSFsno`1Z#P3t2GC3FAal78/hoK,>'/'Z>h`:8[-`^7G-#Q7d.Vp.O^b-\#E:q86[UP@32gn-R4$-dN'48YiPh=Wj/H.K[^>0e],)7j6tnNq=G9(lHZd=*24"@WP!\.rC_R3V>W f0CF;SKZTE1ZgZc?$p&`.eE]:5^?<`]T")RtQJiXFI%U$Y]+cc[ 
33SM^_RGC<;&B^XnoL>"4g)6RaHjX:)!P%EA_91FCViV<?f`Mp+$9Q'@=01Ct*#SLa.Nm3aJ#E/iHq1+[%F0!VN91T[02@_*^D^ng:#qUmANIgE7S)_,@0RGs^n$asRjL"/[_dQ;WB(D@i]m0lDbUF4HEd4pkdbEO-I@W&S0#J-G;R%`S-!BSR3'MP*H(Y]#kOg_4<>7)"lR3HYXWpet\#r>+f`X&g[tAHI9iZ+RB*!4/jP:Wn?`*A5Y$b!eWe_[Z$M\<`@OQm4&(RWRNg4<Q-K5Xk67/??eG+CsL^`(#GWOqedc:](f HK9J\R>@k^D(KhZ.*?K#=On8L5sGd?::5C67$Aa[.t)9S\1"(id(R)-O5 GIcCO;05;l4;MQ4)K^-i;UHS ^Y2]!ac8r=K9#i0(YVENL_>WO`dk'?[m0AQOSqX_UDGWBkhS,.@W)J6s0aAdj3Tk>I' $ghFc!V[+,SS!:sAIg*lao;?1Ule(YLR*M5hgS0sRY-ULD%8"6"Y*CFA(V6hW%KE\=V2D%:,63r^g08WZV:OD'?-pdNq7Xs i$aSa^,'saNZ.?M+g!YO(Qs]>N?IUY?TSA)=5QmS!@%U6W!MQ#Q3rS[tnb._ab&BM,OnhAE$QlJXT*H3MH$jKa.1sV+aEY5p:[5LgFE[+n01)l/B6iA'4kB6NV!#.,^LAK]r9Yi?mG?VoM9RkW6j96cnWCLG`4EGS+b)'_!4tAfYJN)H,'[:^'7G02\eEZVFkL^b1C5V<N/QILA0JP4N5e7[lRZ!jLY4#!>W5%-HC5 Rb8"E%5!fJ#@l51_4a+j>&\36@h;AW;GpNcg)QVBS:SJn.CE*(5p'FqT_fe$8=ILSaA]KqU-'S-51i*AQU5d\m<5'VZt_nJFYYPa$h1N<aOY7$A=DAQ`pQ>WY^[2Pn8rm[^A$K5!86p8g@_m@i6oAIE4ScABg2^@1#5c&V$G%b6qXMEWFP=hZ1I4@2,Z9OE6AjkPDrgAWXS!Qk]M<*%H0ck/5Q\[B/%)R`\]%?/8K-\VM3NF$W1s@Dp=6+bk#H2KV`b989I@RUiQ2[\5LJkcA#MD/ah$-g4GFC2&/>F][OmO0nT'ATVfl+JtK?5OeOlmXOsmW_]2:a-*#!EDefH(RA&d3hBVH51b`JNNcZjFmZW8LhhCfRdsX_;[LPk0^Amr'Y]MaFM>&"Ah@5oag&U>0sgOh4mqCk96t-5CDAAIrtBjMt6!'K(j/!Y (h/'9m3N8:?B".D<&37kX3Vd?kdMId!r2N VO :#/M9pebF+)s8_`k-jZ KhD<2Qi; ]"&.X8Yh=`_iN84L^>.,okJOs#e\:V9)_As;W7&fsW\9M9A?bpeq4[BAJj)^GanF;dRBATt!MoskAbDbjmkS7)(,J#<3n-$hfAM lP\F0Go-'khS`KU^>%d>-ROR%t?Jst90nsR3`9OO+mE6X\japPR"54,(bhMjc$";$h-e_H5=tp,[YicH^1Y`<AXO_"0M`/Mda?f4A2.oYt0CKf]+2D$h@C<>#K5%o%)D>QD:  scf`r2dgX">cHlTH:>O@ZHq=r1MS^$gGN'Q'_MHD5n%7m9At6WGIAXS'%V*C%I(Hc@=aKUe]sWo LZG9-<nfnSKC.$E-4H RAi9N)C]jMJ]]n713<10-g^4@GNOiKK'M;3dM%Zgns9dALe!S_;b-N7\_V6-b ]Z:=+idPs8X.[G/6NRd/i$760^(&)m8U:sq/4TX#[Xf(<Oc[[%aip8`-\7$[o[(]V]AKF[4"T*h#4KapX^t(X(<?A./%YC)@F66Q&R<U7kl"G;"OCU:G;5bYh(Y31:N=F\]eaJBW0&'U3t3N+=b$$4Q*i`%Q?GA6Q qf0#0,: blqbON(T+J0$D<aD?;-%#`'Qc+bh.7ppWMO@a#^$l^+#Jq[B7"kYcrpXURd#0tV!KA^@bn@'!nEW=i&o .-Hm[.B:Y&q:dT+Jd)4!/:Hqa1R;P"q- s/a8#:BWc$^ 
VO2_\aXPY/p[<@oIPZ.m_&A+>?USAg,kG:bZNZ\<GShNa<t)&ROnCA1S )RZ4@VQ<`6'(RA>Q2Z@-aM^l5CFCQKa%(M=APehR>Q;-8-0P/!-'q 3#apC":a*D'E*a9[*!Q^"l-Bf=@@Llb6AtdX@D]E)T@rUe^+CUqm:o'']Tc>hSVnI0*mTmJ@r5`k9@Fk`R)2 ?`!FIV[Km(<W:a`+GdRnS^/q]o<>Ps)aAdN?*,Qk!,[^72@"0Fc!?'pr2jVr;W,Sc6>C*Ttp33,+@[p,/0^N]>b")p*3^c:p1UULHW'`Be"Q]%:>>!lJb:/f`j-Gisba?UA#G)FA^tE[=KQFMt-s-NVI\03s>h;`@E-D6t_6L @ Hq(Yf[.O(@;"<em,i[#LGKDo>R2i)7l/>4_`_NaT19i26)Jr_+d_$s X:\&;O%@[( :%Z-,$!9Ee@r5MHL6G8&?^!Adb]e@oD+9ghjPrk8-A&1KfVNA+1Yf1;04dqhb-<>5[#LJU#;t1K/!e;]$c_eMG0s+F%Y'6$A/5GJ;U])jA/JK4Si03G$`^7<RL&jVLMFT<-ZXTDbDX3ld1)J1Cbhh^/B\_ErP#'X)h#l2f4M6tND7o2H I1,[N6(Tp3Z=eZOL(CjFp$:kqAnLJ9,LGG>A0J@g10a;Z*B3IjG4@@R$G&oYI0W=!m6CaZ96WZSC7"=QM=FKpoGA)m`Md7:.amAMa5BToXgk@PAYXElF[]A@U\>AAU]t>MALHRkqRZ<(*o8coNQks[i(rlXl1>/#Nn2`GSaAE=ab>6Rt_` +=p'e9f#9QVU[#UgLV7IM*&6lRf8(.'-DNK>ZTA;/hkV`1&k+A8n9!6JO(3fb_;O3.[ QX4+o^09RMK0-'bi']\J\g+#04UmYei)g]%=#-ZG&PYQbY34(8D$6Q!PKT$+U2CrpR"[K6,\XAL'El-h&TJ%VJ'-qsL=qVdGNJo`V0bn[)oeX7S"qqLl50T=h*(d]Y<Y8VSSdiF5RH('QbC$Bf=O"D67NT7305XY\ h:D3X,]a0/V\MCg7T)g1dsbWM0GtOtk,;$3+aorA_K?1fY$qajObBWn#$1s?ht@SKDj"mt5niIY 3Y-o/CU)9sWM]_G.8pk?P"3S:B<@$PdEFpqD;#,+:T[K7"'ABO%bFMoe)8H!AK]$$)ljk)'pMCI=)J'mo!5>o@@C)+)(6QDJ_[O&I=[:(O6ABIPAg"jFg+gm0=kl,GMtT<#L@<Q]7Dr\'SEZo> !sjU%S85O.iG`1SptR!Bks[AQ':h,\FM_#p69ZCI:VR6M1M/?ASYkm \ng;kEZ"<+31"U@(`*ar^?jd&M5Aie`T.RNs2$H;0BZqep.33K $CsM-4)9Af16*)TE>rZ+5b0;-"' &edIKN(`N7A8)8qsrW>)_s%d4f3o)lsG9jMBG ;1nS^#QE#YkA?,R6R>JfK=%&j]0*(&6p3icNN$h.:P1_t=!00#0l!SXlAPP?>03)\99)GUBkPlAWf`6&?@!l.BZqq8D n6sCeXg%M:EK2`%m1C$_kF+PW%IS@.%9s/CF(Og_:hT5 MJR6W<-:S99' d!:PgHA=.RG6-bUAcd1.5eY7&g]Ca1,;U?XLD]J)\l\rkG=Q4ML#9;HD$> i:;On'i:k4G:%F_6i[FG'2V<>k?^b6.lF;leO!G<Z-8O#6C._b409et0Kj5+R'>N>`T]$\mH.J9*mdF^0kNC:24 !5(S<Yp-EAT\_FUF(H n0ZD*M#Z' /.kCt)0V7Z/c]O3Uc&s:3r9hrb;Z,1@i]1SXUBH31IG0TC*=j!LiqoAPG;seVIL,qk`!TUl6f`n=R]YGQA>@X[AJ7 Q2h[K:1^Ab*J"56tm<F/d)&@'B/5l_TWcaVrsH@:fBV 
?@,k*A[A$;=.%cI/LjP`%q2ERL,Y&"MX&tP"PP]H7M]Sq92W1r4A>B%7$Rln"q&[h&=,BYh_At:?asAr#-fM)n]lK@%lA)h=%.%-X+LM8b=K9`6&<oH-4n=2MKM.GK,2-<i:#U3cl$9`$j?go'q<H1PirCsT]NA"&O!@./N\S71$T?<Gg@Z[pLsh6fn/46s?PZGT&  sVC/ln3O'Y@ Cq3jnT,3RP^+UQVLUWcL,mQhb2CMk?X3dHB$W4$0:,e89_Ith/_BH!!e_+BZVLE_CaN(%+#)2TTG'XgiVDP78AX*qS]'XFs"_Cs.lJp%J.7)dI?MIQHhBI#!]`cW-*bXoR[Z%d^/;3p! Rkhk:h70^!LF>;$0<iZm8b@KX)5^%hN2fC6'"Hs+e2ij=`;RHdXch*aMf*;;X-Fs$_)C$+*Gbb5rYOtpcOX0("e=j[O$D4P[Z=) q?4;)&l]I; B,=gj=/DlS(7tBs(a\dlM6E=[" A&/G^`")L*M^Gt=m"/CSJkht5$eY4I/(Ar+-gAqSD9Er<o8r;hYj#@rA:H*W@SAi-ApE-;K.V&Sc;LC?1Tq[0%@Aagb11@</ lQ0_2aaXcQf?0aT,;Y$iqdWe`>D,5X$QHS`hNP3m9tZN=s#:5kd6!Tc=&LW7br/$?6YN@L0E4h/Y&:DeAB64,D!osg%\Ij0A W(0A:QVGh*ne^n$m%K0tPW969Gqf85jH[*=2;8V%gpHV3KH'bqZj#i]QImc!'!rKVP5AP6l1K9^T:&DmQla*='?X1SgmA>;@'>K!n7mL%]]L+L_)NE*c36S6pRk.&P,#AFhISXq1d:(E@)ltEIq2Ik&KN[.<r%g]Q\-FiNh/1'$/EfYT1P5n=#9[AEJZK;pMZ$D<gj;F[!q^MOT\MIjM9JWkR8Qt0(4flaaE.t\Or^!pR*hS?AP(IG-H--$XOm?TE<H7V_UWJA@aEX`I3'H)@b!#obCq`4b!8_XIMlshUMAW*R'+,M9mfI)jF[1H"dA^AJM]#fXN$Af3F6"a9]`.!FW8h<gk;nliho6mfgYmBs`(r=@9Bl%&/STZhMnNkqA4jX=LjB5aN;a:(pU[@e>Ln(AH\qqX/[87\(P]rM3Q[F 8hE:![%M/*2V3U.Ncn$8eT20-@NA*RNt%p_-"W]/fsC(Nop\Am"f.VW[0,jQfLCPm#XBBE2U^sV]<H9&NTO0^gG8 M$'Lb2XLO,+Lc7+pgAVV\NO/=@:$5ACqg3$_krG^q:(KJ;bRD-[`/`$%P,ipR*5Ns+f@SdO?o41O6gPbKd33[NQH%;0S;d(-R@ P<_;.> GasoT^9Q (HDl.MMKbU,fNR>.[N28U;d>Q<NcS*c!1Aka&Y@rX0abd\tHKDEVgKo5dBfS#qgsSll_;)EFE.c(/0s+?<0,m^76Keqie5]oH(KXX6NsbAlBUleIO?J6?a,$:j$!j6"HNP&$K\o\AfDK1\F@Ie,"AdK7=Jg16bo*#^KIm$`A],19K@*J3_b.j:Db 1 QAn<Lg-ADSI12S;,7F,EX#/b/D(',e8=sCVAP)nVQX2(Z2o5K<g"Kj1]I4A6[d7M+!*_gP@gog$X7e*3:2h'gMc3K;0>Heeg?RrMJ5diRlXW4OdOO'Y<sO33AnBs2ldD1]`67W _`^/#+5(aC4YCA@lh?& 
^``Ab<kk=apN&1l!:1YgajhalDN)Ek>SL[Tj0KE2k1A;g,+3da@N>X&iR*DVB$hgOrB.0q%iA[/LU!#f-s.1gh/nd,8cK<B0p-.p')jE[X`VR6-2V9*0":](e<g6fHl76l[9+TFDN5P=t;Qk?(tl^XccUe)QT'4^51$*NRh#iW@OC>*6!p7sFhgmi0a*q-g(t]^eOQCrO"-[SH0#0o0Z^pJZXOO`Z9EaV`C?^m#Iog#sYVgm-hfNIBGUR3tebZtKE'aN#Mr<QcE@'<M_<Q/Lr"oCL&>Qo6<1k,s(8#k-p4spe`o#\eUG?<n0!i@$F]M#.N.AVCZ.],UR--X-C`,@C"PUjhK(Q0N[T3k]79*lSX;DFl >- .;tNl>H9tlP)mr3Z$!'o+jX95fDM[t2"1G?ADrG9j7EM0<l/R1G]jl!>,;WB-FpEm$sE[&"::Pnqq3/@H(,bW,f/KJZimq<PPgt_AK\bmbD&DVao2:fV$Bdk3t.K5N\&h:?=sh33e\R2b=9lFS.P10#K.k(VeKncUo%/:m[L3f+)&8,fcf_k<9R[*K=[Q<nX,^UYSeUOG1dTB2,)-ne.LXMrB9#N<smqQ),mFIpR`gC6ct73F)hpY8N.bhVGhfXb&DJR9b-[" \sg`Dqf4a`mH7c1em3m<1Pl4KX9_`cg)b8jcVAXDIQ]dtH<`3^n\?M?,e)]P+RV4`A*CAN*tdEE2^ffnrZgAKXb"'@K'J dX?)Ka 2.>(WYlJ"eRMO`TXOpSYM6-Ob&d6S+Sf&+!Yg]4+A=qH5S)3d7*\mdiP?Amt?[ANU>OW#L8teRfSZ[^67<[f: _tX'AEHnAE[W.oCg8a$Pt_g`'hUe9L]R)93.*,M<X0R&t@#q\qkOhcn23?on@ZHTU-;\qS@%<;P:?h>W!-2]Z`.hh(;BT)2V/2m(LY&)<Kr;3*d5[DL&?fV[n-OCocEE;@Gs'd_.%,sI__2VpRl*CpYCnVl0l7O4V["'L><[1I">#]<<LW]%L?,[pOC<!` JKt*S65UOW;AVD#.KbR[<q!D&>[nSVMi+"9Nf1>_(KKoG<m^A[ATiW!kZ)F+$1Hoj`EBRBOaE;T>9mT&s&_k\g7%o,qZJDOtRAl[#@\AI&^*<VAE49^JjXON0(7pqP%`,!Pbo>fm!No$e7*4r+i^E^T<k>s/=osO'Q.<t')"5*1/D3Ng+c!^PeoFI?PIX'k1!?]NeMC#AGT"GqEZBf'HB2U)?#j<$%WZcl389349(Rin)]>`OSr_Y,L0TdQO!Gt#A)+2t^X?Li6\p+PB-4A(eI4$G<G2KGQCFMGB`8ae;T.@a?S>BrC47;&FdG2AB.#Mf-_fP#W@ro;\jcEr0/ct12[+N%<TWbK ,n &m!ZBKtjXU]f?C7\lVc[Q/PRQD#jDUU6<\@9Do%9S'n1cC3E#<*&^lW:VMc7%mA>="D"UF\qhD3nJCm ptMaZ!-bS2:;0P>&[T]k]a$b-3XT\-'P=KL]A%Xh70nX:Uaff`i#Pgf,edW'_GKg<gcCaiTOTpt/Te5T81ALM\++"$7jOIBZ@GZ@J1@d_UKg0o?[&'U:Y@4D")CUMik)6'Ob="2E@5\8mOs;$4SB/49L+hD)J;m_dSZl0.JB`=tM^bg4m?VN.n5o&bh@hR1&.2LP$Lb?gRk "c?"[gQ/f(d+tJO9O flH_$\84^ +T&5TFSD9B#ae>7)g5gYt_,9\L]>go,jr!tgjAOlRTI"3DnSX(mtXKlKgA9bD35!>>*1_6/"@)NkVS%mKFfHmqZ7`.LBR;,p=_Q^D63lt\Z("HjpNb\4^0<m%JoPS:ST9?D9YciCmG2iS5Zg<02U*Ai?nLJ76">-<L9\PU 
Pk?V7NZ'd]t-AZ'dlO+>U0cth)+ZG"<2g;Xc.n]L$btADVq.e"VWSL1rV>=4C=0;o8:9to5FBNO,nAm^Dh3fR1j,!M2*\*;0e(C_i,km\PAn/fJ_^5nfOMk<WIBXL);:7L3SGnnT5S]U%jo"U-,@kPHO$HN]/q:QQ:2X\Y[VBACe&4[=*FT+)Q,Vsb`6obVQ*$r$[@t-p?"I !6g6aJ% `Z@4Goep^#q!_]F?P%>8=>,d;!ZB(#TKmVTo>;U\Ckq5Y>[/b-JPf7L@tL#d6'&NBV)f],\5I-S:#ASCGF!XI3BH^=breg"m8lAAbV0k_BSC2BZQ:U5"C0Jo1GGd`1gTH&N,IhHff.UnfZTB_WfS2Zn.Gqlnj7sZS?L%R_S8m+P)f#BZ,.]jj9;Al#9I";l:4Z2-IMIZl\]4KFmiq=(t1N&BA8L[H0r[Wl; ++?AU3o.A6T)cjD()7_99M"mVd.S#"LIoicsog^EW&k?V*X_Y?j-)4AT?Jr^58IJ:!p2G/q;iNrQXTMOF=JOBfL:ZBlh"L0"PTK5Q`1p\sK0Ne9Jn[I@ P&/e<:,>J<j%XEU<-@Xbc:f(qMJ 1BQFad,.,p44t5/5r)Vq=c5(>!0k0iVd( -oVhY$&PS(>6#6D<Lr(322AeR`1Z*?O7WNHJW&_CHKa<5*h9@tn$GWmW1 F*:'A`,%sni`_trc;RL-M0soZs9N1oo7X%'D;G$;AeTt(5dm96<24-35clAp1</D+DBm6`#n>0R?@"/G;cdJJZ'kpIYjE0^W;XF)@6)ebFR5 D$<WN,Fch3(1j? <0DW?a674XLcm/TTJkg(ScO[?2nlkA6iin !4D"iL!/W.>i_h?4'XgH)kK9].eX,I+<KN?b#7CWVi/8a4R/a(4n>d 5@IUTA#m/_>^LdkKsCGg:6%?`CO-*2AhY=cAfQE:jq/LUfKS%Oe_%Ac2571+OS^:q\+JMM#DTiK$?qs2qAD?fp6Q'!4H*<07)Q`Qq/1++o)VCNW ";j<S&+-[ois:kC)MW,Ba'35Yoh1hR#"JlLPpPTXZ@Mr:fW:<S[T)tin[%D6V*KYcI=!;Y]AKQ&o]%Hl(s(E*6A#q@%S8Wdt]Ff\q9p6FEa\^GSZQ dSk`aRn!` :,\='EGl/n[+,6BK3J9OZWbV'q9&1'3;FmW`QK'?%mGGOIkX]F6\jDnTN!n=:2WnA%$?J]#P(@M97go#PRJI<sj3/aQqmt$Hi!!qKr6;?`3XP0bek6AK=q\$I,&^sNafelHaHCA]U?rX8)'P2qYUC:#I7>PMcajPaiR,"+a_3$A%YA]j5SdVR**sM9K!(V/G!#>0_l/:*MEAiXIPP&bs6%ea@Z=[;l`TqM1!o+<ca09%AQA dOF:LBY.8W=lki`rAUP0!<bk[LLFREC9P!dq`(=$lVl=Ct\Z$'lHT`EADl)FZDG('S/NZ4J#+<W5t3' VakY/I9^0;R?cc`qQdq*?C9R+3YfPmG_3;O.3QSPA2'q-8HL(m"RgelV)RW@H2!3ZrmX3h0d$<*Yq_P(sD:i,NftXn^C90g9BR!P\)&qNYte7U^BNWS;D6/15o&qtTG-H&bfsc2[oDAV\5N5RKA&[[>7mHg-YD?3m%1GS00r<P5t_Y3+jVHRR-*RLOCFA:="StiBqqsoI0fGl!CV]PeACc.%SA"gn%jmS/Fmiqq&3=X!NUBc]E%k!5nZaJ)VVX;9B+pI2fO7Fk!"Afkr05Bg&-_A<J9DYp'o8`oHdc;>;IIc<NalI7=M'^EXK Dc&\Q"P \29;7n1G`QS6=n].\*o1t(,">QW1EAK83'=;% a.Q8sA.fk2.Qcdm`,g=$CbdABJ)[&F*n')mgl`Hst,f?QW"`rU(a9([[\2Ikeb&,_"i_"r!E5\rhS6>^EK@J'=L"Lk\"A,VSWrSAki;r<'_nj>'B4%O*TQo<CR6.RE 
.0IUP_ngW5TZoJ]1dIAFPpgq.m)3Sm,T/j)kVtaIA7DDeF09<\^g1HsjB3jQ!3o+M$:I#"$Y)mBBNJo#Ks#],Yo4AW,l4F1rIegpDd,R<Dd<+R]d.^aApA^WoV,H(DCGGIU$II,4(A5fP;VVbG$C(Yr4$/3ic,QRIZQV`ak,"Rf_N0r))A0T)nAVr#`t$U7l&RdH,pG8g+$L#q_ 'bjF!j<s%^s$_6sII<2)F*`C=LJUsAq"rZC8%'?$DnS%Uj@Q!T*N60URM'o"ZGssBF\@aiQJpc)CM5cgc)O6Sih<!:g<7:gFgTA2bS .')se-8*&EA_ r.l&[0S/XJ\as=/KfV60Ai%,2!O@S5dF@*>1D<"\8 A J;Db)5VXGp.JEDoU&cCGK8:DqAh'DJ-@Q@9QmL9"3Vb6-P!IVl_aXc<WJ@K?a$.4=;K]6t5sE&ePgVc"+6(Wa?1W]]Q?,0/,l\AAA'$XK2,FZ(;ORP5jJ`<I+c9#A`RAKi\Y W9&Z&Ck[XP:[pf3'1Ls]#ALd.C7c nQG" >_Y`6,d<g2L/2j9ZCN@3Rg?UXpDCot0O$Dhnc6P.RX(e.#4_c>4W1#9P-[O^$-$XpP3a5TP9QapQA.3Y,>S\9VE-s%!'-:J3[>@E&RU!B^l`5[/]5NF#E)KN3iJDVli&(^APc=W;C\ts_-A DQI1G&DRaQ["gUk'[.-qf<_?7:DTEOcsA"WZVI!1DA?6jC*3"KF1dik<9ZsM!*!sIW9A]?1A3H<dr8t,XqXSo$;"tT2btf;pV#F $mi@U_#S!/F*)k1_.j090<s2%C gW+THP=kikhtj*?4nO(^_,-jl :[lbl_SlAHn]Xe[A*bGOM>`Ggn."5\R"m^cEO0nP`UXAT^:r@A=GtfQsYK QCrXn=HkrAMhY24e-qFNG/N(E?tFiAS>++fFKXh_MKkGI$_#BhA]fm7W7nL4b-3>dsLfceR=.-644QJa@%SREh[fB%lQ L2203F:Z[re0bL*j_M<nstP<:#;\VKQGLs'U:K]h&N[Vb4kQ&UYN,-D.[0S_cUkC4G1f70%nI%T&[rTG`WL'E`j:Tk.>qpjB.=$=)1F6rVgEehOOA?,8Q/AL@^'f.=D&;AFKtF1!qa38E_e4EY.Ul/.qMA-^'-L;nZ2+k<Pt&-M8'1HpRR De9',k/MIA?q;n:qXk!5bNdlG;"YX@G20Q.(djFLeZR@F)_\9*_BCp.i6smQsIXE>_!s-*Z\f?P&D9e`+T?U;X%EpP)+m_e`WaKK*S:f%.L[9Fsng43neJVmSA5VhlEhYV<4Qk/0$?7?!OIH7/%T+k;h.d?e;,Hp,FMVjL*]ZBAjbHL8e<:)#m HZC<]:3D\!gsl P%%k+#`ep.P@V8@FE;89DnX/oW*/Qleh*]5O=#eQ,h)6^XMds1%Sc?s*DXTj/F" cJ=\WI%pj;dpFa2Dbs0!g-JcCql!(t61`Lgg s3U-4*:f)@SG&dS.P5i$9[T/^ATKIBllJS$Z9-_!Y7[/`<Ao\GJT>QWbd]X9K274$"[1K.<(j;7 h=::Sm_hEn>l;kaZAnVtEV/C*f^n&At1FDY4nqJ=f=( Jnro]cA&UqH`M+Id@9h7ABJ)2&6%@#bbRJG5reK9l3ndDB]KoBnB;)/t 7QPTpk@6g(?q,eF'"rBXXI_-LqfA/cYARq0Ot?ANt95D3*f>1e'#2o*D^->`HJ(@q5E ZE%&hM'okp0R\*\MAAMoAMje-0SAU)%$\&bJ]49!njTfPEA*?AS`%2ecd)A>j'@S(Sl<W?$G-\e>8:@@?!]`;6o,$k#J,LI`T?.; DaUU9)#^nCkr,-I'/*g>QJ:O<!$Mp0#,ZU6jADjA+_#&8AT'nH pZ,:c`nBrO\QC+E4qCkZ.QgY530>l% [<1KRqcj"TD\'O&Y2<%X0(Am=7o#VXg[h9Y"@1tV5&#@--Sr.(QU.0nf]P,4MRAl7\_H7-f0 
\M/l.UUg\t/jI:'Zs!r'a%^o[k8*/hA0g&Ht6L(^A6Z*l2HK$1*PAZCN5fM(G^nT\V^O";<Rt404c #tp_FVs^LleG+8!(Cn=qX<_Uk0"UClC%'`m@PrpI6jjGbBcrj9qN!#\"TWdoI-S83tjGV7B7?G8l<M:eId1JITBn\o9`$K!#/iAFG4GA/?a$`Ai'G,<7_C_fRWq`CK0S1;8IZ2OsS(?L%dABGaJ4-H'gOAB%OiiD,?0RT,cH%!tRnL2RULg(ab^X^Lg'LYXXo-'I":3*\L38B;BZH4D.Kjb`T,1$!(.3nD&geq>oobZb>;#1WeTQdB;9%t%E9PtXBsN'QkjAi_CK<1*do1HS$lVpatOaAWH4<pgIt]>`.*)7mP%gFIAo:A37TFF?Deh)B-PF)QP<'8p>A-D)\GLG+o_>+O*YepW=P[b]n++Sbn)0J?@ehKoD?VSmHiK++E\;$,#?,j#Ks2a!c3_R=K-DJ7.N,8aJ(gIfsX3c2)r]W)=@e:!-CDW`AEM-UBa7/MCc\GVGa"C+pN=B6Tc)b-jcR)?`?ljG1'4XU/7j#qFqg8><qtKJnQ0f%JsU=-b)&P>@MmXD;,%mni&W6U-9%.#A#J1)j6S:qe:7HNrN9f00@"=@g+o'=bA(LB(#M2Q0#(15*,#hI=0ei7nsbYW?rl0Jh2_'m9.CLpb2f^DLI4'h;kDoen2D%[njMd*]Brq',GS%4Ti'&lri]L[^1,llHKBI#LA_]"5F,^ .G&,@/ATTj67<&QA;A\[C5bKT>p$*d"-I?X+RFd=Pcm8WOk-O(+j[0inG64`nWU)>UX2K+8r9]8s)NXL'5L)Tkn/i>P\UKVb H-#/$Rk`J%S<Psi0%O-NnsM1Oji&rEJ AH8Ie*Anj;:/?aPA?,q+"rGam:f^@#Yb>ma#PO/=H+gBY"1PWQ]-$?O/Xn"k09lk-i+t$&aEV!:tgY?5>k?Q"f9#HmnZK!Tt$5Yck7V3c?ngJ7sn4L8'kW[U7CVAjhCb`F/Q<RU*3?kSQO83c!G]=76=C6OENoL"J>q/6rP6-eIQ[RNfOihrr%[ta3RXVhh2Nj4A2Wd&<;sYQZXpo+4Qgaocf]b 34B;59\?%Zp_8TBbO/`4g2lAm'H/2%9/;eP<peJ]:)M'fXA(+Eg^cAU/Js6CN_!+1a'\8nW=XQ6T%"^)&U >-0.U`[,SOYJ:SVE][itH,RW"PbZ!MgHV4%[&-9LQS^O\C?B_YL%W r1/\2l`r5r-eTKc7:jRFI[3k(Bdbk5$3E'NGeg#!Y),<%'`Q+$Zh>N! 
?K0:2:H`P`H1mq9kG+',b]^`-TJ6!VDE kXq7ODPC'db<HTc8Y \?'3<nWDGj46c%C[=8_PPcC:H@aUJQ@jc,/e5i)MRWBH) 0] jf3\+UfB'Ks#WP"QB0i6g0Upn(Q-a:n)^^o\sh<eGA^24 "`@[8;.F6FtXr\N=5bKpt!>?CLLWB<58:Aa3G$!YP9>FaE6>YRi08]PTC*GQgeXpnS9djJ\/8B6\i^40"ZC=e[O:-^5'F#3B^MlD(j,SCA\OLt0@bK:_6]i^3/QrapY0OcsfO4F-h(n\FqE5JT[?s?qF._HG JiHBZ`;*b#m]=(2%96l#.!rYe4((6K>DN;40gp@=iV_4=@33s=!+%_Q"N.7W)Hb[_/Kfh_gAKBL*NtJ-_!U(ms5KnL`.37@`C"sO-R#rNf25)WKiB`CM'7FJ,!b9#&+:J-6pb52+F[XBQN4WZG1fJS!Z*s,JmA6[KV`g+q6pq4EAc*5!WA80f_Q[>0]2*[tA<G$Aci\tYO%Z?37[Xl-8)9(<'F7r'lO"^*"rf#h1Vi#6=TtDmi0@#5D<ebV8SZE/S9Tc*,b4e"ODGFVLO8G=bO((lg##k$-8A<9-VYbi6Ai"OJ/Ec9qc!(_P2So@aOB=/jOML8m!:;+\/;LDeA&:JRrA'h/0:$)`grlU5^7#[4)Cl1H3'k/Cs-+dDQ<i'(/9(320V0:NpUH;)Uh0>3?=^Am`GiK"R:N7*]<"c4# ;gZsp:3<69dbiHc7CG=Gr1Kgr(rt$,e7;.(oA3C1AfXY`q6pO0%rB!M+a_AD4Pj46.O2mI@7n/Q<[@,n@t5C':,T?AN:^[P57m(nl+Ud4el*[,M2jn5!(?2'M\S*7n(EK$TqA?THdj*g"?.YM3F'\atVAkckL:E;CKEA0_!dMc3n)h\Or^A'YiLPW&I3=NL.Y+B@tATD6MJV]=j[F8N'W*nA'hWpm(^(,*p.[BYnF84Bk??ZfX7(/i3_Aj=fKl/N&#kIi8dIAM8Q5Z%.Q2A"KfX/or\San76C0)s&Dlc!MY_R+-f4A.H;'$H>os4rf+#>f;'9)t7KAqc4nj+cB9)%]UrV6P\pS abT$k>HB YaIYN]j3r[RM;I&#`T-ADV52bqOo->D'X,3R3:=f/$Le%Q#g7qb)2[]^mJ<7+%N%%!`#A'L!tQ-s?Ri??bX:etWR_)R:."<%g2VUQEe(&'H%/4k0eD,Q%g9+ %O9\0Jam:+d5.aE+<Z-D<9F9`_HI<hK$B9r<]&CT,odGn,&_;2sAL\^CniFnAEn*&8FG9'87D" I0OPOhWIHT<GB%N"An(M6OUjJ>):XrnrDCZ)J1MO&=)5G@<iNYD%8@<GlIoM>`bQS[Hn#E)N_Y$j'^N[&!^N.KL!^YXKH6=JRr?)Qk$_c+N(38A=_BrTSJLh;s].,D^-)U]H!*80l@_XA]<'iCPF$#ZGM'2,JWo=MP'&Y?HlMKd;Q7L<1#U(3f%8"iSNFfh.iFXTSAJQFgYp6\B)[TI)]N\ZNti]8+$OKoF=eI"AhS.VcC?E+7I1IoFV+R`]dJqfrF#@?01]/QVYk!n2aiq3,RWOJ.d!VR&h.RRm5"C1-#hSAI1NdXhq'b.MAP#YF"Zs`R+Uq8tV[KQs!bOlAG35odc0aHh=3qirWA<ON^=YW:_T6+CG;qT(<HF>d"?Z6I^.4lb^XHM"'X4E=[YDbS%I9pR[.J"kY6H,)2^Gk-K9SiUN"o/S-;0_sgoNSsh+kp.GAX=J5,E>\l'$. 
LGU$=Q$Q..^_Y%gf<X<cAI`=IC+,aGb*3U4Mc3k@gh58D<DZdm(+Doc1<UYo.2a7XCP^0=7Y+=F2s>5ft@6)hX;6SSVD(<tbBW8`MVq:;(/-A91'9P4<Q6ss`87+P$@7]V8ff-kaY!*ib'9>2ES,cA,i9;&9p;U`sirQZ[_n_QP/,lBIKpaEs9=#l+*n=cJ&3bto9DC9UN%ZWiW?l>TE26L`D%lcqQ:U_Cf*5d+3%Il0k9>Y7D[r`#pPm[8,.]EiKIIVY:jI*@tNioeS-plYcaZ3YKc^3rUr8kS$a1:Ulll&Kbcs8a)cCXO.n_/?DKneYMB_AlA"giW'AkkjhA$5#Fqo%4khT_2ABAA^QI 0JT>b/E`r!bNVVXXNG!.*]i!PVfh]MT q@rt@:%d3mG8*qq b<17hcTcU+Rk ]d#.Aj0:Sgh)R^:MN6o[R&2i,+GdVdj-0-QIngWG;N`$DBX]l)K0%n;3C)A6$N:Uqp^=2(XBb!]&AS;Y1-;)IAY(o*3,"JEAmOX O/R`(q>=0P(3`rSP?:`mR3N-#En].K6e/h>Qf6[>WT"*pK0=6cMAUgL/hR=e(%Gg"nED KptIn1S_8P(ab9M?RY#^]G\3s(^qcq-6E_K.]hRDiM"sEU\ UWV\4Q7h;[2V9Z+dj!(E+W/Fl//UFb\`QLN%>YdO90@4O96K;_:M/4)W*Kj2e,QZXo!Xq(l@6<'+L=c_?,B3VA2AS?AfU<CkU%`V8&r'fnT  o4;XCg3cRmNp&XF`B/3F&(kD!Q)b.bi qO+=B8G,d1j@Gq&f9NOA60V4'm1_A!?Q4AXINT%di=,ih8D0Ts/i(!'g4/[/c/hN+d.+aUOOKGDD_\lA%-C8e%OO`91bt=gb<`2U'^K,Ujco)rreIN&b`HHJJ*],A$ 9d'VUI5gSTJmoMaidJlfq$3?+/1N> R`X js9,kLt3$GCS%21rQ9seO!e&@;3AVdl)&3s@HCfAj:H8l,UUE^*b!0Q-AL8l\:^#2+ $KLAh&@C[(Sit(tV27A*6r<X=`>lR=>7>6dM5hHA8AXh](F(==jB"mT&mA],Z#nU(50e\=8lQj[Y3d\N/XQFsHs<bNM/_:Yd+-iY2O_$QG=EBqmg,[LGP[d0cAhd_aXZ]0+TY<K]oVr1+@<?FQ8cjK<b3 ZaTEn8A./T>T4&`tY'*H)m!ZR[$T<V99:HA[%h_>?^Vdp,RI;oJVfKp::cpH:\POOh>7Ye^I1aW_BoEl'B@cj'pk]]3EcShAoPXg7jqHI6!KlVl(kb1"]6W?7c>Gg!,;<l2Z=&@QQ2CACig"q!/F`=8_)*e&Lo(4f%7la3F=trN7i95L2$q.@5t !dO@1adiG7"h:7VA7j5m3R+6_k-6hBOgIi0st[<9b4O1q=HOfJk7"TPdB:0NnUf5hg?HUhOO*[9j\=QH6J!fe/f1F/G=O;A*qHa4\%A6B*oFLtVLhNZ,=k'opTa`ao0,]onfJC"`K,CSp6K50ID$C2'A#,)rQi" `bdXVd^7L@5GZf\['!kXp'Zt$9nfEi6$=&C?/7g2$gr58BRQ=*#j8W+J!*,GO`\`Bh<3gP>m4<]'.XQ[DcUA.9/UnWfWBf`-%DTS&g*O#NsfW9P#Ags_HW'$%0RpUZG&kK45VaCW@;7TSA4,[Zt,,-@b18*@BY< ,1.Z_nlhV;:SXo&TpqJGWb99j^lX^,qY>%dSGtf)M'FgMW,*c27@0]ACL4fH4HH4rejDXr_T@0IA+LW K2nA^0ZTG:[B.s5J`0 ih'!10=[:OtA*W>8Qin#'1K,OrS-,1GT!,5,+2EM("!k9RAD=M`+p";q!S76o79;Ii7g"WD^DOT$k4X7E&-mZj&GfkZa sl:G;(05#jSq1Fm"Ym9q 5K!U.G%_@Eh]=,M(c5G[+:l-/)U,)Sl*%A\$3rW$X;AOXhAX'sf*7DBTi>9skJ%GQiYSk');4L"tMK<\(HVF%E&,coajVoUP^V_T5?-!MIkCnGC-m(2`<f  
:ZE/XX1XY0A4U^c,f9qs.m`cpdkX!fH*:og;q@KEW(*enVX&$/Cl^' /\EU+NHe!t+I1#"(-h1$!* Nrg/^XG%AWrVIM]]5AW0i$n.r=#OS:E;gk@>mBGWj7(5T?(9AQ )0tYgAo?&:U=Bm"7ia[?PC>0)_qWWUU>/jP(q)+OWn6LTW \gfa>^kXsO^qQlD33@qAUseAl> "FdoEpAY7jtf1mJNj0=!/sSpBU[Ghs\q4j0abhL,A"f,4U:]21a*5!N$p$&9<Q1Z-8$jrGkd6dK+aY`T8V> ]0A;EOf67e\04b.-+3d].\mAkfdhC=@.ZUgng!9dqF$7R;l7)&lE&mjN$/"E,<GiEZd?i61=hFVp<qPGX(j%-5*,3&g=&A@/m=9Ai,RaZB/R>MAsGAQY<4&AF'[s:lG JT&lO=[ck6$$TQl*B@ViB#gPRaNF:oKs?GHXm$91<\D+J#AScG\fPC_!D:5iHm0QoU3aUL? 3WB>`C]7QB]pQE,B#!]CGCK66p%EG6[8AW@"*Dm<8H2#(:Fgtae4#<t,!W'TBsO,"ns&9&'(a0%.jNX@92"p[_l=r6*A>!&CAV*?!A,>/5j<k@nW9CL 2dD!!.#VUC&X&+Q@&+KMm.#oSInWX/I@`g)4Mh[f P+X4sL,#=rG BAaPKtP)`Tao!aE^_Neqd^_@P1[%haI*`)<.sid4>()QJid19Wap'025k,sB(-1NVt\KAlm;/6lnQ'=$EoDl(A!I^6?0JVOO@t5Xj6a]glpg`Kb$pAVC`$Z`%*t4Sr^+3%CT?H'Q8:_C9q$qeDB2G@&NAN?3"2ct2!R`J`_Rdk<[2?#'4I-&eWNbEcH5O^A<,45kbAJTng5cjY3a$@#"7D>DgbL_oY[>2N<37$)A1o52B\fR mdT>'igh2Ij)VtGBU\6;<?g$APIHf EQd)f%VgQ0 &cdkjm\W@4KrXW09na.=?s?6\fF8f#<A5dDVV:peYQf+2i`FH2l+5VNKe+JM2i>d6C"t`L5eqC[KAaP4XPLQKYB?Dhmo?,Ul3(f]AdVF6E,#4?N;@+d[fAH`GcpAl j %m>5q\/=_HrDnj(S=l)gqNXXt5i3tO<G++Yp&["Ds2GdOLaD%Y2sBA*CF599bLj4b))R(*Z8_Q'4A.AQH0RCA3*(;3i"Rb_.QK&$mp56-qQftE;!`5[P"Kgen1K9$;MKLiqCn]Gqf2r5]OUOsM3krBF2=)d5-*1AWrcB.=cQ1W;g+C@RNYI6k6e3Y&rW6XnlOp\OeD:-Kf1:;2HiN`_97?S(,@I6saUZOCbQ,K!5MY!>l$/3P!5&dINtg7XbOR)ngTV^`f1!#/<A86kXZ,?pIg#5>[-;Jp<RTNAm<5KdL)UHC'0)QMpcX631'k($pk^g?nX3\]GMF\g!:C7Hq\="A61_\obIP)fn@S6LR"SiA//eIY%2on`7*ER_3&nS6\[+?a8i7RC 'nACe9sCGZ0\r+A="S<W,!PAd-AABAk'KW@qrm%FtB.R1hibc*@\W=DqaRY?Hp:(PX[*`N8NBGr! 
7T\(:Yc(A5\FN+S$(\7'=Z^_^m;L04$/r1D.*%C`&4.Djr<AU<.0FoGF*+<`Jfqlc%4Y?;=$3T*.Lt%F+J;BR%LD1g[\%"F:Ao7A='QA41,ArGXHZ#9 Yr]DnmUGYqH;Z*A5(n0M&o[@"(hC*Y;H AD_J4"lNU*DbhD^*(M11@4bY\)$G]Nt\FYG;>P?kH;W-QAb*iqI'Zcp@Q\P?$%*qX,Lnj4/YT@1Am[Q':WYY[s-Wm;Y`*n ]cmojAboKS"20Coaf1"(Q08XsBeVUMLN`B]so<]%1n_H]9l\j%4)-A;0%4o7H/:W,^`T.YXmMHjQ64\]M#_NG9q<=Kf;(,=f`l%Fi4YL)/D[Q1lCJ1Vq6kpFhBq$4TAVY%<Y^-53WF9hM9bpa.b p*O4odJkj<!lj_E\^gAq"95Rpb>%Jf$@9VtfOQbhA%%AbJ-$@/Tgf#p9ndpjVhjT:L0e`lJABr5<-&Mo/+m9:.)>SZ*%LXfiJLOT2g)HM,+D[U=7hAO:oLej8=eg7#`TK/@FFm8/l M"oXQAgoEK(]IfTt16t]=Cc9t1RKc+1B9ZHU9B+Xi:j^Mdh;3QkL42'MUp$ke?IdFVP@'>-W[PqLNLQ>P<Tm0gJL0NN2dE3]WANDlOI]IK]R?Z0MA7E_`n!UR<d*>[P8Y-,ZtJ.2rlLo:pb1AXoKt^Q<k/o p"m\I2'A[20^#8Zc%]6hj@_AEY^S2:qA`Y`[g1NCYfh.Qd\(b.6.j(/k2;` _3"c_:?H%CE :Z#< .>l]OTo/5DMrp_DDiAi#Aj0];_#:9LB]2%Fd4ni(*BrD=7D*MfeX"aF*0o]Tb<4mN;!J"N#g.Z<CR$@HhB=/HP'T>'8p>8AAqrY0< d*PTA_9JA6<^^cW(E2g_,.O]B8XR<cQ]fAXEb6=-"W^IKSRoL!I5A?ZYh))#FZoa(ir.?JaCrCl;SQ`5alDo:jO<0Xj[o0^=YhC))"GeL-I.c.0>Hc9 g_1A)f(*o^=.TQqIGp1>6[Hhbk"I"rJ'D3Gs2*IA*['9aiT%<#+UB@_Y!IL/`k: GSk7YW[">kK=YP[aj8^X<`&?;*189@+<op[&I@3[)%dH> 5A!';)=k!n@&>K 99BErIqF^-HYH(tsdRoAD,-B>2K]@SkUBhFWd7/7L>ABL-6[Ai_!!X9khq_IRcMM<_$/Y]rBa@t+ab/`maX2JBXqApH<N!\6%Krd=DW]&Tr#r@ItH_3P5d+f[rBa:!S0Y)<]<X)A3WT#Km`-9]%sePdTAeY'[H)@#MkfcG4/j#aV^t;G_$e=Kps8\pSA%s2^1@3\V.qp:U\N<CYZW>G[0>sq@o]A-"EY7f&\VPn)A(%H65WEo.?d>CJBMnB/sEDOU&6_OtY,.)QE?j-`;qF[MQN?[%l<W;,YP0BjF/V4Oj$#VYP.e6kpTB[:hNYeNa8D<VG6@J=o?d2A8rt4^O)LR3mnUWGH:OYCP9)e7;gjLKJKN`2a!M5jS5_!b3/JKcHZm8TJkeLA(OQg+M_&SeI:h5+rM3&cHs[#AZm'C(Hk.73t3!@bhUjUj*b%g6k:d\2K*b5EC^-6FkF $3,<h;-lZMs;d:\2<DD<pX&@+<s1ai L]]LM8N[E>(8M6)6(50.^^!8!GPGer__c-dA:TU]Jd)K\ oC+,0tIjPR9:Y&jMEk+E'p<W%IQJ=2g?/tK'0f.fIsHECsBOttGn1VIjb*LUh^h4J4^KiK0';thTCtSt:L8ke(?cO7AH3^\G,4*Kr)aOR;SE1ECNp2W"E$m(/BQr1(^kmZ<X%hLKID.XQ05DT&&?RN./Q"!HHs^N/ `F=09o'*i]o<J,Xbr3$`n&XC[tt0C3Z*9$SPo:NnSBqb4W[/E6:lKc[lK<pMr> Z?-S?>Mb6eb3(j[+8^-Il=2<#/OH2hT0k>Z0[[ssS% jc&jJDqIj05q 2H)_4:tIeX,9O#k<@3j_kI(\mG`MJRJ&lq6Rlbn(Z(iXCLs3W=D`7O=%qE/a3L@rF,2 
LHMlOd/+O85Kf%FH:Z5th-l%DNoKcPk6qHA0 SR`ng%p*^tf^hl;pQaHdRsaNiPIa%H$Fj['!_2IARV6nks2EqA$2Fg)]/dGf?,7HMD)#;=[es(W=gOOrQc%Afp+'Mh<1#3mV71a0a`R(b0BEm%<ITiA8Q:$rl"^ QM_0(\Lf_X6AnUU$*:JH)p-cAf`,-<+k@YnA`6HR8DZi$1o=h<gVr?=26Om?E=KWiN1R-9+clIIYmac58&3)(78U?U4#hKm!l-/OWFWVH>H1H-j-YNWd338:"\d khVYX$npa2m\'p,$/C/,iPi<dfr[b0o%M<2`22AbjVB!SC6dJ.m]"159!!nroqoeTOcQ).snY%++HmPN#:9,Wb`@ds+R5#V<VFS0qrRh>jo\,'$+)$VA>fX(BI=5L$Aq9U3j6gQ#&A1,V]="AX["74tC$$h:@12L8BIYH#2`h>#p)8,PeYJR%Ji?f,8aH!N<j)5NT/k)!^[2mPGL_P9t%+RYM)K6[clBeL:mbt6f]]e\,J+_@s7+@-]3YF JY2hM2UW`!Mh\Hs# 1DU#VgAa'iFIh.&5.7;]3AG^fmD k;A9*?aS20KsbA+GL29'5A ZB=iAkm%i.dN90(ifXAdf,WF*Kechl>6oQ)I%_d[+0aE$a--fXo"o?P-J/5T4&& Q]@ZH<1hindE]&N?!fBjUs Nt6>.(*Il"`3bH2M;,X[_-tGAY-or<3<e[>jPBii^BRB6e\_]%/='Qm?YO=BmQ;\o>6CX<n8r_B7AC@:]k6$ GrWW6iVAMRTPLh`%S-PcHpe:lYHME/F<g-J^!DOFNeZj%b5$To^!)6(5LbImkHQQiK&PIc^biOj,l1(KT`dl!:A*D*V@&g.e9sJGkL%MZb-:+;=TqSllktIft,^X]W7)#4eirn Er?DQ^kBH:*l@LG6O,sI-:s!XZE\B%HfM]r`65f"#U^" &q/VYkjtAL,,=`;>b _jV-p9Gi32@">$<__8(mV-2onU;SI4D1 @+7H%d<Xn&4(>3Na;"0MRd( Ip'GR;Y9-U+%tN;VZVe!C ;DKo"FH69j$OM:8![&VrKWlmb`m )W_hT7m1Ki"lERR )P 7Roe/E5T+Yr6 aF/*4\VFg?),8H'0<19moRd.P:>;:37Eq)PLcE(m+c2_i`L9Et+a[%QA#dhT20^;fq4Pi.b+dYh.fnaQ`K&_s$;7+2DK=G2sBY=Q]D05AOSgkW;gMpJk]/^e_8Y'HTa@hQ_4 F4k[Q.X.gH9,Z]MBh:@O,%m1<&&%q87meWipq8<R?WeZ$;XV!A$\(`Bs?a'Eb-XM_t7h.Xc`;;*r(#4S$n&DX^9::W&m6ZU"&$=cq=?t:U5`VA"noeH2]01.mh2"@;Ahd7f8<rJ;jA.mhBYQ&ARR)0Y/W#(+X CDpTAmf)-\+qTo*X+i8:YP[tC#4%0S5jnpGl20<,1)o3Cg0\honDGkW#th^_a?cJ`1]$A2'dis/rfTk?LeHHnqh!^AZY>,BTSRh1jn2Tg]tJPjU4(J]$PGCb!jCYjt(=e51DQ:%4ce_e#61K!brrMtO0K^)Uc+qA:"bj:P0d-9HL2oj%72HJGJ^@Q)m7A(eMC"AM3CHg-@6plUUiJ-$M]0V!>^E?\M`Apcf&@YKq33(Xm]O`:ARIe@QAfsF!.QnrZ1"11A;ioWci%j3rETcn.?8[-b!^Am39b''D _kt,;D&^\moFpEbaFQk.iL0gsh!7`g=!=<aHm`0!SC=p\k;sX6Qhr)?]4U5j>Z2mIS/Xi%A,sJQnE^o"642#0$%?"&5of9d<..7 $![*BQ#DhFerH@#oL*K2[oPr=8.pr>,Y%l=Aqar\;<V.n(['6@5QItd[Xrn"k=E4jr_)A+N=[#Z`T[FALCVZXqm0C;DqSm),[F1/EV?kQ7U;M?8&]/ALe.3_ABnA22V%g?\p24; 
TQpW/e$Q;r<[0_6]d)8m,ZWo1sk36>_UVZ]YBDO[IjF*M^YB;>ec<Fl5J6r]dY?38)75?9eA?(^ZVM186JKCpO&gg,oX3C9OrB'?lR40s\97q+kpUMd&(=q3,L&b0)\FQ*A`r85d4AJ@.Ih#*VSXa%2D)/]Q4S-t.gN_!D&CdF)M]U'O!5eVR@(7Qt1jA:5e(QQ*Cp56"jIXl*U"8eA5&47p6[ZE58*c9VN$9+'n_2P<m[n@M"qFII*C !b6UM$G49sAs6&R,8%iR,9(PZl;L_+hXd<[?6T,:'r,P'Gp Z9 0/[D.((-Zbd7^fAAK"'5A=A<=V!dUic$<IGaXIjNJhCC`]O'A10Xpml68Z2L`fUk"Y;A[iTM$W\n$*[Jj((A12]cZ-0W$%gO,5Sbie4L-pmn4b:fn.bI)KhkO]&Lf3$UiPm;'^(LeLW(EAh\m/"LH0*]9#bk rpeJ(,L<q4/H&'LiBY&4B HL]@/dr#+!2@J"$2q?3sG4`,L+0\K.NC-JpU9VIs'.'';r<3skO&Vn;>Z@OO`!t#!jV7C%;bAtf%:gnfgDhkb"B=_pYmF6%WE@_2gV9-+<$cCNL]oAaM&5)m.GrSno/+>Q:^f&POh#KblmSf`3WtZo8.\TS[.6GLB+a6"%m3m38>n@KU\K90Aqn5j+,`"\F9Ya%\@qpK=Sa<<G"cLADD(a@]3Lr6g:EXR5AL4@QVB0a=3*oH6U+haOdOd/rANcqBd,AecrR8s$Hpq(^K(RlcjT,(T`>T!33fYi=[2jmA,/$ r0a.C".=aZK@q$$ Ecg_kl.5^Q'Se;=/UT.age >g3km P9- 2,^RpC[q?9r[:m GcQJ/.M$OO&b.r! /X b.[_\moe/C-kSpon'7eK.g\Apt]612!=B7BS+)h*MY.9Y5Nq'5l\?%6[Np&!o\7!B)6]O#nDP,MJ-/F4PKCL46[EXg->I`nje*+1EJQ4_Z!W?98AD0t">@J1S4" 6DnV-?T*r"MH`!$Ec6VU)2gH##7P !/.Z';K8R1Ns/$0A10KQ9RTd/nel7AndJRfEAf24+#[C/c,WeEn4/Ws ALN+QJ!Q5;ib-Ucl5>O\%0//.fFi\S,c)%,K!,Y_6n0^nD5k\;nmbSLf_l <Yf.cs"o#gAZhK'SBW1-'&q:@-p65<UJneQmSnJ E`Kpe>!GQ_&Uo!mN:kRl\=8_8HQLX1TaOANa/?(8NtU#i<^T!&ABOS9:^:PULc"ghLa6<9ms-QL'E0UWUbYIQ<.hIbP]JA8qgb17+l)shX(Lt8D.8?d+tq$Ym3bGk9Ng(Jq nOlf=C#O9+Q:k^cE=,]+c^T2UY$GmPqRK:1qdH<aT]JNrO'"d/ )b+:<1q0pfm(5OVCT^nZ@IRl!VErQGt\WPB\):iCc/%5so!/-VtIs3\#ab(6lr'h8nQ&.qX],[iCNCa. 
jU*F,^Z5O&tB.ISo8eAPDW121PX!5k0/:qc-d44p&W2B(0-N1=HPA[AF,8 @.]>&eH\b%N#0d#%P=%5Y@pUa(m3J=^.OU97naP_H^Q6=X5VgnEB\Rc\R]_rPPF +CjPJNLVfJ(^M9:L>C:RaAblU+[7UAfQHWP+q.`rT,PLWD)<8PB LH@<*lanqM=l/.\OkXJEFFV0tA#2'o,Bt7#PZ:bb3q0To=)S%N&rL30f.Id4o,3Ie1Vt= '^'3'^i<t"L6@j(E[['tK""llm2MR?0j[#c>9psXE\M"MS\,?S-Z nG8[C&V5TMabH4XVi9Gb.GMCP"D*ma7(#;nIn(WfG*hp;_2HsUF#GUd#<XMq97(.YXN*1rE*][VJWqlFb-0KSd!09$U09:/D4!Ft%O$Z5SPU;kAXL8qLhn$_3XJW6%%r8_rg"U+jf2!/mriPrh\a:jHC[2+NT&[<m:]Wj.ESA2Eal!GRDOS5N_K*<"lrt :5mk4!]@^thj3&Uq)qKf#tQN$=cY[p`=.cs+3ti+iEAl qgi; Xn7_>=&7XL`[sVMi%j2)/-:->AUAHiQE"GXQfp5Z^@TKQ)YJ3E,)!AFb\IW#ETD$NZ(<;)L4D#Lofa=)(G7QR MNZ'[)?*"K f3teIh<pD;<"p#IVEMj>HA&A+Z;gN(2Db^A,M9Ak7 92o5EOtEhe%H4AC">Bob^kO_ o2)`2'1Y81je?]k_Pn]cY&sg\pb?c]"_ERiNa(5Ih$?aj`>or.b 9ef3`lX+^WpEDP"sAK7E`R?t*R]""hf\a.5ch9@FNE/k`-d$lO4,S"PLKkFo6pmpNX?-!.fN,?rMeIA5;^:m5AKA3h=<m6>XqIRgrkS!XW/W]RtKDUVtY?/Hg% ^A_#tU`%HI7`O;:=fi;CO]t&+=cWkkO= qW<&>Gn4DbX%WG^@LMiOA:DM?F_noP<t,j[ci`IS"?)HYV1_7b=BKZ\Escm41 LQdjItR?ssdT.2)$5i0RD2TVbC:WXR+hi*ACQk_'L:nb.k^<E^#""<C(SJED(p'[lkI@SXIs<=LPV#7' 1SgT=k#:QKgVs-aZ?=Qe+1AUFt\Lj9tdW8qWfBU6WHQsicOFOPVf.JDpnQER\4CA&D"]mt?3E"*'#M0sZN?s$TXXmsA]W0(`Qdnm,r5`73\UY&_qkm7>0[ti4p=eb=jUlQr-%?=tI_*h;j,FGg6n'[1+/YL2e%XQ%5Za?H1 FijKT=bQnh)Y,L5jJ'i(CiWe@k V3I(3\..UG^D,1c$8'^E"d1&OPb3+b;9'"4XIGCYW"3rc@7dn!lHr2NVI#-@GlePX,B^-/%GM_?AWi>=UO42JZtbN[=hB:__5lCt+\W_,4ABdQ,*aU V]ZJ8>7g\=L>KWe>'!C?6+Z)-0jRE&CA-Rps5K(70Sl1roE&LBrla.XhSmI\UtL1mcQ8be+cl4S+-aTsWbdr#fJIVl37Ap$.$"jJ;B'nQGQWIXlTh:(\9L1\kq'h3B9nm5h?D0F6"mHe:8WfR,:FZ`?B:Ls;/A *\FIk_&W!4'B[q=eqCG O74/*A)T;>1s$\fnot$3M/YYiY]U*S2808n3Qfk!F=@lZ=D(\N6ZeFj+6"g!$.Q,J*0,IO@Xc0m+/P )6J-K_8@A,(]XdT]DQ5NdUq)2]a ARkg"jCl.Chf?;X+#Pf4UtXtT_%I$L7&;t.4dr3A^eWFCM(l0]BHnG"r<_ ]3+&RcIbb4/sFEg=Jlo  ZO+@;_]qG09Pf/iN+o*njqQ*]V6NkkH=o).iL/N(eaS[;Fm37b]/hhD9e)Akf;iWg:Y@#=RI`ke$N(M8/Vh[ih9YeB/kE/]6ISo:h>t4PVNP#d/I0jN]q0-]U0)5RK/B8E\ D6.O$J"^tm">HrScj`3pl/G<h+mG%\Ki<IEE/JV4TQ'`mhlkVfaC<QR4N^RSm4gmll`k(!a<TLI^asG2@/jAlO^/YkP 
k#mSZi7%9]#lrDF""F=qMYIA3jq+mIH-jcd0Pos2U^.kTlHEd#DQ:I0G3=:Ae"C$9-N4UJ&7<+X?QbRl[Fa1@50hoPFmXdG>2JHQ[&VaN/R2qUiXES^'\*L5AIV9;I/Kj>@m[_tl7LZ"K_V*9_:toSt`Jb&HKZ59Jd)M.U=2L*MK#RK]D+k_1O \rN$SjGgbPH6c4q&9<i!ih]RItn$p"i$>40efHh`_6pfr<`;mZ!+'%QF #Z"NL;b2tObIKb,oX_<]5?L:4H@.:^?Yr1N0XKrflHFe08O8iAoC&%_]5 rGLQ:;+];W-d5Xd\m@cMfo]n$$nbFR$D=!*sFI9g=t:UA9pGhX;Z_"G-j[XieTA@N8=8)$VV R#seSQ/-OO1g@ho:05*ZgY?=F$L$8FZ7isK[`M[P"#r9.8ij22-VnZmCmT;Zkg]fT_D= kCt`_MU#4HfD)7#QWf8 nfI<M>KPKFK$lS=$n!\5YB)*/2<)1iAU2"8;OGU*M*j%A `i?USbJHUGmrenT3\=<k-J23`n `r\71d3Z3qp<;*oA2Vh,rd2A5/0%BM18]8j_!QkDN#6oTX?E9E?bcPpA$A*VO!nd3?Apg60\[o"++YAJEYigSs'(p9adY[0>#]A3NM$AnIAo J9E];p^10fG?g./pNe&$.o>&o>1eI/e &n^0m'Jc8eYdG*AX4lLWg9eAJ/d5;Kh-T'O%I\0)\("66bIG"5+2D@,C*tf#)_]ElQom_j%<o`DK^.B)o;YAKRjCPAkAko>%ndqP2i]esMMcetM<`?5+P>K\?9?A=$*OQ#UA]\b[o+diY#>"0g8S]475sOa,-^)"nRcF+)ALo_FpEA,+/j/5!/4pR l<^g,NCj)eD]OW,-T93p+T<*e++H.#Y=/<GSn%r:Aqgh`.0#8 BOHC8U+\%(:AK%-L1aK?bT-dr7I2]j^Af*tf;=-Yr`qNJ,I'Or\&]G4$A5cs(MR^mL.dtc:69/(grAXFK5QC#_I+#pZQJ[W]VjE5tI9P?&tbRD*90"4UQ_9"Lb@-dTB&0VH"F\8i hB00'plbGFAeEn43<iE'+b-9C,,%#87tB*4.8@aA=Pt!66AEXo`/9n/M_\q4$\25!@_]0b?;VhNCVI$d,fZB jG'N:]+P7&*l3d16IQRhlDaimVYgCER^8"UpmE!9#X:;NJB^Y %16FGrJC8Hd[)EA2?[InP)Y/B/d>HAmhBtKs!7jc`pf6kV1aEA( 5B]9KBeVop.NV(:%GT.b.Fe`RcrX$-]aYZn$m@4t`7n[GVNm=i`0sP_$[;,2q=_4QW @bTWV`dCb8<^hcrBaqNB59ERMDEP[ps"4lRM0R<50k>osK$U0s's3A_n`1]6igsA1W1PiqsF3X'ccZ/9AO,YBP%m]q7N&+Vbcbr8_b7X8&n<3mbD/."]38%>p=Y!s1PhSV5ld+qO2oK/t"D<d"fSaGINFrNBLZ+(hpEA7[:J>2mgS)r="TmFF0^5 49)qG-GBn/k X`[J,nI/.>Wf,Cm'mI7B9\PX=.J<scG*mj/ 'jAY$$E rJ_ DTqPA80t"Tl_b6e[M:%-b)=AT7k`!+;t\+2?k4fge$6R+NXa-ElZr4L1K%%Co-\L)-ZIM2hr+<]qaE(68L[)KRlcf.Zd&elS6l `rPBO= $2Jm]>]00CBkj)I<X<dEaVT66!,Y0P+3qG"9<0>oSG%isn6.K`cS4.rX-c#`.IPfP:I2Zo3'F5Ybo7Q4.24%+:1Z#CiiK(Mf9,>VBh'qphp%]56afof\X;UXnqL5&qT*f?%g2RT5AL R $j!.baUTI57,H7"[m:A0g;KjN,g9X%4bZl,f=nQ6.r;$'2=('\pb5&1hAfB"A1!F2!Ig>M5`Si;dk' 
Larm2<:[GRk%D6WCL_%K*to=+]RhCCqb(\EZE#.XU\geYpA]Q-S\Z24Pf(.FI9NopAptQ_94Fm#:kFBj;DW'K$a3tAnF3#oY<%EEiskpRq-"q_sPeHPIZ]O7E5E40TMV q3kU MNVRlRYRWI?tLlsb!A&-ZQ2Q0:j'AnA^NC*_N#Knf%+RO6EWS5b&s+hp9n7Jjt`NBHoTa5>*$C=q5=Zt[*jhRQRYX^#NaPfE;WB>NYk'PELnsn5.a N(8QWAa@VpQYUMO."U_U'j7XFVr]T ?Pk[HDbi6$Y8)8Jm8ahPI^3]$lLe<D&NgI!1G.pc,)kDg$2j 7GT:`R?IH$iW!Aaf$m=.@[CCZRhk$aMoISYKIjm]T"(VFbr"pre5>Rte_fA7VA.._H-GcAtMsC%$k&Er5QY>] $eLqENb)rRA&]"L^!0p dUK!l1NUh9N\@1rS1MiGSN4r&B\Y59U38)E+C:[(=bi)ieLR?jTRY;6,f%A#^(qC%j&2:[bDN<mEW&l5Qg'W YR2AX cRi/iA;oMJ!%^Bm%\hBaqE>:o*thZI,HHk7`?o'WbA=$TfE<g?EX"(.Il&HEg+E+3G+/L*>nGj3&^@<MrOK8LZsF/ie %+87>agOQJcpQ-[)W?IgsX<csriB-(A>O4Yl0h;fnKaDU5oI&FdiOeC$S2=p\SPR2C.pdn M[.65Y:b",dsJQ;acF=pP P)Q=OZ3N3@kMp\GdI4a7DUF-\r=^^jJcK.V26Ul#2>=A260Y8P&,g&aYR^fIh(rV5dRt;IcInk8E\Va:++%D<+1ETI>_:#"mjF/k(SYr0 +-@.+mWLM\=p)%XBFN/#Ao5MX7>$BFYkq`<7O9[;nXPLO)Fm$h!)[O6V(/hraN_=1m4o1q6benTM+U-FWJSEtI$&UKQ69cA;Cf5qOaE\A$B$0W'ns/F1<imRFbT"E \NN@_Tq_M?%BAHa%[RQG`X^UkVKsDa$@o<9pb&e$=T.-].[r"Y2K9J&cCpb_`a9THa\YZY0**\CESsX3CK(8Dq l!rnO [rANI#bd\0N#f-S+(R:]&p$7thil[tC!cDJ :*ZJ"a(!A7fAe-rFJo/Ap!G+BN@*&m$KplAA'7qF1bsreDKeahX#nQLs.mANJAMif4OQ>lJ3]sl"q`T#b)o6@CGfs3N+i#R@*dsA;bicGN3I`ABl0pN1[UA0HKOa(RKdS,>o`R>?r-D'hWG]iTIAA&GJ_T)!`0I$Qe9MH[]3Ar33D4nd5(g><^5 H8-U_0 5c')PJdA73oOW?I],hh3C1.m3Z?:AcWU=p&7P!<^>Es_NeEiN-!$S1=r6/4WA@`LamN!o7>Am%Z@"\fn*O8IqFGp3fr6SGN*6BC/`R?> bt\R3+3K`2"C&AbU?>pk4]+MjSTrHs(F-Y.S4[9[ZL\@$IIB[W, 5.AWBXqJ9`,9 /k2)G PXeir$/jEdp<n2G<iitj^R^[W'Pon/.,qnj0&MqBL&N!V0dGO0.(dR3/_iW]BK'4^C:lhUG]7(F;:.,=_JtD;s.`4Q=VBrBCNe-ID#03cmmK"E-0P3Mf 7A-t>^rdN&A JfUa(;<6\4/`_^OkN91CJ*dc<ZRSc\T%,!*ftAMQmb=AZckKA4P?k+&&ZQjM/K8PNsEI'#K5<PrIS$a1sSr*qBl)e'@3UsAf(7M)5YSXVBQG.n-U#8hM#(gO+ciSfo@,^ITW*&[H0D(UAkED2$cg03V&,s8Wnte:tQjR*S]Q2KO&YA-^qXS) WH[D\KUA?)] :T ^+cp'<kS1RDo;cILl 
K8XeKSCSCA.2e4cH-*j^(B;9$7K3%\s:(#AHR+Y&M@R.qU6a$K@8totIK,SOVgK0Sf<">#bjc^31p*Idd@hkSj3)Ft^P"oI*Rf%OlZI8n):UC`R9M%Yd$>"]25-_,o9"[bNAm7!@4HeP/9@O>rR]HGHhJmqr$Wnh@:Uc6\9.bB7/-QN,3le@Zk.T"#O#;g8[lU&3J>U^G;emN)b7gHi*^i,h(^I6riL0q>V6%ON$[5 VU#k#O&KGXUBAoi":>TKBbOld8Cp7Z*QedV!)qEp;GeRmL#0L1t)Bg*SEp9ZFWDZ!pof[[tfifAtE'[_M2ODcYe8G-QP\%`JimAQM'2=bQhXtt.=]a\17:QZ]>^[KgnAgU=HPk0\Jl2\<fr@H83=6]+;d)J#PN%*`<d#+?E]&Dh;`'Nj<7Qp#b.;8(#19a>L7=c`E.=*?kij%`@&C6fFG73qHg$%KAP6r4CFOi;7G^Y`3i[2Y>Ub&rLr7mp1>Y,F"]_0G!b*/(rC$Lk5F%`'BHD%AU-s%oSq[Ti-#[&^0Hr^j4b^+d@1)J'on<Wr:M:>Fnf"KL5;Ar=E<fd0-<Ii_<%OFfnPYGS$r-`5C\A8r&RA/J[[MtlI ?ir4sO?Gp,U7q\=pcY1h:3=Ajb=kWP19A!P$WX=(+d=h\Z8 ZjWmqq(4R9[=]Q0 #cJ'?Z5&ch4BDlL[=TdUV<JpX<_);ak/+ras;/ 5m4UVp,_l=H8YpZ"F+rAt6q9(h%1J)hXF@"Q[)EDHfZ0YDP,1H2%8?K&ej+.VG33./rEHXA^2PAd;M!/aI.]e;)k6qT?A'>o>,"#>TGm_(Z\/<lW"5Z`CErlS*G0RFP<&^8*7XAqBM'@?4+8;W-B)'rjD*_@tm)'tfoXspP7 X!"6n.]WGW5W(\>M>JXZn!-i,;&[Tg\`TqnFRTTAR[7%@_C^kA_nfNXs's!f*G;-ZW=,/%mfR?p70-Q-iA`"Z4:qM0SX?tB?ao1,O%ZS4aTe4[_(P6*g&7,DPj%DKp]oDtlZn9sgcK&jaFE`TjQ[B]05&\E^n4@e*hA]ZnnhoEA.sX\MJ%aNJ]NQB3WH]`[6d!$[m X ?XZmc41Hj3A`gd\,XKf#X)oCp7pgFt*OtSUEe>SdPD`.X;b)JaNA\^n$Hqq^Mh3_r?+eHn,8$5)cDH)M-!<T3CpqYk@e_i^hR/>eWgOZ(]mlh`AJ^,tDp/$$J&;pH\%^\Y[API<t5D-L[%%W48q<ksCt/.-+l!0iA1s0[ZD%el;G=#iO^tNiH=)s^7mW;FS$pN81DN4QA)hA_/;AV3PFbYYg<Qc6e;T@Q\g%p;;)$.\dEbNA!]B9dmXlpNro%>^i+AHK/i,3?26(PW55Hd0RC0VVPF?@Af?AViM.k brkN5LA3M?g_dD+(b8.tq,2S"F* Xtj.VbXlpKtTmP,mHh`)HLeXP#tpcBeZg-K)tlqbaXl'!E9%Ejo9P?0<=T;,ANC"5Bn#^H]?-d9!Se'+mp&o6YR(m25k2Rf@`CFUD[kdAQtj'I%NmQ/`W,BFdMG;;P%XaZN@D_.TTS5KfG3W]br<.]YjY'k.V+O#[0l1O &#g6C5AM(#_I.*\F.No3^C#s6<qr(Se/;aV,r-FA$40"NL&dhKg./L_#Zb %ipAa:Pr<Nd1j<mme*7J5;35_E+!N.*5A1f=i&!@;2iin<c-@ce%I@$U^?U 
hG60[Q>][S8\MYs[iG2I2dPPG]W5JAVkl<U2[7?6E<%6YFlr45q"3^:eFbi.;:t($pc6&Kd-:U2\1)WK;dlp4AARd7!]>Qg_EG25j7`I'(jKo'IQ?0D]L#X,pf6=<j^d^F=p1en#"L'rCspF`d1oRQ'4jj7ZSi1Wek9E1X+?j/+c;gCB].IQ*sbj],2RMF`pdJ2e1Xfqs*^FYfZ[BR65^sIci'][RNaEMQXW0pL^)(1g;b:YBW,^$.SUke>-RSB9j),A@6.+@#EM(_esBY)5>PX^L]&F"fgaFS]2TBfVJ*BEc9@>K%D#:>Hk9R#n8Hj;n8F]:;Et8UD)ArC:15Q0+1snt3Dj&\P.<q6r-@S%abjA)K6qe*+\#tIlsb=DQjS=.[[OD;&k;T70#&X$'A,>W/bPTOr&dUS..Z1GQK*KA(X%%$k/md0ZmN+E21iLMiJK'GFdS'@5L5!$j!JD#hJ>9s#UsqU':$(#)!NoWq*k$=hsXj=n`,AA6AAP"]Ak*5_P>0.VRZcP.A?0]$^P.94Xps^Me753/ONH_i*`*7oG)l!g8CGG%=1-fH^BIU_t\#?Fs);Gf('Xbae<<=:l)j#h\e;rY.ZU#F!N-:o.1-<$Qk. /M!rbGAc&r%=Vj&_,i-+M-"G]fX@1Aj#j7J+$/5hfbHUOA>bmb(7]@+jYCNCg>B>9M;0#rCq5oZ]r9@hb$e$R:P"+2D,'J '"ps:gb'TGEc@'t;"4DJ^k6aM-)(E*pScW4dAq%?Yr<T?:2te8*@1Edk1Ef!A'b;^.*O9q82Ps_>^,[!`.FW3q?^6aF40AYUWn,GM$Q3ZN.a2Oi).1P$>s008oRAV$XmF(!X:HeF:EJ`E$tnj,RSAR(`(Z_2"=A,)WE(j65.=r-3t&6b4[>/EQ5Iii1OBA:o;''k6q3W$N"+DnJgN<TAod3_g_IB&sd0-E<-%f/0jL<VPh8]`5Fkln!clZ97BarE(*hA&7DL(DK+1Hj/f\pS.>KN^2%:[Zk="Xpn[f@7#GLKdFi<[>ZO"Y@ 7R#Vn5DNP]kl.6?QX<]JJ>"U(G>6;TV,'>X`N/>^<6e]Y=rEG/maO*SWSH5cMYhpfT:%TC3TAgm[aO&f2R"WSX39mtB)ED:`;-;W*7?*Oa6_I0YY[HPeFAsQ'A"5s_f'H^%BUN]1f\] ?p9LX+lWWreE"PZ].Ef7Qe2P?s6-^$t&'T&[0,EaBKthk\9*AlDW$s)CS[Qlib6-r[q@UshcoJBUc7ndrtbQPM2K#CBm6SI,i&A5:]>C +4_ZC#R%:mKOq0$FmMCUeP!Hg^Jr8P:% ?e%Jq19AG.\OAbFGV*\B9Y.?j98=S=bJQVrOjV&hHGTf+kj6@%%l$El[B-U85[bg""pjC6;r4Ya[dN`#[N/$1\YfI*l:DiL%?#N`9AHh6A2rLr(Vhc$[Z*C^U?SM1I86hto\\Tb6@UsglAIo%L\"lbh>h]G8/s+4)a *4olRYgfXB,!]:BDb=DA)ICp#)6c!BZ\])'K>PAX$,'0]5^E-Z<Oj*(F_SpY4'9e 7$!Y4,)cLF4QN4dX:SBtPG ELLYK^5AtJ$B:Of8"pFg:r?D*5);**#/r="Eg-GlT!+oAINJVC;gXPm(dhblN;EAYE]>I4MEh(!YME9"dkX &Ar]K?`R[o!*knNfEs7,(MsWD-?Wibj_kj`#$KY]X/,AohF1U`tdg#-3F"J4QOrsep<3#g+*'3.T\2,3l$>I>MGac F)J<rZ&n%^'C#n/^;EoWS^'j7?cGhVS4-mY@jZN'UR14NsN\_#"i m]g*5_s!9k_a/HF[0G0*_h#.af3s?q3]U<h31YDM(0Nr>p9mi=rhr"%n[W*BMPIHkU"_:]Y?P*\N]AfdGkR?HEoOn&Ge9)BkUjH 
ENcm,MV:EP?QB>0B!SCDoBWb%I[A"8A7Ii5A2A)>e;ViJ#bC1XQ.g9jqLAO$nQe'g@ISjnFkg\t\Q^EVg:e(XaU-AT2,Xe.%D-<Ta&s0NQ`?A(s=JK8P)-P3A.Yhf?Zh:kd$88sC0N.C*a`d&j9512Z)@+3NhHg4jMD\?!d&J9!,]Fhl'tm#e4_FA*IJphtmgdZ00*cBAkkAW"t0"S],DoGBf@?l]O$%SF!m57pb1\EOOh%L5-W$LT`4lsb7MP^2"d=&;tW5Af\ ShQ<D"3RLP3b- YWWP5K_HCI\ZL\]1%CM^>g`<fi]qA2qPtJ2:"-Kj;Q,`A;LUqAm<cqA-=X4edXjr^/.D)\`[!U0Gra!^=PbU5<hD3nKi"i#%l& o@XN9k2sg/hXsL2j2G!!DnK_Ik -O_E9d?mAM>W$PFDP7c_C1ln8Ui]pp@U*/2)'lc3Q>J$_5Nl%.a.=1<<46LR:WBdA2ETem5-^'p[6G_n;\gN=)N@E0DP>kfn!_46eMq`2Di4(K;+1G,X*!KZ'&(Gs6IdK_c<4n)p'gP.K!hp%g2+'pAHPdX.kq4.H10aO($ns)&JtK:2?^X=L$T!nd`:oHb$,O/$PM1 hGkFY5mi"^ZHKFN<0*\0rkWB[()Fg.1'^8pd#G`sA!X'r,o0[eiF^CM%G0pmYKKHg%gIWJ I1']6cLlFKg5nW:&:ofb+Ejk&naPs3iQBTG7H`.UDg<P^Oji>;n'<38XtqH6-NX!5<c,K@W#GU$3FB3Q+:&taRkl2'?W/Vp,I?*+ SLp(a6f(*?90(eT0<drjk_':;\*i3WI.2T]%SW&Nn!t%?a? ;^@0af/pB^(c6S+8.N[4:U4e^nq/38NgK)76QK:cor?:lh G0YGJ.gZ9FS/()) -4 2Dhaln(^p,H!T7%&iAa]Gk`:i'UO_e/Z=JR6l[WV"I];WTHU[^l74)cOk1@(Ho!2UM3SW-[-]J@>rG*h`n>r@'`.c9&F"eG+A?3G![`;O%*LiB;koOJt3a,\aG9L??iGc?RF,Y9)FH 19=WQT0W_bU -:J/!3Q(R5f! gD9&5HK,$B=<S<JQ0!qV^-%Q.Jk9CYPsml;>d)1i-#,bT9Me9+PEY4*#Lo#bK6(ADp+_8MLO0!R/esc`#W/,ksGpTGJd1k0Zl8dk"LrEPI;`VfF"&]`bl<$9A`NPBAqEV91:IH4-mH-b$?W7oHKX.07\hlj]G`&QBi 4R@+l16$ T&P7KhnF_LKNG;DtSR&ch&;j$&828OT==(BMt,/WGBil%iW*j,HZ=HRO.TSV7m`>kbWTo9P$;l#2;spkK\j>E3?h-ZpNH!:&50TC]\&0-%hiHeZf4j<39n1e_Bl^+JhkBJA? DAatMI%1rZ(YF C_c!2D+_`t+kAFF=9@S)fjBPD8L]Oj6%J<Ul<`mbNC_bk%?4SE89r#t2J-q"6! 
mK2c='Gt?%*!VcRp90]RW"q`;B2nXCMf:.^(_=aM&VAIZLo@mm(`oC034mbr6/r; /.T#k%+7!H"/+N92`nfL9sUH(q_bYJlpS,6*1A>s-X!L9 =(m'o;L.:DWGC4TsQrCiL,t_RpDC:#]"lAeIjm:fFc?@=, PdaqWbpis'*NoC_KRFlgiUe=&K+4Zf7V-)MqASmc)6hcfja5WlS4oN=*Zbr?d>"5A S`4(OidbFqk",r-O N^g9j$C7Io9^!]KZk"s:m#90bM]D5E)js"Qa9+lk'Xb+ONY=,(-1 4aA6OgC(qdOV^"N] $AhU-9&2Kd;^UM9K`7$6.W3F1Fc\sp]/%]rG-Fk-(jj1qNr,5 5QqnA]@A[Nt/&0U1?eA?A[r6&Bk3,DjS17B@g4o[6Z3lAT>PXK"MG1fl`U27L<C92i`6A@^\%Ri=F>-7IRk-//*\ k+6OJC1M;X<nX(IgkYR^(%3rJA@E7]8K%7.DbQ'P]P*ef/f&L4M!]9F/TA#]D#+h3J<D\2AIA*X?.!Y7A7Xk#B:%4.'[(AS,a FpW>:XY?s9-VjH*AOQ%QGK#o]bm$,`hQN&$0QF_.-(2`$"cbE0jQrH7pWZ99Z#%]*s&i/ctQ?r\9K%gjYMg!ZB48"q/q+n.fW6'*`0D3]gGFQj-B2X`&e&*Md%lgf-2:>lk1AH7h5>6pL3p"PSO)^)!=/?7(S:5%fpb<.]"!frm;o5/.qtpr4G,=Et+<?\Kr/3h@_m>A"-_VS:Q2Ph&A#?p*FVUi9OPj)sYJK/d#Pj6ZU =gXMCK?h$WTY:#JjN=AA]K.dZY,B;AGO56C(c'g]$P-(\/DF*X"/jl96JA+gJn>l;]RdWp6jH#pNdt'1U7^4qgNl7K#O;PA&\;1a/s*,PmG$BZj6L,5+dO^Hq($;\je4;`5`D<"Y0-[o1_6\r.AF.emOM71,=T])=4RlPMa5g6.)mheX*t6)M7cVKQ8LV!BEd$R<. h2GqHFC0Ns+s?JYeXAt"C=H(apc;]J0pAnML& $Ma55qhEp&k=:9n7mPTOA7P8rS_E%an7;M^(:aY,D`U?3YH*^Zh)^na=bkW@JOo&f?E\qNC+Z(ck7%Z/<asX>r8F]c]M/si't?:2I@;o2?FT8O1,iGhM9Lc/^EB5lUKa-Ilj_f'b?KH6U)A&A:YnG.k8;.gc#iYhU/5PKO*2CkC)A=Wm9hAF[I?7rA=J;Mjb'9n-nQ7c?_Q/$L[VOe^_q;%%t)@\A;G&r3E1-Ce6hG(sYt9<qAtqC/q(#h82JgF[K"s)R\>)qtJhs!gjpPa5H=Y^sj`7M:3l')69]7h/>Oo !E]7oAFhAf]O h<=)V+74ipr4'OWir3bB(rp](X@B9(j>O)&UV29,=r`\E\pgD6aLVO@4]p=QrA^d"48KtpS=JAXLU'U)FVN4mQ2helEl]%kf L/aPr4R57HoT+(JOA?Of>:BQ_fEV==N#Y=D-!jeEcm"U,a7HE!AI#Ap=c=p!?jq8rl("H/m*:3iN*+YI"cm?&!;ESGIao< +`8Uj!FAiL+r+lD(oh(\G\U<-mb>Zso$QRskn#&=QLl+)M\&D,mmcP(G.V@Q&\l6]%WSNUaJ1\EV+-Mo,H)f0c<Z$>@?=/-?hU\8n T8 9o4M.!^]X%i5VV7kL[l$UT4?D!aJH!:Lp`MgXJE_2LYVSA0s%P-a0[p596d%s %V!@0t5O#e(+ `E@" X[B  >tQ>ai%n/D=:Oi&@%,Va'npZ so4N$!)k,_h4mpg ClW+6jrf^rt@\7:L0AUVG6j30g+Dmda`I*AcG[r-J^]S5ER:L ([\ 'Oj^$!PT^/K%qWl&^VLV@'Zq`WcbF+tL`M 
E#RJfj27.I$rC_hh.W==$s>Wo^8U<`3p'\)>6#:Kmt[IS;/FA1c3MOO;15qR\k%b74fT`^j8NQ]kbs%N<I[E)@jiM\6`4]ni6eppR6KQ`c;cl(jM2U-0a3rMH`/qemm[UW%</)WM>.5k2))<Un)f<Je5:;NZc4M^GA;L6YF(a&Y)p[:+*lA/Hh.H1V3R:0.2JiY[`0_LaW^qNYR:.^lJ07ImZKDCZ%AhjEJ9-gH^Rpl YHX8nCPVO.(P^5.'h GS@\W%\I3"JE]FtA,ZFGo-t)`5$m$80ZY]M\#>oGJ]GVe\gY">Cj+cc^:RgbqaGOG7*aPjU=4I1h8%Ym&)O/^UdMO]s_i4aCp&:Z8C2`R2BDAj['ZE(bY 423me^/KaMH04tC(Q>3 V fiIpUX>EKdp_.bCc2AWW\Tk>?KA-Z&jX$iQ/*iZO#HFd1:SrVX#E[ZkG.3hbt@*r44i4&XC/K]9%?Hs3caUV+&fIqI21*h4nnW4tVD&tVK44p];U,Y;LsLM8&>4<jt1FslI$k)OY@ASS-q;l-sckF)4oR<)5Gi64:"F@j4&?L&JE9:JX RL`>l3r.:'YiW>)q2=K^!3h9O29F_m40jW7Bi##fM1^DnJ5oWrN_O:d;&SX=e=Erqo;JHn\o9CD00qVpT4IjMA`<EhoX&(F4Vs0=cZ^: c:jadc6q%'Yb]](p[7*kk&-;3mjtXf?"H@AN6R*N,f+WpTe9g9 n"%[Ki`"k,I/%;cp9 CIq#fsO/A g9#p(AE6_cTlZ8Kb0M9fesQE1#FpN[L=%ef0UHakB3V^Ifr6dta%pGZ0hqK%WpD)B!1IPsAgc&;.8%Hd1JS[Ra(&;A^ls=d7s?:#3ttAqThEs^k=hMSj-qEAR+a,\hUD./-=XH1Yr`!Nam)rih#.=)5j^_Plm%.Er3F#:;n(,f()%62NEf0,IGbmK9hfc2X1M8Y^`"QW4t'&JQGpp?k&,.\&%S_3XaetpOT'>r1')8JY,(p8d5FAb1ERtAsArdH+)1!>c/As.1Af4q,Ads//k1^V%4q1htP*"KAAU$:8Tl#Xp' SY7!a0h)GrV2.g'JmbYRLO)LRA<4b>"`LY([l!QIeWG; gd,hdog "Y`#g+M:b_O0"\.f9E"1'O*o'eML4nD@:ARo5/G>7[rpTP\i:7 tU!rb#sBA4EEN)o)Ni[ ;2/-_A>F%U)GMj c[_s18UoP6k-A-t@"aN/KX9cA'Bp'hAc3Zp'i.O(iH=o4b5JRt6&`C\RE8D<.*KA_2fRf-IR?<aDo#7oeI+ @cbq@6QnaPeG+q@RXQSS,A2]J:ce.[VMq-8H$dJ2?Y<p<t.g]Pd]06^W.C94"7m?6F]<r&b&4$,FTt;FLt?skeb(&`WCM+Bf@04l.ECA_^je2A/Rr)%X.SVaSj0W;AaJK2^?m`Wo3N;[ eW[TSiFn*e%=h:6KULm*>PGfr,$a6gXYkkZKCoEYMCe*HFbQCgYn$GfFr6ELPGH=QF<Wi%7-T8f[._L>7g%p;aj)dPeUmISchF\"]6P'FAH0f7iDjao-n'[qXI"PT4la?DUfc9k?'m.-N\i_:dC6/'UfH8BSZ!rAAd/+58iD,\I;#"kZ-NoUr$*li&@'0d W'RnmA%&+R^lS]WQABo2P3P^#>;>N;G2K-fTeXB%;%8fn;A,Q%%ns?><8o *QJF]rt7:1#[loX+;0\@"RL&[*9+.d^2Oa+2cc<d?\&kY;#g&CC`ik)@6F&mP/(#7>$(O>kMPCp3&$1aAV&(-aL^I$QM6?5UnJ"@]_Vs dBbJl_681=,g7pNAcVD@6MFFR+p=Na^QKhb5" m)1P&9d](6!0QB;@ZNN"[^RIGq?dteA/H<n"\?Y?Rnm7@kqdK/GqnUlq63"3rEJB>&Wg?!NmJj)n9j?;Xrn<t,8N<<IRL5c*c=_AV-Ddd/"L9Wt[:k]q+D6A+tD=Tcb%Yo>)A.G000"[<X2lh.ADb8ZAJp*JG:XqD0q 
s^T3)jbPK3aX[)r+Dd`Kg;3YfeVEotW^Q3f:]o1 b[h3#t\T0%V8Z'q#lo ^'Btf6b:(A'j_*J][YQkL]tE<U6^<B(tIX":-[>?bIi982-n6rAf`gnb]Li9_pD_00p\O!VJD#RJ:SM7,.OBk$a+/%ehlgR/`-a,3Dl[6U437I(g-dFm?3a,"0WAS`MSdckj;^91A,DS_jlfSnr'NBPn9An^:<>2,tT%e2IF?MH:%T:om3@b-H's/A8hT+",Ho@<0.:MHts;XK](7P9ro#NaFHsR6n#Sgd#56*RM+t95gWAo6;/RK5Z\\kSam[(9@)/;_!H]eXco]q3+hUF Op8eZt4 `OKe+LDOF]qTMDnY9!M@Fo_i2LFLsTlnM@cqcl(t*a/Sd$Vl@8Jj;a*@diN/...8;bboWLH ";eF<?%>tsQ3!S/U;]UA_n@+n2://?H"OZg!t/8BY18p7HCLq,]h/O$M^XNV[]B$!2mKZ&Dp#0dPk1lc\9`"OC!j^ KqIoc#/s\GAfsW18)-XA8fkOT7W;_BJGar%p`pQGl->3%W;3kb`NBh;jlPKtiT?opK"Z)m#(?2jYIt;c_Gi=m)noCh3nM,MURUo'"-A@Q$E!/YanjTG)F'>lV I!6D:6iX]8;[">h:$aQZt[I8J5b5ha'':)oK';Ah",Zk)mj)$IJ]53kP\9%bPcf2q1>J q;4c/b7U 3]5<Ji*PH&"e!2&i%s,PV&:+mM&gT^osaiZ BA6 jIsjqOJs[*'2pJWZ2?!MPBB[No_QWpD*8=41&0ikriorRAi3[<g(o5WNi-'-J>QaID5ki5@SIKL-P7`a h9FWc./jQndQ3gC&mb&>EHe(m;rHk*JW</1(VW&be!-A$] D@rmnq2ViiH:.C:#7WS@?a (r\C?a:A39Ms<sRc'+[1DNX3,!)q>SC `[`6o'U#MGFW+=#%]>gYQda..\Uai?714@2"n`26b5N]AKfOa"@himgc:Cf-?$K3*Na%'2P$R)OmJ48%&[i6c?+#f4@t?<W!IF0Rq^[V&JqSmAMFB9+7\r=F2MSP^l7]/!RYS.INp?4,sPX!Mt<M[B8.A_)Fo8!kW\f8QEh#&3B3iBXH?X'J P%2i(tAr$AVa5@f:4BT!OGqV=..\jL) 3dEfc%lYY%0_ ,NNhQd=l8SUT.clsJnDsMcT5@i`CZAT?atToZ?([ST+/Yj^t)4Pi.o>A!jBGktEl?gF^"=rbPBle;s0WfFpiWKpOO;#<Wfg&UN%]\Wo*aG$D->/5`\Qnghr'\0&QMbrZYB2nrrB"E?n9@F;/:;!H:/a5`YB4"L%+ Vs)6n^F'R\fA(C^n3f:AM"`+!#Pr*!k='5ZS'<lt8a0qpkHDcUiL')5+0S6>Ka-B!S]7\]W5DnaQZLXfeT0+l:PR6l[ZhB<6p2m#$XUl4K=-00dVAat_b371"\eYVPe+kK'?gFA9T;PAG5SFqN%;Jl.&FP&G?KG?`X,hdZQr"3)D#.ZrP&"eFHP4\>,>A9e_M@,51:Oc6,Z*`m?k]Ej@9B/l1e[/NSA#/PDK`+R^Z0:[$_9=1R,$!PhSOY_#c0\<<n:TWg!J;JJ>;"J$tt:>s.0Vn\L.V07Han]b4!Ti"d,M%5)AC(8JohVLR5qJkkhB 1 4h:t()$A0d4>=k`nS$<^*T`,?-#4Lb/A/HC7<l)I)%)T,!lCZ6JF.sX$>Jfnt$Ys;@p6%%ofb1eg1d*[Ob%,S?DP#"[2Vt3,D[A'')R>XQief!P!GT>PaSE4cD&5d2D B=1b-*9`kAinr6pYq7QA']!%JA\b;QE&-B%76m*3T 1d[lW@nf(o(264V s];P]5A=;Ad#a2BsM1>oF^A+<U%UtGDV%G20PqPATl36MkMK'50G%:d\> 
0fTQg]\ofTp`<&_e[A>YYFAo7b%#?\QUVS]l^AQ7@a2>WDBm&T)[CkA)H>h7q(m4)-Ilf6Ah%*rZ&Od!"no"AFLQFg\]ODV+j+_/Y#%c7fLZHD*USnfmKn=W+J%Y><F(Td3NK_(/F=b%;?)Q.mW<c0a_FgK;Qj'Ihd&]RX*?7o(o]5aWM//0r2*ciA`lRBLh2N8!W$#Ha0[T[[U Tpo4oe>\$H]]a&eIRgLgm36@KthJp)6^6r:AcJjicCR+dJZ)8AmNFH^MkSoG9G72Wq"oRn8bM+4Sj0Drg&)b<9s>+SJ:H!+B:.R$rCl%@r*8VGd7lmX[/Bp(\nhtU9No7OG%Z.n%!t40&=RA);(h&2re#QVjaI">Ie+&DpW"%44A@l$_?P*S.q#Kp;-?\1<n0S,C+^h;RfAG;@KdcrCfFcYOl3+DW!+89kD$>`((G9c\?G(O%^A6I1(%j"6_;(h-oS05i*]PVsl!B1bGr+,#F9B"Trdq2\#rXiV>&q8BTf8)ZPl`P[0dd&chG=?GiW-LC80L`6GP8/sf:S\/7CJ_,`sU!VeToliSQ4)oN,E2CjEt96D'A'$Jf_GHlPW05caah,O.?AjL3EcW0 U7,qU-E!pKce(2/0`8"77bgBQ*"q2Gh.+[@.E"tn=X0[h%CU1g?Bk&,>2d7nS5qtU!%?b:3<7O[!Ym;Ws&F.bq$$8V/-@D38X*<\46*T VX7rg$cCG?#W9 =CEi(:AYk38OAHBbo@6gPSM\)FF29a!l6U9A9iFq..l=45o23O-Y"ql1KW?!;O=7TFBdokoT5s(XkbBoRGs7pZ[s]k;;'`i*2olK%K6=]eapN&*G/A30`..,U+_n+;Qai_1\t[4%iXHT''ABsqHAWO4c7(!sY7@DAt>rS^`LJKmC)62:KCh!L>1>M0Zplodd..Kd9 U^c=h)_c0->jP=W+9Ip=c3q]Vo93^ZMQOG:!#ikFbN[dV0*+sK]A"q^V?e2?<C.b+"dUB^3kmf]Fj-3Gk175U^\&A+_aD0G.98eOVRW_e(* k5StMMQiqkA`,tI+mH,3s0>YbA;()aF5od-c;Uj:(.d/$C=\o-lHB)4+0KiAr`>Y%A$A47US3H9Ml!/i1I=&!Gsn4T6qY/N1Qio+m$p9'F:VhR q&B>XDo6'$>e6pdh*WYB7[5`k^"HtY)0?V]2_>BWto!K599ds]mUt,1KOt=3B!lcQ'E,>I"A?O`@9qMec$5$U-5\3pE:TQ7F9+]!"p`K=ek@>,dC#O/$8BpkYbVL\*6Wi8f2o,;VLS<qhbHe%,<,?sMq\%rp`UG, 86V2T)Pkl/(`)=@D&ctDt;TE"eeb+,MQ9nHV\0(KX2_gL1ADp!=#n]`fq)+s'ACJ8d%t57UXV>@RHr(]NkRQnk?8<fj'p(8F`5\d`tN>IL0oKLj'*N\Vrq:AN)aZ!"i;e(D(`*F:_.$f',ZEg]NdGT*S.:A6PkGf0?/@dmLP^A) Jh3rjQ0-kZ@OA^T41[DLG)O0Jtg'A#G+c2^X>l8g]1A\lB*)c&bnG8imS[ocBS.AXL%K^ .*L2`,CUAE.'DkbK4LZ_@)4MZS:Wlk('RHFUO&WsM;tK5fL5dm<-G_tmh><M48CSG#aDd =eP*OC;Sr6-64e74'>aA/"Rn>e-@cKaUr#`D"A%Bp2)>#0 Y-=s-jr:ita j_jD`jN+i(GL@)Cm%o@Afc`c$66HAt[k5gOGJt-<Xl`Q@-J0e@ZMq082A]Vcb+#AhUAG#tSG!8YNLYDPh-ge(BJALNLL]jrT)LcHXf[r^=ZXL:dp'AUr%l"11^rhds/@+M82o&oa@MdCgB$eIX)2`CdR9V^k(L]]C&p:<.^;]XqeVQ&@ F[DAGd3!)q^g1*:2[\A8M@h)][7L"(X;r([rA Hf8rN&*A5=$oTo5s#;GCZX
+Fo`kMsMr*sth,dr2oD]eg<N\U4KGXn2VQ,&'!;AW&I["@nZQJWPG$PpAl;1AZ@R:DZ4;<,C-&1H7ZAjAq7LN`NV[,YDa^JKW_!oW6>=tA9q;%*^5$1[C24!O=7p0LB8c6EQ:bpii++fSJsNl3$mN#K[2eL#%d>c<39pV55VfO? t5a5 >+g-X9Yc=es5sI-ts)A9R5TqE?/<M.C8Zl  ^n"s`BDQp*W@oVW;]I'.h/b1:4!=.W%/01LcYF7=Um1(.GNWiH:Kp?Ch12Bg[5M`,TTE5TN\pba^I;oM2Z<O9%>*KqR@#RqN\;Cb99`H0d5^K*=fJ<]%Sd>i1hpA>313bbb#MCtA</sM^aXko^.Z7+VJ+(EYJgJ;j*\%5@13(Wd^;PcjC44A  I$8,VM>Eg2T*ancMc.8]W(7%'g.#VpQC!2AZ(0oW12mIhnJ-G]D;Q6OF(eon*,UL_H'UE 4Y? 1?_,2ia$K^X?R  hhS3!R5BC,T<ZN5!YZd6fQIR#P5G9=')C%:&VSLdN^\Q=(FarUg3XE_^O&Pgd`)M[,?JMA(tc@C?lCJElM>>@#rs1 gF*lPf8,*cCi:nAaE%*,qP_ofK<pL[$Mk;2G/Yp+/G1pr\l^!M.q<kd)1"Vlkr(fg;1^t-eBC0# -tR@1_oAY#7OSHnE=pt A/h_K[T[d[A-EmPnXA5VV2dAf(\"gAR$FA%$4@ TTG_O?MHmO[);^]F##V68*QQ'&B%p<rfl-LXiD[Oi3s;l6aCfB$UP,?TEArTRhX@<"d8%#K%`]$V=3-:kb%mdJi8CW_5"V/'4\V6cVn(YGRK[4odBH!?#G1^%*8Li_!UI[(4t4WjSQ6>]hZ@>cQ?A;,EH7fU]GR2p25#UA$.oE5/ETshg=9Vk_;RC#<^nK?)PKL(YCWdtBChB*T>[%;/@Rq[Alea)>Ib^QrKHG2KcbE0mD&/'eS*tX&\8+U,Q#Zm_`8?%In$FLme]D% ()6T<AlGK&rH3GdpT73OclHAtINAkXo9otK-O+;eAd>Es4q.E.\S$= 7iO_5W?K\UA2:WSgk?-p@=SQU#E/1!4.@--8IW]kLr"j;$TCUr,R,s7OlD-(o#dlEef9VK?'53Xq]/3-s!3B?Z&#3Ok&d'a*X1TRs$(?O`DTmX3mPX,>=,![AO/gEO^,W^00>Aq\)M=a3:U`AVleR6-Y[4qdj1Nl,gEDi']t&><6n0#)'Z0Ci.pZY$d1A4E?FrX9Blka%LHWMd ilA@#'i'db3# ]+Mf4VRQn6'nXg#E)I(8<5I&(lT5Y$1s.5[0tYd]IbLkgGlt%?^MHq!k!h'FP18>%"oqn+V?WX<FN2Q /=3aW@ri7A8]i.)<pP=;OJ(Z)@(kk2Yt5_?/'hVjq&n'-sU`1& BJfe9*!JgT84l]Po+=(geID#F=d-($??n#64nRW<AAK%Zd$#23 k]M-`N#T9dnAkX-$WAJJ"GjGp($L]#7h9WMkYN3Z:#eW-.0F=5CeT(K$afW!t/NcVDa*SIRV`ASWk#UnIfb8Q.&pG\)7]E\abi^ )"W*e4ab\s"V@D%?Hp`?sB,We. 
-tdprb$B&>AK,6JP\KYb5jIJZAG 7F"0a<l?+^R6iPro=S`r-VWEcYRA'-*)?Hn;sK<dN;B2ZRZ&EsB20,MGK34ODqIA<^D(#*Ahcm]XRb_7-jtt0AegK\`A+pX5l dcLk(K!!FcE-9CcI.`M5%N(X$)lMOAaX;8Aei1;3MS<c#/eKhS]\\X;7&aoO:S$^efs9po;[JAY2LrVMALKAXAoY)jW2+n6'?#,UrD.3Y);"?JBb_5B7T]o/.dZc\qaF5-] ADXZJM:f%F&bDZ8dBK;"iW]_U_?TeC+[*a.o[MR.$G/7^Q\fRhVa BfIhNA&Z]=;<#mFl<G$/V=#WI\cG0;+g e-*7cAg<UIci"i+If;"PRrU&C;4poA#%*GfQ>'"A"g<61lB:'^;idCr*82Z>MEOCeHqH^Se6;OgKSWR]"Kji7'1:VI)AC"nA7mhWM*okf"4,G7hm:c6B&$3CM.8fIbb[kPX',j^&K6"IoHXPQgY>@E=HE0-$10tpQ mXol_g,-rmo11C4F3b8t;@JINZ.t#TT7eG.+CjQ&NN)IqI3hE6EF'h)!f6spqV,fm4`H<PRgV5N*r3r2%sSB.9g3R,SAqV8XY<n5WR\)2)2GA H)bCIH2iNTJFY_lg%KBX22=lb66^K?_K<FCbElA(IEaX`H',\"!Z9l][*XjKscs(;`\fgDRPrVO1.1N%NnWgl;I^B__B/-Zs%!K(5jV'A!$">irb'7+pr8Y,NqR^eF_23mt@C3@MGAHm0d;3rY&+tsK(BK30:V]"gTpI=$ms8j*+qddro#A(Y64/i!F2PA3<9/WQX?l6QV^V&)m!C]F$@ 468?JD8p``2c.;]3mkp)Bl'X2PpYSi+[t<UF\X)=EGfr K?A/;Dgq)qD4.4c)7Y&PKg>Es^%2h ,)YbjfMA5:dYE'1)rb]]/j1?:Iq37N8f.>+UQ "f5qFFYloKPQ`I#0ZFLrgQ1WQ\:)L-6\$Q1UDgI!d,_#),YE90K^EY0*0saQnl48<1`(@3L%LshoA?:5Gd0,6/b0!pIibT2UW9c()EH0.+>aoE%`]e[:.A!9^rMan?\peDIlNkjaJiX-=gYcFoXXbTYD8+Sp8US^t0cBf3@I?<7&f 2%n9!]t9rF_flN_U)74!ghb.I!'D4n5dim##5j:?GPH_3Q;gB$j'qNk[H,%n6dc0aBl&>PN-X3J.[pNK 2S'oEm_dsA1&+b`$3h32%m)"QNiJIWX9%*49_)I&ARkIm:gM(Vh!$bI1iSC/;/efNr_`sWbT'>T;fNIJ<%NW0%A)]npAdI>\KaA[.G-8An8&VN6)A$o4,@31\];&k4*=*?pasKksN1=1l-ISJEY/_`=U]j[Td-9;(OFh(IHp70H!2kN/9to//b/_ar&/Xe9CmaqX:`X,S\^A+AQes./s'F:2ZMY :m H+3XS)l35Y]AicKV"G?4XERT;gD?+<J[(S*sDO2A:?Dc6?!]qOVRIYF4bMiFBo]LVRGl:L0SGI!&i%Rq<%KXn8l<)B"OVtd?=)5)>)S.0?,8X=414C$hD`@?3"Sc/s>A[bFPR^]CnN8ml`[pD$q%M,aSCC$b$#D!JN=;^9VLl>P$8s).J7tC!^LQ,c4dPAoU,M7NmQk=90>$=:&_J;s?Y$/#I^h_`%2K$[^S[7f=Oa21<Q!o%eb5FXT:E'=\8k,(oaOF#76[i!T,B`i3",0)r[-8]tkf-ZUWdKr+Sf2dM',1-Pkn/V:p1=[ @a@rRgtt79k<Y"HK+;U;l-5*JOGJ: IWH[>sm+aM=G89g8RD:KQ)pRKLf'/>$ciA'Lr7+X'%1D+6hDh@8G#+B:/_"EaEc)VPU$kLC32rR-?HCm_QlA2Qlf[@n^K'OC7CDEFI]`;C5.EQ$V0,V.<E#;<.J8PiYa`M$FYCP[8$3Dif8K'RSc@r@MF 
XRPUqE?nmRhVE4D<^[%5D(^.>V.t4OQ_mqq37`53H)8rAGPldR>'1DEec?"2IGY,ane;V7C:K/__[:&<dboAG@@WqJZ:3<^.ffSXTgT(X./0T_"B06!]fejtX+?"dGC#nSP\JUSFU'mS22bI#WAp\83A2T(Mah,S`@56_B\r2.1Q9=f(-6K6YZ#Gf\;o,  7.cIAMP_(5X2O("&7fr`;lO2!5=W,8`9`s/6N!+6+A!PJfWK\A< XSr6gdpo=eE!e1jqMR"ptea0A+i[Qf<Y ,)YtQ4qA7e+#E6pcFs\c^_<f0?p9bUt .#?dSj*JeK,#2QE0&9/;dM6iN8/$K%=&i[%pcCmf(;`,.]qK;nG4UM2&k%eI;9$sf-XZBajKVLE==J>-Q(Y3DN39tKGiOML4ZJ/]jY*6K("f?j]d5Q`"(!a6^EnhMXs-4(YR2'J@A!hV_VYI.'P,/ct:aiY%I5X*@<<!f2$Fd[n#8H>Xt!%`[GkeOtZa[( XBpK2#?$1UiL2i!?t8SDQ]UBEW@5SN:$qlSg'CJ>p4G44h[U)U*4s.C'ieWRf:.VF]R(Q.iRPRAi0HUFmR8GAtnR1D&03T):Hpn+RlJa>:[U<*83'R<ZrVPX-mARst:2b;nm:8`R ah-e5GLA0Go'>8Fr%W$]?l)KANM[Ttj gKAfib:.oo,D?jMTW<3DIFMR\io*NBBA$.9'8A6g#(FsamZsV[>Ml`Y&<W"Tq)i8'ROHA9<4@#a56nIM,ME'>LJ18>52+]9pLqoAIWG##HMS2`^SX$'N0dP/)7*DVh;<llE__mo^GAHFXh+,nhg[,\S`oWd!dH+iUA)Cg@qlT jQ?8()W2`0RA9_i Y;!EBdA$_[IX'ERYmL[MkeMnHN0 R"8Nhn7>%)Hh\ e7F@VP61nAQm%^$@;fs\@mrNC5=Or32)c[O6G3#OrFN>k;m0oPe/]0!.i=5Qp+Is@bJ<-cLh_.Ia91f6M+7l;+\mH7V$Ai"4mAp<H ?)6O[-8rE,S6O8'mV??t@ZF^mZ@@"eZhf6M(b3j32;E*13J*rZY9jWB=Zn[(?GU-9>-#Y(jDKCL:4Vr!h#<idLO<paCgYJdN^6rIJk_[]])O!JW%n`EiNq^ZUXL#,))es;M*=%h'#*Mlod2%-Db)MM 0OJMn95,+N]`l#5MM/:hb8V0R/n[)D3WG#@^.(#o%be-5gJH-Fs"`2VH 5"Aj/cV#ZXCH:9ko0jWL:<B+)"J`.l<8K*)-A:QX/($milWkP1L1aQR%"0GU-_".E5"J>k8<CSPU?ieVSFh<(F niAmnHk!l$\sf_=jraa7AK2de2AF2>R"*n88`WB?N)/o9KG\8A]PmSnj,=q=h!%(X+*%Z_/ApSK0`.L;*%@G22NlmG='pP%,JBl$WZ:AS)rlaK/^(J+B^6fW*E C37[ae^A]L@g$;/Ho)o)1:W6q><Q\E7s9(h`c'OTETn:!q`s_K2),-SGXf+!'fgMAqN9/N3,ZS`+`VOd9oT'@B@5ZB<Pk>GbC42!!oDY5#cMQmK3-<)(m#=s%&K37[:CU-hm->V"/AM\ 2BfkNtlILl`fPWRG^+d_9rcNQFckrMM.!3jP2V<b(:QZiAo6XdK KD5-f95M&=Y$'1hV=]F?b6E"^W.I2<7kgnUQlj;YQ'W8NFe?Pc%[Z^BEnel*gUJ:gp`7lGR<sWo*:L^,I@i9KoZ#DK_R/P.QZ(FLN!RO<DH7qY33s88-V/:nIV`_e;!=EW,M.@b!K8$g(:&dKd 1+ghHQ_)#L(]BR=Qg"d1(IaaPU#L=m;U#U;_cR6@A4CTAWnRrdMHjg%2blsYFa!,V9q^;*o"9e]j/-fg.; I@=hri190<\Ld;U^_L"G%ZtjfQor;mA/V<SE%9 DK;JB9^>fg5_YboSY$].O r)#c8_"RFR#WEt(dQCYF8h;ngfPe^Ero9^bWGha"p"J]+f.<>A`G s^Xs9s%I&aF3k(BC@WZ"J9-425-K3884cC_#0Z2 
>%o<rip@)O#%A62g?fTsOAa4a36q&D3AJ!JkoSjsXE1#n12,,+n* ,d(o'Z+`;2,RCd]9[%@hTE9r8I-HJOJ$P6':P0,A<mgI%NSe 5tm=.l9eXj9T'I%al,J9A(=rRomPh3n@OVjk>&UIrn.]!F8I#*Th=17cHo>^;_8dB:>NMm'XX<sZM8$3UsY(sJ-An`L0_.!/GhED.&$37=>@beT#@YdX3DFir7"Q'tVgC2$3="Ke9<0Z/i"]":tD-/nfkbdT_X/f)ka]=.l!c?IXTW,9!34IK/'*,/.8OA`8aoXCBI)SGDZdi3b?a`%bI;nNAAfg5KXSC8NdCa(!E:T*fIo2V)8Q;j q<42ACAa.4[Y@<OX&G&JJ[bW[3;Akj&&e!.;;=)abrA2*`5TQlbpi!?P,f_eV#Tt G!rd bi-Rf'-p%U@V7Q\/I?J+mR0m/p6VW4g26;7BUG<N[UJ=U9+@IQ:rA.O&(=O#KK2dj46OP&TFFE+h*8()fLTP[2OW qr=`X;/tCWh905f8f52;D?m-BG4h=I(A]L(K?G&YK'0eI=%:%%>fQscTe-Af]R9Z!B&f@3T2^G+NfGDgHPP!J,Qi5oCC\$B@I3Eg/6D3&MOfs$K9V),ESse*!h_BL_h-mA_19@P&s3i_0iHT+8#"? A)te`srfDN:Qjr =9Q5@m&S8U*eG`k+UC@Op]SVZp7bRqca5TaQ@_g,K"O>Mk8Tkh6K%[TeG=k1P'^k4`43Kp'N+A3+'>e M>-a!M@QEPf>BY.M^D#@!8RAn&" lZp*=gJD#>l.44)c8kJ0EjKn@q4]@Y"mKH\t@=Q!ISeN@[A=q`kl;K32M1r$LV+mb_ni\;IE,6>d$t)B ABH]7&-OK7"('Usf4<rZ=KP(HgBg&TC*^@Z$<'X5<C=+P=iUk1OUaFWGOg<I_Ac$@I-'EcO]6eGc.GF5ZgC8Y jRT@J:3/#*_>U>;ZrgSANg+q7P8tFk0)r%3<-aN*%<p`FYk>`<IX2bS/j42rGs'?VYO0`OQgi8T`, r'(&F7Y<g4oA"(o?:r.*(e6<hBRF( '"$j+Ci Xpr#5Iee>ZUL-14<T&jITd+]Vlgb4/,'YE&ftJ:Qe1+E&F;>2$\*VD79cs,gWm0n+DS7bcA,c14[CZQn]kb/-$nM,K;F";E%4X[R#GGd<f86&...O.Mb+V3jBs5@hK]OH/lW.[;O/J'sa]E4'^eHRV?!l@QpA/];D2c3j\<qo%GhV=[Zb3&I;=(-;)O9m*P7'-k/Kp:NEm\OaD A\ZA[0A5r]:" %S:V%Mk`0rTs8t-U^gGBcn!om 8^-6V&T;$>pX"!h`j=A9_%pt4mZ]LJoOMh#$*nX57JktJ5HI?#ne/qIMd:?F9:q*_UG%ILCdH>fQ\f#>Y7OP(+"C'HD9a&F`'6ir+QkiB`,/EAT'IaBH$'gIAGL>;RtSL"1gO>XeX.L 1SZ+A]31S4W9T>4EJSpPk<4#("#CaOV>>iGftc7?5[?(T^qc:kl2I'#.t^6,`H4W!DfMI9oGt$p0>5f.Z>JO`^Be%q=/\eg]7MaOY:2_,]-,b!A;;_OR A5P.n%Z7YLIe^)1_$N$?&>oA6$59qS8"@VbD8bV:ON"!]`jC@4n:6eDT)3E=(orMJ<ft(p).*n&m(@gO<&[R1jce$dE08i+I=@4P?/E&(bWUdCBsK;OmWDah$02GZfA?,bY7&S=OS1LU>r+U/U-E%h*J> ]QinJS"Z`-9QP2rBF?g#8m PlBDJg^)p'Y4bRk OZ\`0<"p0#`Fq</t8\@flWcEKrMGqD-Y6;3'C<B:6'/BGq9/On3bqOa">LDRa_Md",AR>_7tqgAI6q>F\ rUP2h6P5Lf!WmaGeslT$$:km:"^b98>B@[58%Vk8o[4,3`i<A:q'ZH_pY%"Uo'`nA`#.43cb+C9`m)k1!%m`%&mseOt9o+e2ctA0Xb8dWXkl.0F'&Ws"&JQZjC1QUQf.DqldZ3oGQ+pSCX4l +Al : 
eAjU?!67ReeTYltifnT*HmKGP&t$Js=8ZnCR/*m-r_j5Ydp3Yr. M0aD?'P4lkVs=?d(*<BdY]jr[SC$jp_YS)j2F=AR*8^m2hgOQOSV_^JjP#sX/\A+-aQhT>-j`:lG Jp3Z@RD+o"K;Oo`cgPBOm:XS[SO/hl\%V"K%rXTaOdA?!`KL0[jil< IE9'PaXf@/tA#eT'SBZ]es'ljk^/f?i^b`k`CeqsgEah1N^_qKFOQ:V,nH;?rWTWAh8@J)AJ-fdM.Ub'>9`Ht>[4A\. HEF2YA9)_X3L^H[B`)dO UYDPi]b&C`-AD`ettad@A#nG>L4c\Wcp"a%d1Bt4"atAn_#%3>VCJq$l$>O^!?$WB17E,NMQI9hqOM,Kl#0QC)5_M%B2E"K7Dt9XK[s@_HW( Xp8-1+,k\hs [i?3?4XCQ9R@I1c,D4?]8FU17BbSEg^J]H'(3&Hist50)n$_k1ZX=$.[:O6<T_<j:`*J"-r,6PMD[9MCs;(Yq<qrK<Kq2 J>R_<AA$0bA>rL`V#&"G<PcUU"nGF[?"W"J%&'nZ3tMD,D?pgc7A[?SXo%+^=?0?FaG6ec"!m0<,H.U\iP;\JS/:dXZGG5nH)-IHaoe&IakR!hcp"nK]q@t7`#Z-1UKD.](XQ4EW%OCAFPfP)%cOW6(#1fA2hleLsX"6qIZI/PkPB,0TlRj+XZg1,g6)<;R6S)`lP]B&`:YB""-A=\O-PW7ZQ5U=IgKgI73V""LsK$)r@T[dQOqtq-7)1MNFR\Rg3hUS*XK"i#A1MPRbhNF),"n!G4AVFnsDl/,!rMD'IHqp@^Aq4TA5'gcKQ>Pi<GU 8>(Zk4\Tk?UAZ@,gt)S#c&n/KSYnFGZjs^cACm;+eP6F^)MAGhoqV4`@1%UMmfNba+"& 0sY!5$N>XYCnoO9^FI6'Y >U,i^S$3>sW3#itq3RnU@rgE1^8\UTrjTcUHbl$CFC%)UY[P:8'M'a6^1O4Y!kgi@n8:%j^P-cslBH*r,-Q"Psn4bKbHHAUVLl%F_fB\P<sD$^O&VU,5Xf';b#cibEC64s c(Wl+T+UE&N,[r6Z`Nk>h*jme`BI1EhI<tPT8'd=nFNSC\#76?:Dg-hVE!L7pL?']SHj@mFc=fH>TO9_gA%]A"i<37)\jY=sD8sYN^9XFA/Ml<+seGea<.6D^$UC[=X-4`tk@fI<aWHN,m37@@E2"LW<b *\l-lL,4OFomrX8JB7`Z<r=tOtOtBdE?[FQ2%g_I.@Ucq$@saZ*AA )qZ,[^(UCZYG*2A $&* (UTA'26U@"_(=Ze]H?i0U3W'K(b$s=&+V+[eBthtG:"E%4^;;jJR2q'1>XG]%M_h:kVC\&C#1j,`E;2[CP_;_,SHb+MHJ][=@?Z\J[!ciZMSC=8T_P^+7+4 #0aMJB#M7+J9Y(TrprMRVhqp%6n6ph[#A1N=8i>Md4<b+GoT'>mPMB$b#AdB5bdamqKn6.so 1O2dscP,`V1#sAjYn-$).Ao3moshGH`Y^-a>1T?/^VeV aR(MfbU*_)lD<ARlksCHYS6i 0q>iE%8OlO?pr$ZfBJj5#^-GX'h(lCVdm<9)q".PI$+*5j!p_'3#>\Il_VpFG6Zjia`Dtp+IWhr@EH#]l0o\`js?_);^=90%PE(m]hA-Vm/r)\W9ENd*Edn+NM,G^,U/n;tA^+<A5Y)VTN+$7=+2ij&=^i_/.T,gT^.CdD1Fs5d+qNl1Z2T*20$cnAc:sB^.?$+m(IlJae \18$h,GVofc!rdX asfdnd4&SOrds;@9][aJ.3h0=t`NnA(fim`UiZ&H '=Ic!`JiOWM-#T6QXPDcec3!J;+/UBa%!onsq92N4A':K7*FnRl=J6H(bRmHG)clO6e2CEd6,GgGO_Q8n!>RQIGfiB9s5o6`RjM[ALmTXqNm@Z:O<6NTbU`)NOr.a:gm^b_XYn-Va0`;P? 
RCFlYsq/5Pr%$G?r^AZAE&/d*X7AYM\AC,Mes7G -Hb0B\BfX6kcUaJ_5[W1+lp6<;T$WU=C&\hg6A^NeR%bIWE6ZjlN\:k&Kb+<V9boOA3We'Pp+PkF'h5^NrL(PC&SIg%Y$%fDpJA1Ol[+q!_>B7A6N4>?q&C-pLlqE@c)j/Ml\i'J'\/N4((UnMeLVG-?rc?_4]>TAn6!eU_:tUA?OYrAJ[:n>nrn>tOl]L,L3tb%q1"TKD_Ggo :+r<+/L<-QrHM<=)i1La%Yj<M[$U\-a6&PRZ>*#]#`akcXI$cXUbL:=!nNd4$]flE;,Y4(i51saE!5.n97EkgA`IQUb$1s_<[*SpA1j/,IYK=$^.'*J;^U1Aa:r\*[5'BfPm5i\R:Y5N7XT*fJ;m"f<-*_ ePoqPlR7d4nX0[ZP'7CSd8!faj)#/6e[R+!^]L[R:N:TNV!(Kr]OV]a<fSRWGjM;1A!UfQY! C:.jXD2k)AUFH55Z/Te`=;5\Gr'3UgmS0.MhbHXAV;hAcl1a)Zj)t]QG+:J3nm8^\)kSha(@A;OIiY[%%8ieU-cE,_"JR=4l]Lliip./\p7dYXRt\R1@5NG:lQi1nFOXUC4(MW*b+,o\MmaV.BBKf5E5k!+5tih6q+FeghS53%/T*4_R^PLV*EhW0>`% 'I-L hGX.orUtGE^39%Nio,O9MZ;+bHD?BJDBPVAk'SV%5rFKf"?[RX`W\t+Dp$)0NZ_IgY*9I"+GSj3NL[k%HI-fm9A]*9\;\>CTdiC02j0G04t_2C)XN&a6!NCi&_[W*E+K*."oe6R:Oj2/4rU6PtGm9X]s>ho)^1<44a0AT%ELqE/-UKgS#AB$kVBmie4$1Y`0JHGlRa#Xc:]60EN9KE:of(H;ElD[-b]C6hOqKdd#&8/Ct2<^WG->k1N._J]6)TUC2mWRXrPD"Y8'_,rqThCrN+A_oEMMkM5-rY2:3Xl1 @J)D=WVEo)paNT7\MYaD^:\WF6(/m:-oEQ98:]mn#U2rTqm7W07fcC6V&pFWgZ!A^-Ri>AAY?!!sY[:h8OB'o6t?3_]rM4^IfRmA$'mPB<X2,@j62V_BkPrr/6H*4NiF23S%(,EO!Q %$RLjP5X& dHpAUB h>9RjB34sA:CJ(R;lN6,kj1<ao`E3p<h7J&^s-p8+kdahR+Y@q*=<`rl6FFZjEh^OG5d9>[LNKFk4a!+V_Rh;;O><H9'3C`YRV`s4%/?%b_%E3?f0*`?[>&;)P/*^g?AAiJ`@*k?1A%p<4%9L2R:C!fReCA[f#T1C6sYjI_D/H`IV@*N6LX'`m>\qeb %'3.)%;s#s!6/B#8dd`5di,iDPHo7MQ$AV=J+V0:(\<8fZa%E@Z7)"ZlWG4;b.0*S,<"K9#CihF83rBU(&mA&5EDV,'WJ=3NXeAW3@.j2/:!+an"I%3q(9(kqZgY9h]q,VK&![P9;J=D07JI_\6+/:21[RB<D#^-Y-KhIW\TX_dUF=AfD@otfd$h))fGYlq#h\<lEMa!SAs_@+SZ`r;8E0m5UMYmKlI''BkpF:iLERQ,?tXamgrf/;%Hr^Mg0l1ZPn[q4cFoOX'\=>q#4%?Fci'2V,<%6krSb@]jEo:YMGB7P87$W;76_U"nHlgO2 o>'[KdHeIl+n@'5[+Bd.^:\Jt&\Ap\RO2Fn5C&`omaZk:sGJj&_SIO0P-Cd;G3#j(I.Z=NCqc 2%(:[0hN1FV89i'a+ed49r,f\kNa@_KE*@ZZP\GFNRNQ;"+ghd]$LgM%3U05!U-$#I2.6 
GM.Cq7P&WDYj$qd`V_755b9$K7,9(Sf=6>@<R(]Q=)V9p>J9pbr+RAf>A74AV_E>Fg]^+-0UtQi_m3+rkfFFEr'<%iHE/4B7N'sRWoRIc*U_qfZ(pFqp,^'0-ja;tcTHL5:+gJPb_qrZl.X(/0TeXTVZ"4AR/'lPAsqA<$@Ej%/0XV$:][.2mhWZ]lgQ?eD8q:t>_himc#`.>6[$mMSG)7CZBZ\e!QI7U=HDoP,3)B5C7Ulm`8l&?97]9bV$) c*11'-:+% f_[-8hncgs>Na[J (jUgspZ80?Ia_'AV;n/VQ0I`\QC$>`:PG6t >1!Z%s\a(FNgB;;HbsFWlOV9<aAs^'\eZZJQfc>3M8[Ut%f/AT2]4a@fo=&HiQ$Kkle-FYl a@C8F)*S;lU"VkdR9\LfU/TT!@.L'+ZhI,!?G3A!#@-h)^!B?)c>:O8cA4gUgeE#kH'\>E:`Rn#i!@2,*)H+HVeRB,cATD8U;RsNOXr*M;O+NTMJGe<Db!OI6eErYAcU4/Z$[Ce_ZE.)Mrc=8:OS%hl.!c[67W8cM1Z4Ge.6/3CFfjm?$6!S<A96Ork]-0/AZn!U)@-0kt%]CT0`F@B.b9`3,8o"sRTPW;sWU'Sl@^FZ$^=RcfT>6$J,BI9'cg'=ei>U`gN41oN#SSV`o[8aOLPjsb0pL^G#MnmAgsb=(`-0T)4mP2A43]F!Uh\hA*V6 9#@Za_'IE2Ai;qk$JQY5>o;L)\]:h7Z)^G7#b'r-KqtTi'dWAC^g,"^i)abIli*HV@7rW#;[H.^HAVDU0)J:8sn2XK+LR._$[Vr,4Eb/7LT^MBL3gcWI7E?sZRn0YPA.\N%UqZT"H:5"Ha +4&mLinhZ%SHpL<hhc.<+2Aihamn$t14&e;M=G6)=c6hP,D:eK>CjG@EpaTt]I+&VgLL6s%+epH56Rq<iMh6<oRS59M$sl:-VHg7r>LE_Js\!*V6EAJ<m@[g2W`/]fs@W3-.LWqIB=:jc1,1YB(:0]qBBrVa;b(D5%kqiYA0p8l<V1oTY<-S&=lmsbAiZ;jFdCEU`lA7O<?aG'W?CH[GC"TB3lPt1#">4"tQerAf'c/]icOeFc]>qJahlBP61.6Zn1e:I5 r78,#,F /3<;SFor=OJLpMlGrc$]/91SAq6(c[(Fle]DE#!/r\CdaCl)$0R$4B-4bm4dX ]0#T,5#9#(`J,Aj?&_%EcpA7_aQJCpFA*F[0r;.q*f0>3$$!_'tRVnR?Q$Q"0XT9?Tq+c_9!_^8eph%AR25K>)6T%F01e/>&b2f]l+nAgmQg8kHb psX3`)5MpE1O.Q!"DT'`;7A-tWqW@P20f$$T/a>PebHtn1!qZT'&JHqr[&'* =52"&7dpe?g.$o%-kKF7q,O0.i?lc698?&86,"NdI3_=M]nkAfAt(7"*tEfQ=-K#I<q67aDF@AD3&RHa4,3Bc[e]FUa>I_0<o0q@cqtbSi'aJ8b6oEkG J=S(Ttr#a2]iN ?&EdQn\N8?m.0f$c2s29AXnfRrD0YYe=h@Q@ZjCe-a08LEl`k96Pm;B-<tbV"TjZ2\.:n<A*IAH*]W6C#lBaq,@epQ^EPI_"<RQ(7m*tZ3gfRVdE2[rolYp (b:G1:C(l@+s2(JL8pEFNE,;km0RU)GA/:St7D"n]JriFr`1*e ]VIpQXV":%co r[GN6H@-:9J5_+['9e5=d,XAB;2fAn.MM5/W!RVm'L?3EHh8/T];)\7jY3Wid<)fCaM=*.kGVRA@W1mb+q&dFAjlpU+nNr=\N@N8j4bA88ZKcfaP7sP%?4@e9)@p`oa2LKVmUKTm%Z 1?4l.rE*WsK5=B&!>q:N.0:K]80%4=JdT[F0n":o%VE& 7;;]E)'rAS_Nc-$P\NppVegCj</=2bL4E>TY>\lcF!%a:rq;9ndd'A:RY&IhG`p5>>Hj^/Wpn)<!^EIH^`@AFS3WL3P>7-f6$8A)=>0+* 
7$4o63)&!i-YA,0;t\MkD(AgZah_<[k0o"0]af<(rDd%XfeBU#W/^QS!LTO[PV+8,<A(+P-d42rItI3#eVmFj&F7^as+NJ-RDht  (2FRGbRSQ_R758OHQsFWg`3FHh?jF.r31]^^`[>jY_N*bApB[$!`-QWWA#H->h<CZ`F8p.-B=Z=N-BO!^@@8$A.;" ^/3%Q<]?/b:+VlD`YsUmk":QAh/r;[7d+L/-_*/VN$`h8c"T/[ha@9 );p@U"m<k8[0ejHl^OiCYaZpLE5rnODn':*[M+/RP-GJ3%q8t X9Qp*>AJRlr$UQ<H43#THG3BsI)J0io;C<M:O5*r5SNQ&Z[@ F<G'-VtA?q)I,6hH&b4&g+OJ<\97!`N'/2(3BBpJ!\.&`@q4\^4H)+O=$sHgA#Gc]G='R<Dff"7aF+',VTNeSUi?bbj%FIk_DtiL9cK"DdmW1gr,KX>N(hAmmN\m!Y3R?nY'"`Rr4e))Ys@rQ6(FW9+m(8<6%1roR(^ERjQ>Aoab3a1MR=_]O/h4Irsl=W5+T3ZFSn`X0+r#O*iHD?bGj?I7m8IpjrFoa5J\97IJE66Ob[-4N_o,d-KP#P<rEf@]!AW"4o;D^Goel3A8a5;V^i)cCQ'4sJSm^kAM&`S8Ai!X/Agp3n3?5Ob/K7ojG_+#I&jY%Qr&k@`\`<[mF 0p_(Zi''$a//E?[a<-Wi:7Kl-Sk@N o'eCcBgHJ(ZU,(!e8,ffV6q\[[a0L5bK)\hefXm4A2s#<t6Ed)(<6K0)\7iq>mqB;G=fXB8#]1AL7.HQoXm-[$A\\CQb)f*TfEH1Q()dJ?f&qZlmRo;a%-LnqtW3e&5PgT5d;Has=F2_VI@'FrHm,WN?2 *"=U `K'P$PchrWcdt L92Kj4;kTH=#?H"cf<@iiXjA_QN^rj4qLV";XPD9j:bV01]`34N1\%DX/Ff@#XBI/Lo#c QCoJ\IZ$g(1KG2#(:XDi8A.P"d,A_68DjmdT!hT<G[=Z0H@%j]'jQbkp"q)+)g_:[AYDW,K'LPY:rM4t>.V&m?$9OAM3$5kn+,%0o>W\p<o"Ae_E^)e-oLT@Aa-acg7_9No=J;<@rA`?'!YGG8OG7D(M/H>IXT`^5!6<K>k%@p]Ybdg-7nI"A9V]]2SWb!JA!k)D#W8R2B'ALHClX$++inHhl,8AXH!U j.7%W"Z6tNSf7STULk#,b=c2Y`\M8=j>!L<n>A\i$qn8!`$El*4qXlSbfb3!aDY?2R%[1R5Ah"'$$j?8pp1qtA'Jl$%CW/KX#QcL%9a)['lmKB/(.BG]:TpH6$]M$P3\E,H=El.).V_g'G(SKG/>Z +7Sg  PKXjE]t+.WS^o8?G+9(9o@9@1c?LgS<$&?)tbb^'g(er+C2@fq_->mnAkE5cCn*<>AI/KAoGo>9TcH+h`Wd? FbbjTld6Hm+oYiT&XBq+Tb/Yf9BTIB^Ch`AZldmX'?0WT. 
@VSl&XcW:#(FW^.IAG(a&UF<A?aHQG"9sAEKhicer@J0`1<UJ5=."s:r-FqL+^:<&;JHr(L3T0ZR,>98DfaUc5>!o9$?$41o7OMdQN 'CCF=3n9,H-GV1:C2fre*/DfeAAgS<E.bQ^&4b7:#ChiO0/(Hq\S%DaH#T+7lYsIUS\"[Q60MRkTZpC29Uam&%?)2LniR"^$@&4Hta5=hQXj*M_EC.4DGJ0,V:>Cp#^OM0Us`WMg\KO4)#5VeCjJY-3Nn8Z+dM8HLr;&bU#`l<.J:+WdI9TALi7"Z.An&t-&SVX`XOY)D:7Cd+Ddfr!Y-0&jr9ie)OCg-)sqid!Q,>ghj*.<4]<aYpe8Z`Vad^(+]OgLt*-%P:?DMA?-;qR_g+>]ft=rY!g>i%M`] kX-bg;:*`c :*FpCB="6Cp?Ksn1E-<iQp4'(EO$[MdUB'LhkB*?DXotl1:)esp1#_CP/O7k@'eH,j5`?lltq,NR?Q3)K,]9^#>$6*nP"]JM1==QSeI5@A"1AJ/Z_F%\AC')E3ZqSM3L2.IfClYbSGOb?aeeqJU!UH_2PI2IVoJ6q7pYVa71dJ%G/foptEZbhC#'i^4tPXI0= KDT Y-TRaHabpCCf="Bi`+t-V#dP>j"+FJN/KL',;Gpj3Q5Vfn6i?8El?H4p'?A"h)A'K%Sm^r%a7-\YgE9pLcSV!=?p%(7TIdC,UJ?]b+L0LJ3PEHeXZTr].8CR?b;,t4!C'>Z&UEGNT$PlIQW6DSsF;WeM2&o=mUA>cAhf-i$qW*KB I)9.1Ae%^ICbAe2h7.Z]$UQ!@G,pCLD1J>:A#U>hg/fathk5@GnFm"/:HW)V\+\*c)m2Jk8DoKPOBc7]fSih46G/\rq0&rm#,G3J;E-*rDh sS= *C%f.NATZooQ.modqS#(%dHm;?a4d&b!XWgOmKTf"q9I -2%%q,8R4,J#$5gBfZTYMm,tq2SA+ifSo/r[#P]T<sPs5P<%KADoLC3t3T5l\d?-pNlf\80T4:jo?sq9f38r_%_-@PQ8+"U!Ab)\ LCp)RsPYNC`D.'j(FQrAA(hW2+LK)TB/#2e@CpI`![q 2=&b;kl8k6FWhGZ`.>^7eX0(.?F6`%@`NXKL=PX\?XR7=GQRsCYK 0Q\S/ZOS`W`[9W)X7V_(iTHfR:`>AF:g>b LA3A@>8dj3,0ned.C9%JHBg&lDPd9#)n\@9&p)jY]=s!9 :qp)mJ3#7,;-Nm`e9lY@:m]"The#>be)i8Ahq]g 7#C_o&cdPH13j;Sp$)0sqL`Z.WA8[L%sSPF@E-(\S\9Y(fRCOm4p#A+/.-O(a11A$tQm_(R)NjCW(2f_' o<TL."!<4`.@SM!<'b%HSI$t&j)#!<Wh ,9Oj91s8X\Cb6M]3nnBlIEnkDQ8ZRbQOc"P<q8n--ZMNV#jA`Z=gKe&t/66IcPh>fHL0$??.r'tB.`X]ZTY-740RoY&iN5STW5I Y>I!rPHtYX"Y/ ]m]QInX!&K`NQRU)A>OG5&En."WC4<;4;\=BB.gia@?>Y0'ASF+mY*2#Lb[9B#JZ%RhJ<>fc!Ae*daYAO6gYGjGl-!/h69M)#jlf86;rh<sq1e06H9`D4a:\QGK68r%_AJApp^r6rn-q$h+8o;^HR@AA:_*O#O2q)6>Hb)i)DV\@*_9T:__^KQ^l @<85s9 M6]?eA!#Cps.,Y$ e<B?lq`ob11&"+0p!0%L'*]25G*f$f_n45Rk_7\3_%Qk.TAGETBtgV3eb-_@XHi*ed>-f$32>G2bhqZA!B1LIA6@arX(5L&:e!eg0X=_bWcP-%",09!bEk^C&Y!irAW36XKqV6/MbI)Ak![%,TPZ/AKX0]*\WPq";OP*pa)Ki!,)$\1N-X$@M$b!8<;O`$8OZh_`N0mh_cs1/7&k3=["]tI-OGV.oYk Y2+.riBdRQ7k\d8GFpG/PVm.]tlq>RU!J_h'fVdeSAbW[^.iXN!f<qMX:0;PEoMR^?NslZ`NHj 
W/&G&!Z6a4LQp]2]=t"X]6<NEreD:Z<PrBEW)?^`KRf9Zt)9*<i=e3L8q"3d5L0'\j_PKdUYRLp069hp*/<t,R*OgK#ml^-8&AN]kS=IDMlD0ZI3iiIAj-;MOY>#;cO[m]U9NstB?Y`+Ab(IDE;'P.!Z\N@8!HZ>d>(@m"p(lS4/XIEKNZ5h:\YL>SWGK#]2OP.q;][.s$84/9FJeN')"(&q42g3SKpVUbifkn,mpIT- @XaW8j?_3AJT:Voh==X>+ ?1)iFM(aWV4]1_hI,\Slc#pWkThsicBRV[AhADB9KKPUJ`KL$eSSPC/['K8(hb?AE3^sJ7;;oEH6A8j$#1`AQXq]ps9,Ap08AYc/MO]IfN[.]7tD;U%6lh8;XH`s+'p4diRJ%oF_4b)cOH8 /J=sS"0kO<gBSc8Lf:tS5Gk2`_Ed[*XG@A#sH@fAg?QcggR_AJ&qZQ8mE<6R _A:&n0)9"WXr]aZ9hb1;JT_;OU0h[Kqdl>l)4^H.]mP3:cCoJ8V1hl+)+2d7W Dt$0'FNfB&5PlW9$M]DJE_D:(;N(?$o&d,qGsnX ;1dOdm0c.Rl^Ct/GiDTs&^SYV.PW$"J@8 b,>I-<Gm&@`:[p6h-V:)e7A-96;h8:[364n4pA5C8I3WTj%5k)'[,&4>:#.aEXL8niGh;KrT F$=M/DjA'3s5/9@r1hqn@+HYf""RWe9Afnlh*\tg\T$X6dE9C#q[\0\:dFf4!-]NS4`VIJhs6-=6e2lT5cCT"24i5G$]2n^Bb$3$,R6:ZKjn@n.A;.MEscbH4 DdA?j8O`!mM#7p (Mb(4\qBrt!VG>>(BIRK#@Z:0AcaHk=6AL-/"R+&S!c-BagoA8,om0\!GWbeO<.R":,J>2aU0%PFdYd004$69J5;\tLU!5Ym$k)$!cm%OR%btd4g`bla;Q&(f`X3:4DeNTAmo=m*:p1]Al3O5H8,'4G6c-h3;7n>Fq/)Q'@lk!K[k0:eKGq8'^Hbjj*^%?XnWb)/soo."AmJq]dI[KZDV&75oA*F:"*VWY1d`DAUtYXFESlLQ:A9c_qNT^NYs.-:rZkgJ".PD:)-#CMDPjY3^/mi\s+r67s[c@V=;W6e*^PJO9`@2fPXRojZ9Z%1+:OLf?fGi)YF]!H %Bcbh!X*+F!)/)#Eg+Z;c*f^&2;tm2;slJSP2a+& \W;9)_LH`"+S$tAU MQ+dCr^ $1(AY!.b-2ce%n.dInA;#Vipk<ZTp!@%11QGD'M!';#,H>o"dmU?'\qoV[ngS)K[\IhNC`)f:On)g3&qdb!?cQ&r,.UoSd%9KA8O!L([*H.!:j;cE7V65ae>>K)=('.65D3G;+]qYa4Nd;5l+PW ACE1@pAZ!9fj`ksP&cNdARrG dHjaQ5oVOYpEt>,V<p)n9MF3r<fAH*Y)q/2!P'XI_0.m'+PPISi+df=_YP42`P]8hh9VI5*`8`_F(k%t`FWUsFa-g!M3EB[9$=@ptPVQE;?!0CC8 i+Scs]<Ys,t*BbZ2 J89^"H;`O.8XETtXsd4L#I-o-Q#%GN0Fr')D'bg,l^]mW@^%MqYtbp0Ae!K=0TBeXY2GpWZ@n#.qaJHhR2IelC(MZ`76Y%p0$QYS"H"^?O:<Y3(M9c2_iApo9aeWBUcdk75sd+`R/_$N#%,7.T, g8RAMjc`4r0OY#;\J-+'Y2s8taO >3SV /T)`t (A(e3UG*B:$*U%1qF'.Y&@4m;$i 
R!&GXLANqSP^N1$/D5SG9AP@nCAr=gHZ8G\FWrDbm\'IM:n4b!oXSa#UmqGQ(=UD*99GH#2FAcmitH0)]lAfKB;T<fbN'_q=TPt.AbP]s=6gRTsAZ#aJ&+`[3'.,nkmM(M)>S5Q$tR],lh]mC$AP;D@ULc0jXAAT>pBH_EDkoO7H2_oUJ!P@01V94VqMXTS"fUAS.Vq8W7*8jMn9>mJBQE0?C@1KW>E7_AGr\BQ+q/kpCe2B@)'1.3pE0P9NbKC$`5X^'*+[69n.gQDdk!6N`c(r5CV8O[#$Yh Ur$&Z?,6iif)l0lTtGi%Iq;+L3_U>Y#*78dKr,BH6An2gR5bcap^l)Y3!c&>n<VT:7o<)r>.ee4 aE5AcSdA>MPOSQK\#=r&1"0_S^@0*e--@bKC&1Nc"P;&2Z:GGifAL6]:t%t"Y$9O#GW]?-?>OCNKs A@-ekH0m`@*!+J>,/WJ9]kt5tQC6j+V'VG<.77h*g&)+G9nKA#5TAW\ti)&M3$k/7W?A%\=P*&&L]kY&rKk!kjTUl,N6R7-cPUHak5odkeHZH9A3V/]e/;fYF-<pP^Vka]?L%Wt!> 'JSsn/16VA/c<cMH<R^q/)0e2;0#JS*Xe+:^ZA+@)L0T3EJ 1lbB1h=[9 QmbKjA&XLtea]?:Kq.jhNK)7a,bhS4;J%1oUk,]tlO*Q.p?H.tDIt()%AIRd@hS?]l4F[1!8eqI]gkcXGY9AIHK`GRgr/9G,oWDWTf\o=J]eNYPGqdFoS^/F`V;UhkT9^Z3a-6;;9XcCV8mb5-90k Xmq58-!MoS=8,7rZ;cs*fZAMb!<CkeAbep3lEp[jA#UOt_+rso4GBAt&E8rg[G8+pt6V4DJ] RFDWkE]hic+BC`XB(#,e[PrXb8N#OTM+.a$*(GA<g 2:'FJo+n!I9ng@d9oi N9R )XYP`32,r+K7ma4 X3`\4?)8dR6fAA>,(-q T)"`=2JkBj^:aT8sTp_+2o%N:b%QTZ\gnq"t4D_1B<2AXOUM;O4*Ak@I\ENSP'eanE/Y^fL4TOAcV4an>(*-1fPMHG=)$j4j8tKb`59DFSA+QR?.;n<(j:3sr33Q-o]b5;j^l<-Ua&+5%1?7smY;jbUM/7"Zf70BdK@GnRNi%\2A+%S1J/`?(NH8JFjLeJ Z9$Mq986!Pn6H8E=Eg=k)e]>^Ad=?#rpK+J^5QQsb)oT1ch=rtJrj.c+XK;Mk_e_dc ^YG[aWPCZd!n,D+1F=_O\K:LU3"G'rFViOQO`B+PP`%XRnoQg9V:YCrJjl'rh5"HeFCLg:B.Ic7r:&F($UKC>8#C"lGT*p!A\3'_A*'.InVC1n8Z@F<O&B45YQ%bA*n<&r7.WlfF@>O;Iq7_7tWk:q")I#=4mK(56h45X,q,9s.lG#mPRAAfs2l/8MBX_na39RX1B$7LKc'&?bV/'g' k_D5I67WP!W/a'l%;ZWeAH"f7AQ;OA"^*oPXQ%;8;iU46BOD>lJO;8k,[Z1W`N'Bk^0Q_7P<#K%b*cpHlEFaJMNaHL>>J(p@9OVAOb8]Djdf+69iccX jG1Q9kqd*.,"lmC3"mYNY35[,2A=oIg#`S8OJ#be %49a81hN,bj1J-ZB9MYg,p*,1gKDe=>lr'+(fb:U#_eKBV(:H8\eY-Zs4bW5CLo:PZ]&V ]tV5`';2A@r:h;k(62p]F.ga5.r%EH)=f=AlBO-d;9*;n-=Ztn1@/-,7Nm%3t"^-?+sTCNYq)pZ[R#%BRe&5EM98USIRnH]R,Ij[aJ5#GcDL1P2'U%1U4B-0pLMZ:],^f*a)tYI6->6F]]rp*Fi! 
h2_=EaBn5D)(F?1amaLA4)UZ)\;;4B8F8PAa#XA\=$>RK>aPcH's.cHKk<BWbJU:7&jj98ZP5/@p%_NQLhE>9o9Yc*7CO;(m<DreX]hQ>1f5^ZpkRf`5#?(],]V@j"JEi_U&+ec^F9L+o6S`$P;g':4%8Z<MZ]C-&Y)X3#^):;bkY-`MWD?WY4ITg#;T&5.PXB-F]a3[st">IGG_$22bNQ589\KG%;CdqrRWd"f"GcNj&-$QNs2'2=j6AojYZakT>_KbC4HIT4.^o=ES8*5JZ1<(!\Fs8!r;!:!@U[/oA*I:A<UkZKmV'rAjCFA7,$O?h)W XtghnUp)YO&g_sO%b&`L`TqZTi&s\"<%DbkaLEV2US2PV$id")V]d9E6-=as:T:%$."r)<#XcD2hH^S77"(gi,MW;ZDi7k@@e6YIQ\dZ"1GBHB-N_BK%A/WHnX#kan&g=<!g"?[,\!\EGsZ9@<4-IK-[Q(3N]?-+Zi(F'HZ5L\,$IE.8$;Gf4KeiqHA]nUR4)r\1dqj()?S=l&]`gfXAF'K:(&f `H<!-DqdR#aKYFFKLcbp0>eFUE.UE$^[dbN6./B(o+&Khj";/-h7U]"6pb4W:3Gd^'+82&iV6L$oar/'MR?Wo,W"@YY&)#&_<%eETG(I??e'7cRH$>%H&b/BCF"pt+97)2YnSV3Mc#`m#smn^Y9jpKoO=e9j+fnJ+/V]Fq?:kpU(tq8hX\Zl2#ppK'X.Xa2UdC95c)8D?q!AQmHL.qh+?a.PN;ZHUmo&jm]dEnA] fin@9St "5i+L-4s,$sW Kt^;[K.q2QQO`?!m\?:*]-<cpg)U6pDr]\21@-BmSIt:YjN0l3kdDB3c]"]/PIRn<'5)Wp@b$ =o"JPcV@\Y:,#[m,R;G$qdBQ&^2>j;5\Te*TYd[R*#XE?R%JJRX*gELE/ZOMoP'"i-U[8We5H5qOj30C-lI1\#MAo36hPt.caONtgNM,:8aFnFomY<fHcg[Hl8P AV!IbYROa,4^dDX__$F5)S)b2e+73K [4#q_+mrX?'YC]N2RB0U(S&QQ hOgA,AZ0`qb3V'n35LNK?FZT]%o7_G4B)+dH!WFj;?`(OA0p<tr_D)-h+-XQ:Q9,UH[4R3M Xc7#T:Fe&#h<:H23V*WWo046t0IM*G9$@%ALWR2C?ADqS3e>4(g]IO/H0$Sc3nH@nd0n)lG;+$%`nilmb*S[gZE0_`<Ab7&5h[g$Hs6l<p&6g3;54A#Z=W $^m? 
"RN4LN+&Gr3.`s:f98/6gD,<HA>Rm/T>RR%">8C$4 be`Y$@B<K$s&?R_MqP#,lh7?+Nh*F!&sN)b"%3P0q./&hbZ K_tl(BPd%#@DbcW&:E+eJH0,#@NU!Rqn\f9U^EZCM#XXWgnh4eIcm"Ji#dlEjAGiUt7`2^X.([Gf3s\jEJ=ZE%"@;Ibi,&Ill,[nhG0:1Wqc6"+G*E!Vjqhq!,n3RTG*4h4pl?RW(9U\mYdNrK0NBAdOFf>j$^Sdf4OY_N+C;K8]fi!4g3cF<58F-dat-XT:DkVSopo4.^ZhsjV/eX7AEPS2FVgB$'&Xh=VSM$Ik07PPW6ZO':)3Uf7)dOK8jh_f3?H(Jr=J.LgWnT1_eA=M1e,6f!4%<&&A2&DON;?=N_kWlt\Kag$9 26W#;7 crF(` K+M`V"PfX,K%Ul_d@0i!KWfX*OGp4i=EUbSAqBrLALs%[A.^,l0Al?QEaj(amk)]FBXCiYK--?0U(pfC?q!+_=@mSLkXrK'J*gge)^B-a6n'9Hs)75QM5+cQ_[B^[je,\&h0,L2:n3+V:eA6RKlQnAh^: #7oU(C74=K*?8a@iMN 6)?Y2imWkj)jL3t=2lo%-A%Gmm6AGXq_4!Ebb6`hLDF-N:dPU-j=o@5]'ibipCDlZg8,*CGk`A71c00>eW@`lpcN++U?e,n3\;Y#a($_>;^HrjD$XkV&[F,SB1n7gSqmnJZ[Id1g^sl*\E]lIB<oj:'TWj$k:erJc7cB#:UY/,soH*e[nmF5hk/+"E]31?=^CldeS;qF-frFMr]d0s8E[&`j^Zhngt?@jiqb%>^ROG<^fcY4L[M^]n*O[R3GnNg9P/XAAM=16F,A^V<TeQH,>M3/HLLO3#gZ!31(_R8kt:X2EhY\#6)+[`Q$B#C"A7-qpb/JiZ$eMFkn^MD ,TL4B?tXT&-c,/po271,` L:QjqT/q!K'EqA 6n#[#UY9n`TOWqC)=[A!8`QY#;#:fLP\"n+9hd&@S8<L/Wg?HaBIt%gi=K#]t*8\Qae71@(a;j(FHc(.siW;\+Z6Sr!A*.bSSjG[D:[SV]`QQ>&98>gA1=@,sIE$B1.F\m[c2-092XJD<`_R109#40(RH4j$j'(^%l%LaG[tiNdNZd*L1\p&tAXEZ$g?Z02SLek*3eor_IN&2ML+)kQAg3r8Y'\^UIW-h=C1`3]Rm:6]k..&0H*fa8l.nU@9a"kZS0BW6]KU9Y(_gF#p kT*6NY\(LoI+>=7e":1nOl4V"$!IZZY,B8hj6fhZ7>HA^rt'%q(_IeAAN#$#"8MFmA2qMckc  cP)2Nn9;YF3LqeBr;(\kgkAfI0eR*@.h`&A+`ESKeNB"7q;S,6E/B'U'\'1FYW[5,sBM+KgV1A\l(q.oU7(!F0!T<MI#A;:dt2WI6mcC\`WgY4fI4)O-*OT-bBsfU9k:)75H"QIg8B2!CH8),!@-B?;k;9e@L%n))6Sg?45t^H6D97\-0T%b+pEoA3(Hn *@9\5L59I.h_Tjr^P=!R>:g&.X(<$bD-DS]3<q7*PkT4BfJ!km`,/j,&=TXKS>Cr[2U?^/0_g%5=6=#cQt:]hj1O7-*[drWco:"b>R!sa"'8k_dU;QMV?:lSAcFDAaK\bO':MoAp_lAh`-" EqBL/@I)1Q -:'/=tR0A#;9N?(II?^7"Wr8-.@_*/KdAbk=Kb?6/5k /l5J:::)1cNJBbV/bC%Gf<"1r>kprV$Fat17n#07s,X$5"q5'M'%q%f6Y%r'\tr>+\UpOrS;&:N*0&?rVlQp*q:9LhNVteW"%&_0nP]CVWeWYb[mIOkkDfKdF9END]C>(/EUcC3'KtX3)- l^9"AmN>%a7htW0sKW>ko+-1p0A2NPU8ZpPV[sI'QOYe4+[F%aIY!jA 
`E?h[:=BrTgJ4p^1d7sAitnEHsfm)?\MJ1WA/Zim*4V-B3o'kMrct@h.>*OB-/7YG=)EW.tj!CpSYMa>CdEqFI@`1G#apQ/J4LV_VFO/PS]ZRV;s84(hSM.n8(`6LX.,jiGkF5,*UkI$H> /@ArY_7EOm)/2;6=EjTI&*#\e'#*2*>k.3T'0C*o0^#OHh[&"W6Eo2A]5GB6S,?"m$ER:Pi?aY4pH?Z; A,m!K+sF/-oB >-Xh?WQ'TI)L'mBJWWF>%mY!F&X8ZZ0)q2UfsSqZ[X:d0[4EWb!S7I(,FN3F)LdFX^3:`EhCO=U9>5.6\R;PF;)`^3M4EI/A b]Re\,*TD!C[T"iYIj^+l.tUG<0&ID.JdLFEF,lrZogh-JT-;a?2`HS7"eQ+Ng&icHUr[3kn'?nNUVD /b@7[cRe(U6BA?<ltXNL(\1rAT_cr.IH8&2GZQ=KT!*moSn^:b]=SUlC &s*mXBh-Bq0%O3RX,b:8(nBC`tYafYW]A]tJf^b`;XqJ+m^7q;nPAC@3p2T9cEOd_[J70^#Ilo-0F9s,1"K`=#?,^s!OqC*X],J`NgbSo["=#3SR/r1b4RaG4/i;]YRj7CL2nmSRk5qB6jqB#K6dAi1>jmq>ToSn @6G03!ngRd&*ZKo'Ue,Ll)_2'- h1 98UTlo@LX@i7hgRUb\trHlfc\epd$Dd'=6=tm#:U]S;d(`ABONL L6q,6lVID;;TVbZb3oT?#!cjhA#S9'J =Iie<<Ubr^#:;pOhpn.P%"P0<&C>3KV%eXg9ST cf%hT!aZIG74l-^bP>18WA:NRE,1<W;ioB;n-g*(PJteLc38\2U-"74d.Nt;:ofSZ=rTdIK*3^n&d?['Xh+L5n?C.j]T6l+>VQ>8n_d`II+&4Ume@SeS  Q`+>dmTNhhU+8=+gc/Unj=\LBal9)>-cT7]Nh_,!'&2Oa;O_!p:NEnWnKsUke-I@`1=nB)N>[)qs_)>E2r3B@c%N0R7PVUEf['eeAnFZtesX'+o>$5Y<"i?6&_Fj*<nm*UZ_nXNnnOek6 W[0ej.je1[^M?MKO,G%A0h#Wc!jZQKcG]jfmj1 gE\Ti:!7"`A#jO4Q,4!TI$2A(S;),-k Ia!GO8inSYdhSrZPLGY5m(S4:KAn(NDGpsoLs^38'`,>iY\Rr]5)>7/E\e?mp^dUAn]C3?#T'`oEe&3R5g7t[U21bj#^2t?>HAVAb)D]gf2B,<VBQV:PMU,nkL>esZ"shAWS=WN)5P.p+TZGN3\KAkAmHN2DbS[<A@RE3@aHjXFA([<8LrV%(pD+U1@=%%n2/'@[\%?E3@emR'38S9#&e!R1#T?B)b:AEU<flsK&_XEV+QTV/)/:_2YO,6nEkF6'T9?A.'?r&9VUnMC!rRjX%W9/A&53XK a2;9tcnD"Fm4k<ra0Ycl:*?IMgkgm_>V@R1fS`Ti50@R0>UdFAK1)Zi*-J0D7j"/FE0RKgkB;0iln2Vh\<@,l5r]`W?D2+"Bt/6X 0L>3\066f+6tYoOA`?=e&r+4_C(F?pZZ_h]2V0&2bBhp(=3Q,]EGTePc&Co;lFKHts'mB>=OgZJ5rFRoEQ6$W?]j$BT@RHgO*82+a[@NRqTPYs:Ks+^NG+2"2jF(5!\ kq?M9_GhBC&']rj'Qj)AhgNs#OnOQBXPO!o0&=nZ!i%DX?:cOY,2Y8#2IALK1>:R^N+o6hfqpU f[O%m6lQ%_I0=>bgAG'%\7(4Tm+LtH]Q(f[KYjRnf`W0-B'*LL- s90UE*\Ks5< =BWFKc485lcUBk(R%gULHnT^KAPiWSR6 ?35O?d<ff?d:X)7qR0&20Y"+F# 2jA>mV- ]mH45?&LV=EsoB]%)!*$'edIP"HG+V/A::SkJdA1k+@K%D:IB_ENml)0&r$<b[)qh6Ke(orDDnsEPjrVWQot<!(j%'>NS`[IdK@Tg>Jq5]_0*/Q-KIb 
IWTFA!SDl%\-:M7ni6Aj9j[^!L?`ZS^T47j`e`*lc!hK=rb;UG! #CL$5c(3iTti:(pCpR34`];*shFF]-d>MAn59AF1a_E2<NF57<Xt4jQKmk 'ks'DL^;mk\(#\a6&B2pYbaN-R(oKP$>>SbWX_0to6Q'":44aKk4,_OWN'WE+p9GES&eVmA!ZAWe;*qsL/ l"?l1kg2d9<5jgb5\#OmVf3WH4:$$ad*b>=G)U/n7dEeC([U<@`qL2iOVk(Dib2-K:K?Y*r+^A^,72qI)JQ>.VE SfW`"Qrg1`cJ!oAKC>O @CpG-A0t@M*]8=":inR=\NnGXpC1+<J7f1QKdi"G &GAc37cmGW'*sk!8:G:AZb=k8LcF0l&+[H=]J%m,:gflb*.i9I?=;h,UcLclokkEijJRo"]LiA "]#^bs*Qr-PeR@k9`_6Z++1R6"&99>]3lQO]i?5P-WsB0t^VO<idr5N0l8S=<pUV9]A&#(PAR+fAAo'$Sr6A5BgK#2-amroKkQAjs7'beCcn/;)!*lkA-\iAb$=*_DA<OdZ_&i1rN;6/]C"@14U>rZ8kQAh<njeqG0W`F-t8J7e 0f[isgmTg5$:#KpJ[q`""%O28,f>M+-/3'N7[h^&Rg#A%K\^9XV4Rl gZWEKgPEG;$^iRChYtWGPlp<8#\6OVm7h`lDL-V11E[4QN:,kH"\o(<)7`=*@tKW]66kb^#jiLt*/%]!)A1(Vt]%e;K0FREkVrC,XQJA'Foj:?I&Zb>^Z+4D4"A9)%`=J nAEL+q\'\F$kAP627_qJekh$gb:_FDj/*fM#1<(;qNmOX/:T&7B?EMGQ@QAs.O*o2ld' jcoHpC:/'!;`CX*#, 0M[`0HB[A5#]4&jDdoo' tf6e",o(9HXr,bVrfLCL0Aa.bjVFXVF5P>b%R(lpHU*'3XDQQWq-8MECG"t"'dj!OM[_N,FfE40OCHd6Z!'M&6f!PZ0t+@\2#)&%Pk45Vf#O<Zbbpid[CjWaL&nd&DZFr#6he(H@s]ah6W\O;[S2<>#be?9I.6D<CG0:7!l'4-HNnB/`+>kcS<X6L"SfT*23Tm`'LY4/)o%J3X&0%2R1&)qc/0O3LH':V?g$d!ho*i@s80,\#"aM;BrKe-ggAW7Ib6j^GsG\@:U8$^djWn4)8VB1->%rV/\^Te((c*l7+cDmM<W>aFCFhGD$Lh\ID5;V0a7iRq<ss]O[b<]O1FBh7:*jb&?@?)&RGF]%/C2;&J+%f=Q>dV)<],d-e>q<E[ (Zg7.D!\4EJ4[U(b@(AhmE`_OS9G&&pdj" q3PHR$me3_LRA`G[9poS)*7?L"2_qe3HXV^[%rTArKEAa:S-aOS](?hsJW_/7R\[+%;pe*kZC<@(q`<8!`&7#4.OoJfbpo3ig@50+m*eR53JRhqrU=H3GH@^KM2%<`pA;<7dUD]9FE$tYmlj:4&)lWpjoXje\L;nHsWBi%7Z8@%W2M>]WatN3e&Ya$P?d1_j+a6i$FGQe [Ho%f;j#Qt=QGoBobQN?lP1[f>fL:)H^SC9A[ ;\(Q=WAm6cp29%SZhY@(8TsDkq=l3_I,h(g?%q73K`h6-55N<Bb"<b\qiCo;QrFJc6nT`ib7p%bK]Z8Uo(27M&*.E"M&]] lBHe%8YL9fSs[i&o1*qNq$\A[-3b;`9KbFn% MU:@"4#9NFH/B`<"#)MUg^'k_Qf4rEI<!.UE0!%eTi"%_=E*?qJZhS7.2#l/]i(bnr=1mN@N7X,5(WfVX"5(.Ik%gY]ibjqG>igo3:!<3c4d]&(M/RC4(eIiBKW,oI1d4!(<;B >+HZ0)e.bs' II6_7LN#)q0a(AMn+>q59C$8oU0La]iI)[H9,gNXL4&:BUAfcT@[h'dlY4tBR90i9=:_bb?\QZ673eo0Ea?JZDAFnHn/UH6]C0 /t-;)e78-B$@6G%OBSL1a.\&FQb-+OUZWrW&1ecmcSTKW#ZdeT1+ApR'5DMSQ, 9Am& )+ 
e<2W.Wa?CO"X_l"lQ/Q<*[.hDltZ^6JjC[PiL2814O$?j/QC&AnOGmtW_ b]KA( 9LO*7FOICC/7dr)pPH!B?r[;`^d[\V2,^08jK/C[M3k,V78XZ@LoJf\HmZAko6>rTepb]iPgEQ!;4.c$ClmhP9 1T=6&0GBA*(Un49P<=BR:p2#p&bO!H74&n`98`_,VkA6]t dGYqeN"grDXBFk;-"dNOARN[5-%).(_NSa/RE::QKJnZG%f"PP=6#l'D'I48qT\;))GTV2K1E51o"_6-RH;:$cTB5"48AR<+U*>A7N?5J+iEFl=h2p+f6]h.fH!E0CtSO7t;SK_/h/-^ndjRX4$rfi7Onn#B$CE="L=nLG[]3HB=/ph65]@7dB7P#"MTBSV[P-60h'#NLG=klt'MR8q=d)fN])[O_`Ia-7BMX%eFAkcF6JT8MYV!)/cL3[cj3VnH$ &O"iJ_g2HosF.@ To@V7OK2"LsYYcAs5>M+d:Z1XW*e%Gm*K>tcI$W6Q<m7EInq/U],5G4#:r7W_snqr9AieIk[DsKbjR*EesIK>:;KCW8U@<iGC:efI1WKpq5*f0gc`oGC-Wmtq\Al+mGjk*8c((J7.$P@<I'/PWQ^3 [M<U_1e;3N#q,e*I>LOlgU_)VE bAQRh6)apl,gr3^#@NbAij\=Sg_"teo$a WNJ s\q8+r@pKXeJ*sh/T'M=L+QaNK!gdrAe+n+FWDCZ"d73O&/!Sn/0.[9[> *,%SA'G#Dns-Q(RBA`aO7ehjH)t4Y>ad%^:]&QOX[M[-RS5W3846J>)6])AO>s&GdJcAMo[mJfU<>YkGoI%14nSCT:feac2KYL[-! Hm.Y7Q=:(&B9'$OVjb=d*e?S5Zdm'Ar!qIRLG soR`^Z:ee +eAt.Y.FKE>i*h$BGo)I4L)L;A(P!-]ofO(X1\m,kn4&;6HQO>TAD--L"8+>W$6m'Yod<GBS 4$1ens['o\'?+TM1IQpBQP/k*mtYi%&NL""F7t rD]T(-,tU=e&hqTU8r$:1KQ4m<DqNX3""9'$,J-g+2>9bg_4 (a%XKa;;A)@t<"XXDf7j(g6/+W`o$%[@-c;ZRqq,9bkhLBa&X@(>sY^DS2fB`Aq]pm'8iB!RsL`MeG$<n%bbFG;ZT_,C=fKoAn0`.Z\6>atMf'^$hk"i[r`s^LS>(WP-?,oPj_BMIs_1gA$qsn?'oA1Z4Jf0BFg8n.[sf@=gm]33!6m8_IK6',"ZM[M>)ejl/M5S`7NlJr(^!dDiF[.n U%6S]nseSC8=_2>bcCgcoW!sAbgRfL!kgj%81")9\EUQiobP%Cd`[S%t/WV[A`T+_WFrsRlPS]gK0G?)-l)i*_X"4J,[Vl8mQb= 9Y(a^=%lViQp(feU]A`:-MU`^2tA#_mKUj<U[ln(,ai!8it:%F'1nCeHsVA5gTbn-@*%Q>SE*h,pKKl^n\bg5LBF6AiUAt tY,8kJ; B:*CQk]!fn4<&*&JQh^]rJ`k'j?PUQbR*dHa<FYHk *jLESb/Y3>YWN`IqI5d'*L%2HmA?=HK>g73Ih#/Xke3&c^@O\rHG&A1&F!VPW^2'H.[9WD[;=#!Wk267l\ggd@B3(CnV"@!^C*Cm!@8dg6pIAiaVY`omHsisq:(]e_iW)(,.3 '_Y;bm&qQXO[_c2L*>=U)U@@V6QWjlsCA_(^TWdoAjC-6.^kp/8PVmW2pMq]*<[rB(-bHH34$OB\;86lq!,hgU*m&"",6e6`dNO<;S',0jLdrW/-I\P1IJV##$>:TEl=B"$WHA0]k*cVFE+#j,%ZID]KSV=&2S=<XD\h!tWF-.maP>L(qS-;P%%"9'*IqA5:E(-H-^IJo4-*9<L[D_@F$SRC/WcRbs#2`+"a/.2'(ld0]^PMBfHHkYf42TWB'd`9LA5\>o]ogIIqd^G[U"P(ngM;AYL)a\+on0"<DIEF&RKY b4#dbB\N;Ss_eI0cNWF;S6$_N9ZM8`?4IN)9-[M 
A?H=9RqL"]"V30(Fn(d#- ATpboT=s  B\q&s6GkUoA 8C7.Q.`dP)!oV%I$cesctqRsMFS42&rj]@MZlMsC&$&6P]>$+A,iYLYIb]Ok8_fBRloV?h^>i?A;VM,5F5!O;mI)Dp(7"H4E+C8$M,K=ZHM3iaT?t;s*\4tZ*f-!dB\9UqF>siV<VlSi619AtA_cRe0I_E)Xa5 3aP7#M+6\jp5\D_I?$-Eh*5gIaAKH[X#_-rA%PiIQC-XBWRC:Ct^<fA$RdGAD#VN(&6l)"otBPmOtE1e=dY!,K`.FS7%oC.!:4V9ggb,W0X]]%dtHA?:i[0LKsX)?_lmCg$Z9b2V8ljqB%g/A"X6BBAFDA16s(VLC2f[:mAAZQ4kcBX%1^h>D8lFn_eNf,5In:nD#h.;fF#M\P=*J,DKKM\,pq1^`=90Q!e=,@LU-4PFS(IJm/J$0mrDUEDlDZ/<>SUO"MSWAb 3ER:/YH*ab4%E-KC g>8dCgZT2$7/5Q?8<p*T'SHb"$frE4\^<KG*A*+pSj"",Qt<M,Xdb]Y8n4!@T<:Do[P/8dIQeA@+"sD-$63g);7?\p`ihr:<\/ nf_A`RaC!]B]-!h7i1>)&MqjOj34>%m-^a8GG<j)p+YmhIaGFg[5=.Cn+To:r:$Ido$jIrj;9CIBUB<#s8oO<tM:ek@(Y@$,D1(g,^LIWC;F5]r=6Hhe"L&l]ohA9Kp)QLtaXKB-X0b6:4:L[bPb#;B.O($=%0ci#agASXh4p 0U6l.L:t<_AA72q%Y"1e6J.WqFIgq5!MDQ$C\0U`HX9hg L^6D,BGRt7 .%fX d!N:ZQ0j,Fc9 9!HBmEY`#9%bU5>*P_fF?dA5rhNmWU`@XTe,O;S)EF%,0Aj@T#8(1A%1:!?bI?U_mGggnQe\\1 1cTr:Wbbg\b 4RGo#7Tm]\@0EaI]:G>4:2CR+IQ<r=/Y- \^QC<A;@k3Bt=A A@QD,^$B$7rt9gKP4$QlsnRSU(%aNGmBsG`R>dHmOsFBgo2;1g6@jt1pC'GAaa>/HM16?i(+/9=h'-[]OL4Q0&eTGc8GP:A'>O:1-$#.AdI0Bh9aALq^#XbS:^>me<*kQ*H&g\+oAO*#U;L=M;?#!"b[^K*?iL+1seV,e4]M?p3A(=L3ee/?D#p#)2ffq\lXn;"b N]1#GMBaGDTEk:[<n-_qNHo6M4EH`3*\\h"_a_3NZBf4U?SW<;YeTUM?;ho_2;(,]`5F^f7dF>r(4ZF$j50M\K5W6NIMrpMI;E5X@I:nZH/sA8 3)#,l,1J`/8f/gJKP[h%Z5\"ir_WiH; Gi.-dF4UA+#(<t`cg<W$J5s=kj8&$lA4pS\G]FF@'NfC&YU$99[\lG.F)1&s<!XIrO&k$r]KA[*A=WGf V2r#pDWYD]!C=DUW/dG<J6a/a=FHG89&FeVM;*_\i].@;bd\Z03MrH(9SU9/QZ*Jtm)Q^6bbL)6 H4C<tOF+\s"gYp*[n-OAkRA%cVYK-;"9o\)I>`EgB$!\CiV2nfHi8bde#CNHNg20q>KZh[O)2T2+'cUU'#L>iBnDJG]`*"i>:Ua<\bJcA,b&*i3Ct02M))Q$$3LAH90I7TA-$1SWpR3JmHQ<)ki([726n)#$LJV,] k[ZnXs")+P1EUfF--*kAWRgp(e%h^Asmq_E<I.W.q'B17B\ND/M"HDt-[;51:[%/"gh)I/^b#Otc*V5tSE+b AcfWW9`Q=B8V3J.Gn3(_3E!3@^p/`V^3GX`lW\2>WK[CS,AN5U 'bgNLlUYn]A/FkZ]F9KU6OtVFV$>M*1!/q2pKO]8#:Q4'9BA@6#N/UWi@^,Bsq!R32;%Za_A-;;q+5&R4?8endVAd#"[O-?b$`E**Fk 
"Pl@d?3>T!taRl5"Q=GW"IE2?;81%s_b-n7jH!"VH[-Y8`^?[#8kBfN)oKgJ8?k8lA#0V]i'[dc'tcNEbaV9XT_85*(<5S_W!m^l;h`Z$`U;\rVKW:V8RA-ctGH7O9roE6;F0D*$C-(n&Do^tXcL3=$%@K3&An&/?/iFaHn6&7'X0!G0o5ng*#fHsl*_BQT`N$fgYGUC.Le]JG@NV!fAS0"WEGApT!R/^)#D]m`H'O*kPd?&kkAPrR\(3?X2lX1D'"*Q+,A!:</BMNMZRMT=QZMAgA,(WW:kr,tpU2o5%YXolD6!SO7?#9`;:PjdXfDA_ZWVB2312&rXY)k/GEQ)I]--qt A811(&AQSTQK[_ K_%s8AL^n7td8HYL'!hq^Uena/5L6M"FQUrU17qncbE0F2%jTj,e?*;RUE)R)S;,r4HBpG -OfI?2fBE(U&$_P[U^o[AQSp+BKN8P$2Q`B5YA\rda ^8tTNUr(h)4fYhg'Dm-U6%m[7^Xlta#gD#J%.mJhc#`qaG2!^r_0`cUJ>&N_W,l!H_'PZIDVD:X%1j40E[>c#3l,j,.JdXlY&NsLrL!<ZqXLS^TeAb\QLkQ-J7Z$&4lr]sd(kVX7J1#5:X0cJ9="( lF4%1J@eg2IU\dcY7QOE5$B41?BR-onR6[U'P(Rkn;k%L)"'B^YF+Kp*p`%T1Gmb?NTl*e'-sa3c5qb&6=pM#X ]* ^3WM7JP4KgG6Sc`*/TCn5[":$96sITMJC`US>Xff8R;6#%K2R(p%O$rB")9tr3-FJd7EYgTS A(\!dgX4Vq_'_<0FJ%8V_0U>)jDSpRJU0K79LT2(C2*?NnnAIF9a\&AI['B$EkH,Tt8<rKj?Nk[:Um6XeGmVQ')F/jBAR=!?)MA%@rbNhA$T)j(L(A##"8=Ap-J$k%korCPtLHoak4'Yosa%Z2Ss_TK&eiQlp;G/LAa*SFGY2#VJQ%'LM G9p-b@!I^A/dDob-/".)9LAarqp)<s DB3%dA8jD-VN0PT"]Ab6];DA]AIt/m0ec_3R"\CoNl>:JRd7bJ1BW6 imLcO27=Y)o%j4C/6;%[gAZ$k#`cWCil0[:sbU)Z9`f46@>X/\>MO)3kLnmtL^/4Q=^9a@N/n^D4sZi)R\]0\4Q9YE4VG,@d:V<V6NUq3^-ss4'(]n@;a8.k__Z7QP9\N#k:22"l+#Fs=9G8]!r:JGZ\pOZ_MFA7O=#_EZ7aNLn_Y/(;"E5j0!*F](5 Zc5edh3[Y7<=>?\ jrTm)Ra).W^YAq]-=@8(kKhc7O_^CUdZ$3ZlC-ZAP0"s=^\Q7':=+X^r%*," OZQ M, l%oH2X7Z-1;/p,f%i!'Qg90ll9cE.]6)=J-71ClLUS*D?Phg.t,`^g6c`q#O/NtWP1dHAWYFIJL/;ZbrMd6Nl=UDrs:\9D?#YHFK\ BUhIJD#A`faf6"1X3g:bkS?4a 5dY`"A6Z]+jeU1QMhcQ6]@,e"3)oU<<sV1`r/;M]$)/HUT3fp<N52-Q.l_1Dat5o8m5N8TVaip8'BAmA[g[V'+=jU2iop!WX+#+<!X]X" HXGoc'PIo5p](%7d:jM8\oCJQD>Ql59VhPq IO$3[=&)j] 6e!fY+9$Z;C)%p+csgdnAl;Ote]k !n9/`Wc`<PfNs)&S-cG9O0*^?N*jOt$+HB!3E@c^rN$eD8nj7A\mNG4'9'70<(_".;qVTM?:L6OB$* dl"tKpleT4\"H5h'1L(M6(A]Vt76dgr=S-;2*D+$`!kN.B@Hg;A^1-M. 
.OCb5tAS:S`_.Ijdt/T5Hqr>,9QV4F&Yq ?[pE_lG*nmem<mTPP'.2a2Q+6.BY0dmi-U$%.Jrt37@07jAjF/'6$!#taa;Z_f0[)2l"8L8Z(-+5*2s!PB9Z4K+dUcX@PA olt(IN]T3[r*q2O( gV,8 \5 pQAUq`ln#KObIiH$)Cm=\<BS0([.VmB3K0$1Hkq sS_\pl/t.GDGG%Mjo(P5/(d5'XOrgl2N8Y9s[.bb?oXoB@Whl^A%BT9YW1#'SoQA22s7_G$"QR7B+!$f`kK*4=B"9)*P'm"EL>BaQ-[+WAEL@:j@A$niZoQp*\fOtI0/peWG0>OL?Z`B?s@i_?J9<$23sGI OA.(ZQ6hotT-KjOi4ai.T*27jrJ!jg$3WWE87=8\kBD`%53Pibf[jBAnKP)\XD*6Md-nrH Lk'o-Bmk6SO-l=d(Wj"Amp!VC ,iOF8IgAb7_0*XW3o`/eXiVN3q2k@p=+sSiPVs7$1hR4b@MA-'Oo_9\hcJV7AA#it^L&S$GV(m1FWq38)d\CY-Bq)\ZaHDdA%"5-IJ\=S(-)^BHEJm>Nt'fdfi-`6%+o(qa2o;j$$AN\K=I$'FEap4D6p8,#L-8%-oUFbgAWiG[nD.g5p<%0FM:ef*AtRh5jJE=N2'fciPam'd#\@*R>?2Kr3Yn'RA)($i%$,/eo_p!8^#<[%1Q\7p!i5eR9dr9jkE#NfPej$kW,&Ar3N;4ab-a-7a,qt<ZH?g6f?jCUc12&^W=*3Ca6$Dm_^kWTr!mgHS:1t!5qs=#0Y[B!DGC]GV+."]o.-g:M(oJ>rcArL^+7<6@aO_ph;C`j$TeL^UhfA-BjjQ<S$"QY$j'\qK+F1lKi&?lA^3K_?JG2K7^-:9*;Z*]c_ecK?_Bs'@]fb6e_Bp<NIPY5X'2Fg]H8^!`P#e3g>D>Xk]n</0/0W7I/tDd))873i5b_lF8eeMgWec[/'ok2AM:CM^qgtP<+V^G+NWFhreGV*\l213MJGKWA# 7s1t?&VVGiC=1:gW`D jE5-=18,V+==9hJ<!'%N>EKF.?:ASU8\$A!@r+XLUlr?[a,@dfC_fN810! 
D!KYBtmVs5+#U"I0q_F^A2nfF0RE>t&P,`0V7._ajj^<>Q?tl^!#$O:tRlmHd!BejK]aqUCaJm0[Vjh/*iQ>D,-;S]XY$Pn2@3lh1(:AnX,,`4?'Z0 J)_` -Wr5?\r2PV_bA:h2HL'/?A1($g0?SG!"=:N+,8"hqI9VYfN4Y3?bS<;[p\3/qK:D80G$EUb]tgDU['[O)7Ht$c.7WT,dep*%&Ta_R,XT=n4G4M!/A.04d/>Y8'Z=Cm=,A'a!=bhRADYk9?sqics9TMQL_1?7:O6E:/^de,TXZ3A?enI)&@ir5U*9;HYV/nn$F"oJ#@a'%*,m6sHScVQFj/Hff/Nr ZP<'V6FaDYH4<8&i7ZTs/+FDH>2F,BP&BAM`9`$NTRCi7j*,a\S!>O?4rCV4]N1_$p>]E2eg aJ&sA^,T;n38Rj>U9,`mY@_kIsY[fbd%XmmD@p<rAa$GQ#X8&hXR@WWr7kT_j:fhUR5%S;-F7O`L*%PX;^j,gkb]f+0.TjNfbZ^M=jLf >7A6^<ZkgVr%$jTA'lpL&H@-L*Ar*^N7P*K!&W<l]jWT<bLfC6nL'*PATq^A)3Gric?9b:BW<D5DBXdb:)Y-E]i_g-<dkK'%EeB,qN+oI/e#6\\dma)#dGH7\>01L1XcG`&t+ISC\L^,7GYI>)'Z +t2.DPsGO!hae9L0Bt*U6D4g.edlmm;g(;=:h5:@d3k?@^!mnT%cjRpQPE8r8BA-70K6PS,#8mFp!^_lDl@kYfL#XtMEG]fe;d%%5:(>Q dF`cZ/\eA)^l.`3dPAi6(4VAf7jWV#6jfVD3.-7,$aT\ICYnB#et <i-HZtij'H?j\#p*A[skA5[3o)o,rG0B2&G<IpnV/fPfeleD qPU5HKckNI96ZNfK[0"bhi$_Vs;[Hl(s/U*L7"$"lKF+@ n#1"%5A]_5g^$T@Ni8M:=!doLsS>H#1SLfA?KNg6lPl`.0bdR<h.,!/AL[E6FNYF*:^t>p?Y!rl(JUm_?X)l>$/_!5]r.]f?M/B+HiL7Mf8P;A@iV:]'NX=K8]_^$, <4< qPK[gag.V7WHF@&ZKq01-"8g3E?]VK2ZLpmLgehF%SfQ"YM]W?FRJ4.V/G:jGpe\bjt^io$r<30]HJ"2n7H8Q\"o.=BsfZ\=+o)@B>S5.f]]Q'K4^bP3Tj0(q&)eP><p/6If-[O7ltLCP@!R[t'IC2BG2r;4p%%^37c#rk'BDQ:_:FXSEC_68"\A-J\2WNb$:Si?b\3jM5#OH`JS/Wj_1(#XAHOR0kgB/L.?Wtf.R_\EAT)k4o.C&Ks!C@]N!@JhaGrphTns7Vi<MP*gkR<f+"N6hj6b;0](MWcrD2)oK-0r"Bb7_hZO>6LjYG/,CVt3*DgB`+_q'9lK'-Tb+s)T27FJ[YR$Ot^!(W$beC?m[DgRN548N&LZ:/?sXs!F4]gFV*DnRMB`g8_X+a7qa^QAVgj.q0!rb/]OCp01ToFb%]qAHj,YTY2:PrfsO@?ajC$P(KShC T<Tc(k@sA-'Gj'><[^'gO?!f#B]].HBS`3X#p"':.l^7m^,J1843Mah@-qe*)t0ZG2l3FI*f]EU8CldM29$WUD<2m-&'B;kGU\$gsib7(8d2@&-r b=Q1Uo^rGFmaO5?iK+A,A7">b#Z6GpJk-"*^VNlaY> qTlfc5K:mCT4t!Jk&1Y<*CU$'d5qL">s#F4@%W&+,[e&G(s3%Bdil_&+d=m#oR/j%1CBn,`;\:Xr-` &9a#/A`5`^e%V-iR4X+N;Tl?fAKpYV%kWL2P)'<tB?j2QS-4Hol\:8@&N[^?aG>eB=4P0)#T+B,Ll@Gr'XZ!^g7iF liLoAG#MX*kLKX:'A#.EP-b$ZDpdgQhP5A61n:&B r_=$g-4AR(IsV*9CbiWH7Gs+mA+o^sQ?4(p"B @?:B!'*kpj@'^:73Y[[P.WoRKeOm%066gH'6B558DqDXp6tOQSsD"c?6;"h,msOpSBYjLq(QAJZD;=Dgg@Nbl/ 
d_$0:fM==X2\=m"j][d/Z7K1[3aGS$p 0.6ED74l;g"3;7*C'c*7U8NpCZTr$^9FZ7AWf.EA<X==C-+F<:$oMa=k `=-7fS!_qfOJciIZE18f_e64JQU/lCF9Q`"1^qX=6iE)Pk@s=OPT) X&<$!i7Z%]alK'D Dl#VCl,h_c"I:^IPNUWi$dGM8@sSr@&%B'XcqA_@1a6eMZ,)ABFo< $hp]9!R_@1daHGd@)*KXpgHq2b6FV7>O1&aYi/!mVUmC1Ya`` ].M=JYJAX#q9F'k-#:.Y<eI&e%J9LXi58=L/ UN9#;R11i.+9bD;U`(>An!foVE7lb9Ab<I#G8c6g#WoAc8?)fUh:Q^W2OK4A9b=P-sP"ttt]\djRc.AA'+Q6p5JE`<&L!#CJ,#PH'4!8qk5/i83976'Ys3Ak'&brS,HQ@pF1=c9r8K"sHs`a%]<`-g"t!Xmf7#]bpN5,C#k .\',-q$1R_9<m1fE%U\^4T7VUc,hSY8U?F2n>BTX$LlFRR&Qj"%UdbQ(2CIQ"6W$NF4P\-e,<OsZa<.P#_aH&SXJd^G+rpNtIl-4#AFM1&m,Ja$GNfoh=e>CRGR;G72S1&rFhlAOQH#85[.Q0<_FNk+F\iUYK'Kg+,cYkkO!Ad6S\<9`n`*AL:MH*-NMo %1AQIYZB"lPft9$<r@48lXt^mGQXA^Oi#@T5aRE7W;)ms"Hibk!m7mD<OZZl%2r_='"8E0:'FZD!!1F;^^co"cGA1%38M/=l&l+Q\5iOq!iJ4D7H']K+8jmBR5.ni)I,0n7PVP#0<Qfe"r>q.4K7$fiQfW@05*2M0Q,gaBYj4]Al#1EPUWAs)pd3/fM&;3C*le=YZF<IRDn9(#)T-_q K^Nr8\2=[)T6rs;,fYqHbD\Q=VLUOh.Ncq$O*2R6$ZtWXjZ 5%pPF]ER%++K[kb_AB9ei&hG!DG0\ 6CbJ80;Q`DP=I\o2/^;XI#HI#*?B=dCn#[-E4jDQ\Si5K46X4VM)$p#F?O\#'7IZcdYIg)o?fR^XGbNFPb*q[]GM)QS4b@cA*IDad[`0ZaJ.[#[@Lr+.K<hRA\JhY8"8qKR`LtOcFilqAee3A+a=j<FIE2RamDL\s5_%%X5&gK#3<#=`>'sQ!pS7Tk1+hGA2b`A'cs*&&!#V2;*;Jd\7a@S/R$8mgLpcdf](QGWYAAF;O":QIe()+[::3&Xms,6G#7s04PXnp8T-Dor6bAZQ.i2VIC!BCm&,D5j!LJ?/+M*i_8t+s6:!pb^;qM#f:)k4&!rfLUQ/-GcDq9%?$qtNFe#4:"*"IQPBZc]61@(fT!7/.T7 <t'tA<N`"UB!SdX7Wq<WO?$B6A>-Vn $2>%%,jjIb,M$`L-82!q1&?%W0qpDCFWg@U18f*%::H%sRL#n/t.r<d,SNsU&15Z/p:sWb@,kQfA[0q\q\#>!gg/?VeM"DHH6AI;+%a)h(ilYcP4<e#s<n:m`PUYmL6WJ/)ZE<IUel;ZHV]IjFFJA8=O@i\oB'T,,A?O!$Qe3plrcfEADaC%d "8fLCXlag_-F3o f^0[EhTAFd*o?bA=NiNL.m/4f49T-4XLLN\FLr&PA&:4q&'gCB<?rCI1=Z;\aTg^mk)mMD#JeY013GIlU.*DW,pM0?1o11*\N,oDGU8aD=fYhqpM&C6B1cF1;qYFHpbdgsrqX7S#n>B(lrq?$s3Rp<SE31\4UV(pb\D@eAq#p>ED3%\:o!((N)]]FrJrRO)"A)2i)t&LQh-l]WbUX=iP>l[Tf6MOoM7"f(:hfim0*@?Jio3=If%"H96(<8;7(M;/Zr!t?,=K"[aD:+6ak;,?&:;;me?f'7PGmT(B\`YmFqTaK7 :]4\Ap7$R:^?Xh0qGB$qQnjGaNR1]"Y9T4Z% T@pte3Q/??@4*p\^73Kr=s*FGJ5e!#/m33Arrl1]/1g6)jSIG:ah@9/aRR]To-!toL!hb0%I"1mJqi[H7@fT8+N:)"gCDr2\<AW`3q!)#6klf']q)P 
WODh:JMo!-AMX &s9ZA%:raO$)=,Sa[sBp*\`U.^^?J5nj3Z5Z*O>\Z)r([G.%e.QJ(DTm(TR(F\V^<RqBNZB>WqMk,^c$P=0^/noT/An<j9J=DG#WgDH2cN'UN<sfZ3jg1r<"i*jgtDe%_(e+*'[8R5_A^D]iL9P(/"b5c`?#ND"GGWMWY+]a $DfC'4l:dG;kXR)!>cjC,c4>r5OrQOU#M6,e2U0YcW;*1$P"ZM/1Cbl0?dQ^\A6@3:Z+-^nph]2 2tc#>6&Z@6[*B7]fGA[cU&)Sk0UgUe.<b%VLB76X=qo_:sH;f2fYgaX"8A$fVatHt$B0r70s^&UQUJ4O,<A@\sh*C7'_m%Fq[s8*DO=aE;_F=AP,bD!^e)*!C'$` P3%(C<A3-A<_d'B^(b1(H#Gl@+rgstJANe:DJD*)54&)H6AD=JY)VtJkl_6"AjQfEaH%HIaPh9DaMaD^r>FFA,'Xd`6epPmXfSsT )-iT>MAi:\a5+gl/.pP@DE%'St*+r;51hk6,/4TCUejW %7e$D4pQKGFW"r=h=a8^s.hp!?.6lI_:h]fD:i5=@m"3>^XHa\R9KU?;Vs@_%3-3>$H4L">8ljV"O:4Wje3#D<b2:2(,`RX*qj_Bm_%sOoN/dqZ?,,N3`Y2NOt2.dW8U\?OLiMXHe R/m3QtI%f)qNU@U;sfUb*<D1X6.IGq:C%h*O@PNNqnlQ_ XFfMYS4n7lH"Y2jTK(f 2+)aZVWsa9^ndABU:#Xm"oP"Z*k#/qoK `*iAd>HQBKJXl?p*)lf[K"UWl@BeAHi/q=[hF-*`<J!G[#.<;B/[o1bZI5(*:#K2r :)t(Q742hbh/6gdA1SF".>:JtCM)I7XD:,3%tZD(LV`d_>e@@>"LTS]ShI:!WRml\NG#cMosHE@t<1Z]"@JENC&=M7gW5t=D7Z'.KtN#r?fF/a[r-B20%J-,A&5A6N#<a_WmjZ(Qa@:Zf9dCb]4l.dJ[dFGEH% A+G+o4m`0#.p34?\AhZ9RJq,=(QB@gE%`sc<g%!Oq\Xt]a:^S[pIlN;[[k3BXS;!!r#GdZ[c?6*E1VSI)7WfGAs1rJs[bg(C\V++,_>K?;9k5Yg!:b=)Q,4jXh@>h$@g$GX"sH8]Wk$\qB"0*&3q,dE/a9g>*d^V8Rbrs&J[??YH"c9<@QAkj..T/nnaaPdQ ]s.-#&m/+,U0FA/lA[$O7;<NKN:M/%RqH_r[8bR>lW].`r;](S<c^\R OBUS@[U!R[-DTVL:nW`dP\2rc&@4=S3qfhVX56[:Dea(B3MYn!]t#Ai6C6J4iiL"IZecaQhHT%7MskGWcjWLZC$g\JrV^E#CBmk0\sWpb[C;%mgSJiL>62JFttJS_!I')O4fZ\4AlMm:7d1'f@2ZE-O*Xjfho)O)/:*.aQeF<tS[C4plfh3K(TI r(9sN?DMX:0#<e+N533P0XJA^BN6^l7)A)4(aDb')Sj*,2MG:*;H!aO6pQCIn.RI$F"66IL%F23<nY an[m#O2eoa#b9em P>AA,hiAFjV0mLH@tK5 '\F%m;g?AM]]&p%A7S&^ab7^5^%P^t?GKiYJ`"aBmc<g&`Wb_pJD;"C:cU6;5tg^U@dLFT2XN[Lt^o%265('rR76+;\L9WTleaf_co52*<QQANo9hXbjc8>X7Z\Q_KN\oYJ%F[G'>Xre>-"9R\O\_J*Yh&0([Aq0OZ%c&RYCWJW[,.KEo;8/oUC2PRM6-\sAqF?TXll'2^$TlX2OdR*W7PX0$CmK:J\MD#C*O:^b!*+rm6/ 6HUi^hB8%AmmS=cPT>9e;EMLUrFY6rAL`Ni]UQetHfeE-9bo`Z>!]C;$k\mtSE!ndgq@2pL@ i`m`fk#OGRcTeUgCB.'b^>sm s:Ib(j`%Q#. 
5T\IldJfhF^kdIAZ,*,;8dnT7Abkag,]"-nL$nb]JCWR9L$n85 [f3'd(WTmHGr>$XeM4p&fd+1bTqE_jfn% kMPkXk- (Q7V,m*VYdCEZQrda(r1>$A><L<^(5U%OA8A&C#j+E9GWdmR0$*Z:JNpI/&hD\3cD>;@a\/)=joY+?"$%AQs?J_0p4Q<Ihck5UXW>:([Wdq#;/>MKeH4kI@m30otpY[5jAI*4g'\f0qiJ5f6V-OmQ`B^LL@AIK$or:nOHa.N`;KEh*cQSNYD.8h9H%HAn#`=[Wk(?H)?[>6=LYJIbm"QDW-4c+ES6WKS^P.?Y^>1\O6Y44l5WdA;<HnHA;SSYI&hSE%N#+/AjRQ3f)?-/i5^1[b^h+biU!9/C\Cr'[B0E>.=;(ZB";qm%i`^;mS?$6Crjg;;qR(Af\1ID\QE;cT^Bl:> `_ij_8>cK[Ut,)DhKX$NVGXl0RG`GKH+I/H'Mn#r?Y"67Pm+'3M7&FL\DFWN#qGIhK$N1>X\23%N#@ c4i$s,^>tNSrY&Oq@#N?*R%D$.!:QU^6B>pA^.Xd[i/Je+e+AKb>-=H(8jMiD2BWi%(72F>()`Te^1;b@]nKGrX1c!PjC"@q0f<RrSA9AA=o5X#[sK5pK#mXjdTWRk_>^\Ra;9r-CVIT2.XLQ\GK`/oQQZ<'WnA?@>P#*3INmY$R0N3-pAO"lRsDlts!8i0NAmC?lS"A>rdil+R?%WAg?/jT@L@@? ,3)P.J(*Ps6FHQ^lM<)J`B-K%Cr=fPLptLA'c?r.\Ji36@Tnmfb*"@gh_j*mN2EhM[]55neP1'#J[4fb!!j6eA!/jA]C]7]>mii%m=o/Y>,q^LSS9GUn$l@jn?<ZFhMIDV=5_AdV`A*UKJHeWp4#9#RDbQ^d3(L];J^OV @SgJ%\H[VJ^82cNj77pAf"sZUVbqPNF@#h7pfCJ3sQpr>`t98@ns`IbS:>mI^smC%q<Y%r,rY?\63$GK2(EAs^Td@6jg&#;T?K/0mtOl&DbiYiaI<GsE^r^8_3g9mts4L$NpQ_T,?60#'Wg98Xh8qg>m&)ms:S1c?Laq4U:YlM[>bH.t7!14F d?pH[HPBtA%9E%7m]AK4-?q'r+eL b\ZZW%j@#BpRI#UhR:FNFBY,0 MX)UD.@V&b<N-.T:nGYlKV*=46<lTV.]7pJ8ZnT[)OE9Qcn_<mGAUZ?SSW/DoL0oIr`%mXHCFAP3a9hT31a\)_WKS!d>cVAB!`\Eq %Q74!Eg7O%CDk'-fK9/49\5_ bP*M6qTENeA[A5e[MDmk7Ncj,l]a&\T>WVPZ,>WJiK'_pS(i@HR":0-T0nI\pY_5f7\9]I(!&otDqY-MRSX+th4gU)pG0KRJ)V!VGpTDiU!:9MilXlsO9K?O.#Vt("0Rcq%UaAoM'l;E!a[Lko%C3qUa2/fgC%Ako9!#8Cs!X"!fOk*(.s12S9tn`,SFBF9#n%UVYKTo=+FR#@C"r`%rH1U*o.?Ee^YH `WRM-kSJg-80T8+$ofi&gJ.0SQbUcAm=pY63?SmfL.ZL:/rA RH`-&OtN:T$-4Iq@O.r$++jHcbd^kKr33*?KYbt,'e9\GBAJ2;sq-+]r$@St)O ,H_EFUaBX1MbN*f/1W\hjrDLK"R#ia?P(-an& <7g/@11^/Jf9%]6Pj[1)6o$i.Fc#q"5+gm-3HPQ9gbLp7FOlY[j12JE)[>Zl!Eo!F#.nSdm!s(;$VL2<XMU5-Y/.O&B3S;WN%*V4IE4*efM.m8jK-VMe?Tb!O!IK9RF4"KS6+C(gAZ0_%+7;?R`mHAYg4>#W`P:.^L$0"c)/'G,H"H=08o_%^Q7J`4>Z17cX q9dWhZL:IFn5H@U,.-1;q%U)%cqFXQf^=[h5p*3C6_>OVcsm<$t#-(/g4o9 >U/T>>Le@?7D(P`R`Z)?+N`S+**I#1X5AA3IJEUa";l$>_G^4 
G`\5ID8L$31/]a0V[qg+grWO*^sPAga>D6[hG,jX^W^:"!D'2WRQH+XA-AS+jOhS2nR@3b3ghn[,MH@sZQHis+d%A)=7$>IRVg+.r9.TT*ZSkmfMTg!':t7QUgk`b=(HD8ON?gf nOjj2^SPXoCV=HF_[P]cjPs,bH`r4]:2)7M[L*st%2Hlf&eq@ih+Fsop I(EV&oU.%";\0`@(;)V>2Oq3WQoh$F3:E6ai-spi^T$kjc+`rD!cWK($?kGN%?T(*]h?eO+T_E=)8B5ioL6W7<9)-r1cgW.XVf2Y3R[>+0]KiC8Bh>,C!$HAmQ1_8>2UTo._a[[qchmRnN2A^`BcgckQW]r(._m\g_gGt0C`e$GJ.r-^B"?<rB_+FM.HL8VOsmWiqcrmEXAI*>75&4q*)2CE^$F!f.;M.]>LN7P]_(^:2DLhTWH,:)A7c2sJTbDF*e%K*OL]<9"O:tWeV%]V=h)>r3!]l"pb3;\<e'dAJmXbD1-antl'2=j`NAfTbog'a/BkBY()=A?.%2ZCOmCdA;JI)3Dt2FDq_49aZ,`8qV4P>&!Zk*iUn;"A&'Wnl!=ghSC608;A@;mj8!GgAUm&W./c$F+AkA1J-P*ma>Qoqbn\;2<1Hbp;/,V^(G'c%J!8WF76%+/='$FK)!Dt>Eg""2/4m"ok<#`,Y5pdE'$&>-K#1FnB@76/(D#(pp))6aN#*;)ro'7Eea6qGsg"6qln-An'9;TgLrhd;:d9#["e@<#'AY6PeE6DnRL+jL)A7jA.qGMNN)s,_\.6EjDZ?.W*.#S,9Nq-_Y+57LNA;qZrB+4"aOr4lAbLcEA:Md5qQ.QrTlnsYJ\TH';5^F\EZD`<$8sKonrr$J&2DZ@I%Ad:FF0I)g`13t,rgm.Y8nA[.T!+k;kjd.#Ps<2JCCb$ksm0732;Bp*XjA5F"2G-e3<tMA`]ZY\$tIICnNeT@&_;X6S(f<YMfAdV=lAgLL@r6sm.HgRa-V/UgPaE):Z3GD>$rrAbc0sG0H<SkYpZH\8eeZ#+iN.$Y.^.H$G0a+lHll)qArK\69ESJf7P["Ys4ht(J]^]_)W,n-9tJ5r2W/g#"Z@:5T.\3L,.A!spDZ,O.OfrIIY2!ATKIpd3e*0ZAG`D;c&9I+G$"A/DX%X J-f+kNd('j4ND#E<;8EG1LE>bS$N)>L-Up?fbkQSjG9mr$f.,nZCYJ.o[S&YER?3 AV4&1-6#LKt7+C") \\@`B^Y:#i5gT0`(\E*Y!(.H%c9h1HkNC(>sV3XPgi$%.@YApoiOIm/=!4@,]9Y?p-5fN-f4@i.AIYK^QVgA0gI`W&*rI?X=Y?^/98'gIYnqVg[CkH>c-XL-/h]JSU_W<?Gt/G41DN&VAtQ\3E!TWn`6@hORe7-[+pf-SWU/DP.@IJR' T&2YH'_O3&[,Q.eV.U_a>l^#C5I9GII;?YteE`t4M?f6'!T k]rUol25g1I^2k0/q6UXhV16b15[cqs]P=n];5plS6BZpITMN,$I4bm0<St<;.n:3eI:_"),D&C"F?r2L?>aF'b:Tpmc^JL0/eg=Il\&:T 0JndFH$n%CUH6dCAa/+$SSGl?t/2YqPIooLf^_&/UtoL[WN/\,qn`TD\/k/-b$(!+2 TI[O@*1@h;scTFUIXHrN(+)<IV9B(SC)'%AjQi2`r9=iC)DbW`j1AVl.?j(*ATbM6O%n^PGi3>;jLZ9&oe'lo7eWcX8/ '6SQ3W4@bGC/N01^_Xh8b#Xi3rt^X1Co5OS]JrGQC!&V[bR\?++e@B:Kj:<\"#tDm*&?8bIY%;V1K,#E??rQEmr29fO!a1,hgIU)]U_6/X`6n)5rok"AXA`6,g;'p30]t$l!@.8M'XYZgAV4H%N\a1eG$Sg 
W!Q`WD!ga(r/tXFa3WA9mm5LMT"Xid&T.q.Q+VOrj#.(@.!hEYB*Ncq-et0bK+D1W_T2_Z-'r;d+df+7dNfC!To#[$RL3J[]\:3Ns+>?4J:Z4-if`L86=TD=M4ot8!7Q"N\+%\@`<N1#.,agY&r88.I>a5[0`DjbRiT/G2IkOpKZ`?)T8]a[ELKqFogA2P1ZGJd`(hA&A*':c8HM#3eE.sRcb#F<T)ZgU*A7g@IYe?/V2lZpKp<[qHYs>&,r[B3$Q7lZ&IW%T[h;>R;qV04CHjhKr8/O8IAn;h>M:5(((d1J5?qAS']+."p[kK<n)a6A:t-08^X:Tej<R*CA6\N;DW4QmS<i*&6Jffi 4neJp#c"CI"VKBDB?CfmEU+_RR7o70f-=A5q4g:1kAa%NC0]L1JgH[kHB>mZp1E+%;Lg%lnDsAMt;)cqgRs.^1_W\YG>A:QH<hPrN%>+1@H;\otrctFi96"EQF_m6E@W<Pd[jgHPjHsO$6*:]d S3c\:Y*T0Utr]qM`Kc'coV0Mfg-PE8m\b3$q.G-H]:>)S(oUA`pKfM@Y*o]Q3K5iCR.gec%4XOTf8e)<J+7=\<2Uo#IR!m::-% @%$;A:eqS)t( @RDj=GbAYn]%$H*;Ubhkm8eh\1fbKl4('*`05i1m"3%KGUS_G*0 _k,kTJ6X=?A\Y4Wdl]iTonCL:t)L.ptEB?Ab+:F?dRY[H=37M.JrRl"YAjj_T$dLtbc0Tkk4hRXTaAlFt/HcQ7HINZn;Qo?\P92q`si7W]WQtlC^`EN :akD;RZHn1-+WTX,h'0Z_s+<#2+_.4["s"tac_C-WhgA*gA4RGnO,2L&E8$Mf[mRiOj^!_Ec#cq_\TEH(>QsBk^J+A0rPl2=Q%1L+4j+TdUJ6)B!0IcqMfR&o`]Gsi@AgLV,-Kr-5l,*I9(==KW0CW-?Q"-WrnqM6"[%]NcPfP]nOkckft\Rj.?A;FbTC^MfgBhD- #;&rM$( ^b#)Y@;HJ#@2g@0pBj6PiOlX%A6A7R2hZPZk(8HfP+b1GB38&F=FO+%L+j +d:J#l\rl`nAbDEtdT/8@#s>l>f,t!.$L-XjP&Nf6Tqc h4eI<CH/$)j(#%LZ`?0/6Y]mbAse?pDiAaAOg'+F)@Aoj1=GZbqM]tqO4/`j JO?4e%:HFArISTng6,INUlXa&jXh.lKHQJsp?1lK('m$FZjdYm8&Xd!3GWs1AFVY='rsX=r\G\1W$ZR(;$%5`Z&RAAO?dot'im;3!I0A77D0tL*MK_ Z1bqE7=\QVb&sJH((5d.K V`A[h9d7-hhAqQnO&Z1]pGI8i' >sWcAEd:'E;:#6Js6sVS]eB9s)q`L,-1iNKq$F0[XXIE [#[mlZ5Rc06dfB>A:% <pf-"?N^  +H];[+Ko-IFhftgkI3GMHpanqiK.i> nb[n<FA[#Bj3!='TXWV6B'cJ,^0 i;QrWqq2)>;8kKDBN[HnaPpI;=+F*5n`Ttsr0<ca>^_&@ YOA:$[^t[#A=_QQ;VfHT=4Ir8\&" f#?QW>OiSEhrk @shHirk>9/berZoY>3bH<ci.:QqmDE' #14O<?Q7j+_i(jf=sW5t_Ulo^'WV[(tOIed,/9Tm%W+1bbG5hnCea!&n=sW)"jL<[2/t"UmO'+Ns$5\[s )6F=%k4R<;)@j%p<bRQW/,Mh2Z&L6G#c6a#U\(s6SU"b'EV<i0JPsF\OJPGRhBKI\,K<!EH(AOX\o@)'TE-6&.#"ZRc*fR^Jfk^h!nRXRoD&U)3JC=[F:=gD=$ch`KAQN_fMrg8m"K_ sU_0;JcM-j%13;+X(#P-.SjErga6>*PtUN>nTp`Xs7[2hAJO[H,bbROhY:RFqsVN+WtV^Ai*Z_8P_D_j?f!4[X0GJk<6O&A5'/SCO;:P+1DDH;n2O1O 
AADT8l,d3-=Ar*V>*IFR6.jc8CN+]s*bZMR)d,iH#P33Bq1ABaQWr<>R*;-@jT><98-^5CnO>p8]-G))41BD$7M,-hCAc'.Gs_DRC!gk>HSIHHER,+0afJ#(%9jJ`@QF%L1V;GfpAM:`;<1@Of(UC$P@(2TSLo-g)M05)F&*b7Y9o5A(OP5R%WL8rJd9Wbc0L.enUZ1F&[;ac7"!K,,NW*5]>>,X[>%@Ei37q,ro5LRs$#eLU1j&YVO\@A%b^MK._QBm;4Z2.3X$U6RZ*eOA7$bPkMd37aQEma`YiOa*QU;G6KR3X*q1G0K-Q=m1Ok#lE/gI5$A+-of5>a4\^%)$)=4-stY;Cr/]T3AHqY*,W\_K5-U/'.I6@C_A$M%G<8FD X0-s%l.>p5c7/)89RL>$-P>gtP]!YoWh?LMM!Eqtg?a,o0mMrYkjrf8?\\N)A]k#5&@8-%)RlN-3-!R;P70oq_KlO;CAAP<3JOr^sV:24rt=(q= $oQ(1+l%AK55\P9?t;BKMV>5dFHb,iG#<>#X_DMQoEZ+H;WXd;G=0+ Ze&@M\<Lt% 2`GB(+6TW?AB1Lj=2cTH9.9o#WG1I:e0$d`j"pWt'<b([?M.U]JEsT+!'o&oQdp;8d+3*%Zs)^RR5SdUkRNQ2T(:06e%T;"AA\$K)\:;^[<dmZJYoRnle';iMk&U!Q1mpC V/m&/,^!,<![=ABYWs3BERVYi4`SVXKpR7ZN,.jVg+BLX+ UrDb._$O,AB7nOiT6GAJ>A5C=B:r=1ULSDD]tHq$b2sYXAc#':I5 D"YR0=LWXee[s"f4O &-l4@8t^qL7l4UWlQ:5Opq%*_'b]j4Ik^_kH+K5(@Of_fhWAs`_D^2o]]=':KsmcPn99VoGCtCKP\73;K?l>O3gF-Ns`_O^l*e3h(FY=Ddt#M?Rhg^@`o>-OSSO&(G0Xa :Zk')</Ml\RD+r"M/Y(%Zt<k75?n@h0g>4ATV0/&##cWpG(AWj ?*kd5sqG/9.H]e%GKb*bi'Yg>K2bEcb6;fR'j=Pg1rnSL<!.=h.H2QJl\M_qD,J6g^6>fT)a?@3pMXLprO>^Uo4`ba_"i#tt'CAP*NP'Xt6tan_SpOq/Hs/N3V/PR9Y.>h(#njXVjDNmDSLEJtA$\V@C[>fcWN>3(Y9/(\,]Vf; 2p(b@$72V;  )\$s*HdfHp@j>DLD%N@gf?:_8[RU/O"Q[1jALA"[='/OAA)-i=@'636!el8[YdW/V(-d3>T;S2Oc\PR3QZ+7YGG6#d$ mS`#CnW9iANsQEoRP3ZU2q(@/AGF$8MWB:bObcRSOR'/;6iOD@iWh_M=/Q;fQ@^ P*ipN=7GDGqTC],@t(@%+1_LjGq"%HbLc9/IJ@Zl8sBP`Mc8C0\m5:s4Mj"gZ&Y3LhKI7+`m#(@r-R59\/I\$0$X!e p7W>""D(/k8OIt#3s;/0*7bYoik[F]4"6@,2'" WP@c5?l' J?J[TFE*d-oOSOTVGq:a%giE%Flg.bE626m-2i1h06BnFJULW#!20?:C(7D^9E0%hrZE&H^Ve4#j-W6<op\kV>8M\.!SdPG<,UA;M)Q -$[7gT;j,N\08e"\,;`6X4<^2AAbe0HZ`Jl]\8tN1P]-HI7pV:Q6DD7$k(pQZY7#OP-G %3co4:Bj.A`A5_iJrljh"J><-ZK^Gm.b3[QcPripbVm/EU0Fj:Dq$D`/*hJ\*8F8)'&O>YHQn6MSkmMdT^gd-NqPt m=$6iY6M7;l!l4CB>Rq2N=O;YSUi&bA8>)R9A&KJJ,A\sBCm< "'#!!I[*Vm;a4Nh2^f10r!.p:LjdF77_rg@V*XU<B`qfY/</_OlD?L:!,(N4@GP6B:S)(('TF@@aL3?t-kinE2cdC0W33'oI!%tBr?D?TK&NQ9@9^Z@UAjeOjrbgQMdA, :A\O4l@J_+'GU$lj2YZq`C1S 
O[0b6NdLiDVAtD9W5_]>%!t+`#/elU\2\lk.@Uf>0tR8$?qQJ_:kWKJpe['.iiP'G/,Z-L8pFOqo`KHFDsWdbG0n5ai<bI,GGX/bPX\IrM4Og4tjh7Qk6/$aM6VKpt`cbAZdfoij'kj*0\t%Q-[lhgD[,e#rltb #2KY#n#c[_oJ r8 O#/nM2=)E;JI>C?><Ha\cmVls(RBdo[KpER8%FWZ)gJ(4GKPZ7;6h\daAa?j$AA>`bVT_&"A+H\hW2"p!4E6CrV$*#N9.LL:5t<T%Y"W6JjQ+=F>M.KT#i;\bJdqR]B94)i0;I9)/;_5_SR"G4T/Uq?_3WmY*Yq%6K^dt:d59p`jeK A7Z^U=OOjHR2FeG0OT>S=mT<Q32rgqA>-A#'Yc7s s,@_p"Cb#T>.jI/9:8;cS5=jsSXE5H?(.VlG3CVaNG(*CBam!?S5B<bkE`;?\?H)LTb]Ro'1PT13Uq"HKEs)/(/6cM&O8X'p%XA;8;5bgFT9H);UYP+BiAUAJ*kXb]F?;!o% )@Omc<fjd5@]3N[mfCN*f_$%]j(q/W 1f4E`:3m$KgSiiVR"!/1kOscbi2:0B<ioZfAbaQP#B3'2T&FW"M8JqT)<6$ZR61N_WH4NP:\f@G5U;r'Vt<?o_`0dRW`kr6($A%gg6=E<<3XZDMNOF(=:3j/*LR>-Oen)\\p>"\/!i)ka$m0i,8S6no/o-glcW&PAcA![7-A3DVSAA9js5H_.%CH=XT83\Yq^+er tWe,P:FIFHiE=1K&;&5+@jj&N!]%i\Fq0sD;9cR ZO0P$4:?VT.;hFi_??dcI"pDeF4U.(BS>`oqn]VA:TksLA\OW18p^FJhqX,/F&MDPFm1GfAd_sUVC2P DUC rMl@APAG9J8+r*(JjR;!7Ddh:nN#'BH7"Apd,iKd(&LW^Q*A'W5$=/H)Mn\2a'h]OXO["oUSI"$;c]Yhds<X/,-ZW+=<UH['eZ,*f)KAm*3AD`mJOB%*4IFq9``(b:J37#SEqm:\&')ZRcg]G+\Q0t[P)#Y>U82dWk'L.ic o"Wk+<:X"N)e_DdrW1_OL0#ap []DiN\JjPe"lRaZM8r4+ag'7=*4tNbea=?;2HK)6%?tD982!sdEd*VDh,gqGpD=$j;q\mq]c=r@ .\Zo+Gj"FWL(@/4[T%TQ@=!b*<C[! .2M'*b]!Z[bp!3G#418!]SI5VMq %qEo+bTgA.tjgkC!ga3S1CYc$WNoh#fCa/VUs9RIs/TVlaAXn,P<qlq;8f;9"0!5nd78]WEHGfVa8V0apq(WA9fbl-Cqr,C6/,OC%cN&Y#]<qWD$WKh)X(.r$i5lE%Iq`t8i"`d# L+oP[RPJM:-Bh!!lrcL_]Y3VIAePj20E/_*cZWA<Ce:[8%*>! 
)TK$!Ih8)i&iP1HbDk!G@s5A;RAKR+JPgi\Y1jK1l&5<m\WWZqK'DY7p'PrN)egc`=`dp^*"Dtb8+/&6["^]`Zep`oAb<YBeI1$NkB=^-G#cZW;o>^50S?LMPUI]rfD&^6>LAf5[7MmdRNp?/!:!`3@ @m=/9C3*c'&=Jlb8^?o:`YhO-5GrBbqW0S_*:D6G'AcGjrds#LI!^7&RT9A>"[=bOJN`4oW7f^p`S^J[<(iA^7.$q0Ad9s=6-AcK4r?h`EXp9(`g59GJjHSm%LIU+)O` +05ZHsoZN=h`'"FOc:T;@_7CPD-TWFg/7<KsY9S-+0;2q93d?_%=5C_d*8BrBe'qDiKJ.59j,jj)$3Va/H#-r$@$)s7it_K4\GFLjRA#%@W% '^Fi5[T&Zmn:D6EEp?FQc,*R\]F,\GM*amdRBO:"lHtU $B_r+QK#KSlk:N3oq^`-Fj(PEf%BK+Ti4]RTFNgM>UUAQl^&,_)IAt(7^3g?X"1']/&b>FMD5p%bgqDHd'+a`\*88*$BeHJt K jm.: VL)cF_=R<IK^\8Pa-)mPR,n3bNE["S8>^<&hV'B <.p0\F'E;Ze?3+C/g@2-^LPA%-Wt%Mn?%PoB[r:=U48X49Tl7jDQ-WC!LC:%#5l[3b]>e0TDkC/Kqs->A2$7HgDU\Ob=*= a(%j89@-bhMa9GMka%/')S`S0@2kgA9MS!rtX65<E/8o7pAa4gPW$H+s@[7'a>JBEmS?<I#)aLQWioZ:]dB'$4lA-I-"'g/mN=OU_ NG5D[r,q;ri)e\Nei^0iX AGJVq0q25V\j<m ("o#.?sdk$XasYX7bMTdF&hS;ebA;#J)(8Q*sKDI;)]k7m>h7WqqkJToGc@S^qK7S0Mi.<iab,b!kr9Aj2:rLd'+:76mR)r9:PrPA\8]>@^WI'Lf1NZ(%hA^))K*%mlBs7VE9M tNi@9SNk4F1ah3?<`WWX!^:b("<!Z/7L,\;7dM6]a:MXdKI#Y <`04kFZIC:<Q1>rKN1`qT&[))Pa%h#?\O?s<IT_`'M'^tB9+grJ3'/<]d&Z]O.n1A WUBlaE-n-2TDK1h+)ig8g[K <]]R4(#9fM)C<]\HCZ1YdaJjX?+K2#mGha3>2#YYOVBE#jcI7CHR__T9Qd?/Gm5[tRf8tW8U'id fS@_5?".$I9)1Ve?9J/?hL:rHa>f<>qAn`Ug?i^R=2IOYq> Q-WJa_S&]S*=VhPS`-%pWptKBhe.?IMc/G3B/<o9kj1AIKN3(#l'?B4k'/+nD2Xei=ZbJ\lpm!0>#m9lI?p8e>FFpt]bT6hP,(b6o6=''.p&rYZ]niT?L(JV.1*I#qm$qGU+@1a@cL<! 
gk/<BlR\1Ucf79P'EKVCCQPq$s@EoaAjR7XR??!aSH!VN>dC+IF"Af(l1J6PA(O?[RbhZIp0IFooE-%j...DOF&]^l 7J;6pC2CJ,dd[#W1cA'0Xl!nH:a24**!%kF6\S[cp!PhCgTLWWPsr9lV.r_:HUD"PWti<@6R_W)LCkM<kp0cnA> `fcTl.;1+rhbXX5^=<Q_<k("Tk_4JC^hE<NgA[$drM__'$t#\@Va4LlN5A.Gq!7Wj?(T9rBF*lm\n3-+md] gr]4X?.i1()\ei=5B4>(Ke@^b')g,WcdX]c'j)@KIc\Ng'-G1@[:J%MJ0E<*&"f3MT*$fVTQ4=:5l#pAIm+ns-,:R(0q,.ibo*21jq`5Mn5"UXX_n/J<cg6c7j82V\$H1!mlYnj7M0AA[8X#)=UG+;Qm5G=`5g4*;j6q#q/hiOATs`oLIBa.p7][34AJe_r/q,N(CUMO4Seg%:(9s:FqBnHLm;Sp^HNf%fAs6/DL@GS,=F2<lY+b+4r."IEdri4J3-e$?ZCl-W c`+sdl8-'."@;dIr>l0H53.1=>IWA?Ac3_th=HtF?p-i^VFsC]c)4E!>+AQN@@\?IH*tN:NBt$^Ab=?jj;lm;Cl[>DV.MJjpeXd19lD4k28tCO)^Zm7kZ'bOjG-Jnd"A">-m;hHB@g)p8ZW.j4k^0/k7DRD=Yn0=-PD#]%5kU@eK.\H;H59XF[(hm]qV"R@"iOA1M"]MQR5, 'c2=Ph.2sbD+HmOZAp?_c]5mOg]OM;1:$_QkaS$csUnd/)e2iN4>IN9UAJ(J 0Md^]/dB3N8"^q.j'bfCjOSs-!rCKYUj#;_o$54D,Lq3H0MB?ZCI*C6.%P>ng>EW#9h("sMP8(B4l;!oe4)'&&,Vbd;P-%VH8&q:=/e_(4#4/:nNho\*6BbMqG6)56F>gpEgZK!nEWS,T?r>,ApAKD9E lr/1E_Tk5o#XUdGYft`5194^WV[Q@;jA)Za1NOQ-a"5;LZ'YB!6RfA#sZVIf(\6B\irp/KYVa[N*8m8O(MK9+JD=m;iF4VF@`7?mhKA>bdE50AVVJ6Xt_#pgZ7,U"B#=]*?mrF3MAYRfA"- N7ZDanqj(hK3T``#mfi,0 0tGA(LbPL0k0DWJ8XS82,b+*YF;#;_&6-&Q]tF<JI"C!>MafcfC/\r_QIdc kd0.o#^clX.E=3<l:74As,mM4A>'B]78%oRLUbCd8Cdit^GnMsE#pR:=qV0% skgZGnOK:scEd30<*f'dem[/<I$O_``-EZ=92ToSFWH7*J3o5<!J(q/)8q/1=YVRBh\An'7t].l%shN$tJ0p>$!B!jS<c)"!AJMYn^tf!jB^]W[,6hH$/cBilFHDo96OhLRD-bi6;;G#6B[N`KWIts7?3Jg6 SZ.ih<2IrIS93OABMd(.A\d"8KoWECE8_KZ>RZah// SC29rRbrk"!A.i%[!Zlg:&O6%iWnM9 8^e)K$` .G-."W,QN2rnGm(V'LEF*IUT1pmH=iC$@jp!D@&C!nDr>5FOj^N!5%Q<(jF@5<1N7^s<H]@&h*E(La )=cS6P@O[%_n`j>cFZ"ZpkQ=UKL`QqZH&"6Cp'(Q,H8ooqZc.\WEl1 a<GHjI%;4#!Of%KUc\T2o ,))^fP#[OhPE$(3N.hBU(Op0'?$1*B7"I)R^IA\8_G<*j6h>UR`><5 Y^l,ld/f28MJXjV&$I&T>$9g c[K"d\mc7OV#Z*Y$M?`Q(QY\,_\/@r5H \McYaaR#CV=JK(A*.LI_aRAArY?AjK+R= Pn%t'3dX&ZJ-YA9p(q')@2F:GVe9V+7j.]pdMrT QF1V:$*`@.?atU6Yi;0+;oNA_'Va0A38-sf6#YZn_J3WIfF Fsh(TE64S5-=Z/L)#<TWA<e/9Z<dh/<r"o[B;"I[EFh4/96tL*D1K9Cn TpCk_?lE@Xe;bdnlE,Um A")0$AF 
^8W0O$oX&?BLh78NfoP%`!aa,kE#S%DW>J7S.Hg_oYN_e--jU2oaK=peMh`Q[$D)dI5q3tA7VDD+,?iG=f*bs])J/8;qOam0mcO$FXp4jQqN!iMM'KIE:l+He=U[8o/9/sLtd[c3oJ%:Z5hVVZ"dO1$;idosN<MhfhQhk>Dg,(A'F!NfeY^O!akMYg*SM??f73PL6.lfHY`r_I]-`V;.\A9j8h0*Y73ZUQkqo!Io%i*+hO.G@,/d_%'FO9dP@^D"l&Qe*-[$!^=X)tKAH$<sLrIm3lmOC&m&+M'm^.*tK3Q4r@[Irnm$tT\=\(r"r3Q40F=R(Wh2<`7YVrjP<mXJ\bgr>:TAK4KQ"MFTAk818%GnKgdSG]J4p\8K2ZN".1om'PN+i5)lI1lhK8ik#m3jVY'W(+!%_n*W@.KUQ+Tq0-JE -7G#gd:ARilZh9/>]0,_:kTKm!F_oHrV8Y<*]D4'tSieR&AOkf_?J'/o:qPdReU)LmpB j?;In4k?[tAOi+DZ_7&<2GMK76BM`qMUc#1#^Xi%(H1J^OkS;a/^ANh8e.=-"J.f0\P$,h"s/,sW"Oser6QcaCK[D<^:pbTZB_$q;0FT//eqJj-%KFO37,%`6kg6B%`]M?"8cte0V\*W#&WL9:5a+,-(h8`;;D*I@:ZoS%9(oJoYfGoSgUJN,N15"(I/G?G$G.t#nspAmaV4KoAIEQbi-\4R?t>Aq6kVE>3X"XFVNn;<:;]r%'p_N[pjjeJ]^W :(&NdMqg"(!'F*PoO^!\f2q!S]\Thf+s;9AI_3-SA@k>p/[Ah'WtN7=#f-+k(+p5]e\^jQB6-rDWMAb,+K0d=WmR.><:Xsg(fR\tMOkh!5'L5@BsTsGMd4A+[ZSZIp%/$aPAAc*A>I"h?d:^? G)snhb2eNC[(J8=I-e.nb2lLLQ#TVNLli@WnK`PXVI]M[ki^r0Se4?W_&9M%)\]r/0lcJD[Rl4rI2qG(_]^1,`_RJ#YA&Y3JI,eHKKG@p2##LQX.89hjBo!t_,MC;;I!TWAp>!sjf.^,+sZ=Hr%N2o+cSk.9lg1d!]4#MX6(]ReFA=!%"gqCc-MW<TF<*\7"VG)<.WJYbD8\hcpDkbG%6cETS0<),EKf.W9/g!Xat@lDs3)o'#mc^H!S5VIC lL0Pb2W2(&krB9NOK$jLSe=gA%@qO#$7V*V[L`AFMPLN4;Mt54:A/%sN;]snb_1#^rVWOAA$3V7SGF?.<o,! 
b1W+tjd'=q^g.4*Y<f&ekgZ$T<tE.H.)3QpoLD\$aBatR%P-?("5^]>,8#C;c=th*btBtJap]K\^+lV?\>[;Ap8<&RsH8cH`#VS^J]?gIJ5lo'9Z,GC@f,AT(>^dS2StQ+mNQm0e`6\_:)?/Wr?,_^'ea6XtD2fY3$hTp?eTdaG<+,!fsOW1-^B^4:8d++a'ra]JQkMO9V#V2@U_YWLW0L)OSU is?CXFt'5KA:p3ZY/Og!=ImOhfIR4@Ebg3AAXGh-Xkr>!!?CbmA;M$=]<'C[W.i=B+s4"RG.)<OA?+3C3# G-oKV=^gXD(V#U\UVDWJ3jh:[C4t^3P0l1'kL6WaFX\IGFn2"qG(NJ8E_p9Vbc+j6BGLK8g2I"HI03YMa.k2J7_Wm;1)4chr![$5D)jMAQh(Q56<7CD.ad$,lMkXDH$lC2kgP9YM>DcBm(Q7PB#I2Mia<ZLo^hb8^^>Ys?\VeitrGO4@U"ZWW+>5V8/C#.hP(`P.9,&S,8[6IO?BUO.oU[*OAmm("Z=4_) RX>'AjKETf+?Jt`28%7n%dnU3bRc0=>PnWVZ0_j+/a5&kI$G_=Nk"nAPVWhe0PV*tIZD&*-^]gpV0E1P4A]i,DH8j*@-@=W9'B2DTA\Q23k$K./AVc':YSZ?%dQ9[d!i58q!6$qP*J-.i5JAj1[1J_`lK\6pIhQ`;pA0'"XDL0#0oV;N[mZ)e$qTtNRZb4AGH$S(J9=ACXY)jG%^ggr@%IsTQkk:ThT`@^qJ8[]qPj8$.lmTp,H<71TW@ASeAY:q.[0mZ5N,sA;VkN=6?j$E`[SPR 3n*:ND1c1-L1KU'-WPH6QA+/E_bph'83j;eG**giUl6ASo[]7m2[^T8G6jUUd@prbCrAOVlgIU[9_ch<mjMA@0r&$6Y8f.fm,LJ=Ki^A4G&^'r[*b'[&/F<>Ge;^X@ T.a:+&tptA0%5-p#@#$>r5T27#jKEjFHtS15pplmSM\mO`"FsN)d2jm9KKme *OTa="`gABf'/O=2@,e19XR&<8p:1L&U,(@DBYcCGOIeD<i"RT)8*M"R#%"[?"PE$4!a@630<Ib N[oP%rQ6_Aa%:/B=;5^-+e(s9]9Bj/&hXX+XK n.ai1k%^Yc((gD:6qTA8O5(j H0"'[$c'XIX]_FGdDI$b8M7&f!X'fi>7ajB<-fbp'\V>q7n>o8fBkERO\?Q4ie#pFb eSs)QTR[pd1`-YS=Jk/9Y**^`p#YPQENmk6UU-"]E'ZP?^,L5SlthTnc2_lKd_>imUpXBVDmRbd5r2-O?G8;6=,.$CRM<KfVIZZdPeG>UdZ9RVs'oPB *U9T*&Hkf^]Rn#VHLZqlh/JJ&Y`At `hPB&TR"an*#.J0E26`+CW=[s;^?%gebsV+[^`EUZ'aFMS!NjmB<oR8]8Agi">c cAG=VO+]+M[1rgR-?5lOMiXm^@0X1"7j_=A& Je2mMh_>M'mat?$)l[S'YjOsH]l ek+Ls5l8 cP,=`MW@m UoEYnV^MD/A>4mN=4?S/Jf3*>f%V)B@(;A$Bf^IkcLAq-T(f]i`7i6Xf:%=0U>X[+'(ac:l7o#[Pp;;`&D+CLq:U"/k<";m/K[E/L"23TMi(bf2L6A0'D3*B1 
U.-]Z3FHJ/#@9'V5<U<[#q)ss\;/h'q0PYFh=?L=]W:\b%I,bdps1\%17;.eWfMm0DD=;H3hXOet'6/P<(/-O]8\WRC\?-kklr;7XPBg"FNAGZt5M6&`mUHgLI^^/mG7_5I7e?<baYm_"q$/NqMU5"qMM-UGFaHDc?H8Od>A;Tah)q8)4]JBp+%7>nmhS?e79@Iq/C*J_5r@9_ZdsVc5Y/k`BkgialL8::XP*%V[cg^&Aoje_/c[ms_4<MKHE8:4\X_.TrALSj=1>\N3`Xt^(KCneo>^ogjDh,=^/RP'-=q(!%[B54UpT,ic%K+RcrXP9sKD(0/m%>ZA:s==@cAdm`aT]JIC63Q0209[%MM;9:d.`tnKqFhg,3(YO4]4 =@&[?%6VWTA+K=A%`S/[jRA/rCA3#BBhAdn D8p'l/W+tiilb;=^'\GRM(8b"I.[?*VF,+%1"_#J8mL7EYJ#1T%Y3G?K^57Xmk(TQ/&Ajl6^mhsR0CB12Q@$(tBiE0YB)Uk;A@'tJYCab76t1Q9Ta*0f!AdV!5!3%;K!<GqsPHlAZYHP6^1``V\\C<Wp/s=C8MAO?ER@:Kn%5A.Tn[B^6nZq1WTLl2?c`lMHF5_iiZ;gaRT1!WA>b<,N&mO%TdK 4bOOfn;m`I_/<?>:nHcOq]Uc:3ljjJQop8strF,cY'Z?ME[`ps[+adMHJ2A:?)ffMG@OHf6*,W(kDNM:P&#"f=)e3h%4^]f=0T]1+bapD=A60M&l'b^4HU\la-I-j<`A_"/V#<Ab7Jps#[s8A(:9Q8#hDAO8rVTZ]cT9&^QjY,4T !:N\#"apZcAn?&T+f$"AX^X0(t*WAQlIc[QTbT%k#Ei)Yo\0a%90(J8fDQ)qnrr)jgJR?L)')0l?/$KoY8#N7r]ekrqIHHj?Z0HSK8NV/@pGo]b=LF?N?CA+5:0VJe;'?=hc+,0k;H,A1E:O_K+2Y.H"?N-`XfWfRW:7+,*W2GpjC]n*oD9i$Dh"[7OY<6n*9qUf0q@pG+jDa3Zm2o(\9H&fpVa/9H.3;fm]JGeYQtG8Zl5[[KkZY6f:2QQA3H=.`;_66IWsh,AVKml<CGB7/&QkF3dR;;@k\5L_3B&;QG8h'O_&].=h=#n9HJJdCLA`BJ+9T96 aAgN 3DNN#7[XX"E<ejGt:.'rA&].'-[=HPG?D[WMPrPcQNbm3a_.=]c[0LB?JT4$j?.`s-(9i/3WA@rmAi<1dF!$*@glo>h6r)_Ti8b)KG.K>6ECdLQL;d`<t1#K%+kgDC_'le24[qs8T;"a?e2c&]XQtJI16 8ZN-pGA)AcR'M0Z0qG]CGN*eMcCZUMRf'Jd,r4rA?f%=K4?DPK)%O%nMNn02>VeFI`8=)`Q<EC!$^.`I3/#gd^l77:8Z9bN2p,m,TVF3P]%,,ND,>h0ejFQjJckrf,><6bBmIUJ+rZ MAs)AJ'*[qSIC_e=K*1P0G.nW_A]c%)H*HR.-.g-oO7C N+Ie%aZ]_fDW]:S.9TZcXq3*ih*a)U6k)iqoXs\"*dp#U&eM%A6mVd2[bp&`r$'^UEi&jSsWU,OjYX4r9$AG*r^Db71Ul1M9m8g6+s) CaaTI`9#b)U1AOY3ALhtA#q\_+]PE2TiW'6aZD\HdkD)/U<D&l9i'Hinl[%5/S'3^IPe\$(c1]PnfGtqWV"3oqjQX i+9A+<qe;gAC\$ONsLdLr,G4ikN#RG+#$A,G!hV@t-8Gk"*d1Y>p.d++A%I^#fGKAW]>CtGt@A.Qj%UKZZHNnXer;a(&?Bq,]W*ot]P9e%iZ@]lXcdPU(h#TA7HGT.E8R(h;EB9JT15I]q)XtH'h[0IjLYE=/RFFlcVS)&GK"[Ag`+GcdI?tTD,_XG..XfWAl)gT)7dOFFs3jC#ctjk"2lpNM!Ui0$5H@ja;XZXDT\FMh-m 
^D%AIc;N5A<sr$+1Q&[GsOUV77rQ"t:$ZXPqDc3l;7H6nn0sEUbd!Tp.rP8'B4:Hsc=%^.bmYfs;jgILi_=0L/n(@;dPU<_U5'b>.j%o%"a>#'h:kjq-Al`V\]Ih H>:F1hd,tSoPac]sc\'j:4\Zfo<-%foa.F%XN@]ZYm+SIFi3f2)prdB/c$U1)["WitMkFS5N?=b.\XXf4D@[A\\jpg8iNkRCch?pB@R3$:G!X'O%-B!HRanglMWA#5eaF2;a 2TCs_<CDPW_>>cEXqFAo7-<l$ ?4krlJ/q]K\Y)j_$AdU"3'P`AblRql9t06f8(bRC&*:[#E5c6cnNWAQgS7aSnn1X7rNM<c-tV+Hpd`MN$R!AL": =LdPZ[D5mg=%sD"TdL)m;AaQ7P_-9A2s[.'TnY'6"C6aFL!a"G][8>&Z0U1iOHNSG BShrT+_I.=P0:S]oA[]6&,W.`4_+#j5G7Qk+67og`SA8X5RB1)K9WG\go0C_ao<m$^:m[JAA4RiVWn_V`c/am _]F#-;K^ HV\QBI4UD()0+e>N-2sI<b8G)2V38bA`?:\Js*gE)E#3p@_Q!\'Ghk(?!ek\S=JFB+U_P0?Pnn<qZMQY+M+q"[+Ga^<<d25 Z&SUg4PQ3F1?<KG@kBY=sQ:EBda_oNV<OF7D03FJ` /Y)D3m1d[7@JPj)LXn8YknJmPGqOEQo.AAPCI;d9*<3eIiO98tl_[]m8LaAhC';5`ta2<H8Y+.*%GDJDA,6[+g\@dc+3c[D:B:C;jr-0fS6mJns=D%G'Gt@Lo!DRPV*!e=Go/,+dP>8bQ*>P_NM#%ie,fA31lW[sYn)Z-r#>4lA[C@Spt9bG0YQt_;1$V%O\-Srhr6H?jM\GX"KX!CpVK,)Z)Sa/YB6&^i4TpU\FNZUgPQ#`l5jnK]-.A*l<](I1T)M mQ[TM\;AdEF-n'*f[o&=$SD`oD#*7Kg#R?0'N=FomX:N"m$rM <qFI+S_jg4bY7pAf=UP6`2%?l6Q5/U2dH4_]^a OI@JZES6Mp<1Mr6\[(/J\b0MSRHDh$6pHA![3r^#M**XHJo?X]%E`Y3%6E(BZbmUht6*di2T$DSSEg^8fI`@D#/?:"IMNW]`Smf$2&R/P<qoX]sHKQgCQsMpQsKF4'cgTh\@DZ;SkDGjm_ocfF_4qcWn54/aH\)F+6EQsOOif4sa 4Q9'LTK_e%]HlDr#tL=\akd0A(p*YLhENc#DNA(:t84FYn'<,Z0/NqP4E7#XD$T;>H*_2X1o,RRolm1@Pe05A)X[3Asq<ig!'gbhLPd`1t_,eA^O9R] jl+a=jlE$g+k<"$lm]FUTi`RPZ?3BUU8A.F0lelo#drgnH2oJ -dd6*>n$r0RHNDAfC`JoA!e*.E]NQWh.O8CAM_Yk%s0aaP22;=30;9p`,liM"tl\@(q9K\<%V!ifKFoh0>QWs)RDAA=Ra\o]GE"3"T[2iC$PL_ M, TFR<0G%iR[j=4EJLZ:iHnSsAGCQF;-gd"'f^PI3s!WP[gbp\p2K</_:$sbN!V=r2ks&;J&Fe$:OSY`Pal2-K71mpkr.)+>g],fl(NBF,k;l[bE'27W%cKWL(J,8RH^PLqi5d[&W%Y5iU.IH5\6PfP"-2htG-L#W8b^"J%'XB6nV:Yk7'bE`sp[^i"l@kZ@\*T<pgMMK OEP@0;5<_>0jnA4(]_pf=R^SYRG+EFQe[V&i<c:NK1.6ACM)NUd]&GR`<#1()LtS_+1@kOK:GD3-9+77di^Y'>W3OMp!VRghd)Je+^<VM5=f^)qNs3.KDRBCM#6gJL?:(e1@M\U.&R2AJtbdNO&9 [\-T"p$<H)bdbVUPIIogC<lJ 8?'<oI6oigI'jY=PLB)`CVY 
/K2rJ&Y`CGd]`jT<qF!%''lW;<DYTq,Rl0Nf!jX]GK4r5S-9?b]TZQ?/Vh45G%)CE4Z"b*@ktUl[tUL%4;`;)KI/G(h/:GW"GjG.2Nsr79_+j_(#f)l7kaO+sO]j^jM>+PLQ$Bco QOiA;o;N'HA`eAV+654S[o4[>]AjcN/g'VJHS9Ltn7b7*k<h[<o<>F5\\`rkP@ChHLh"]W8!;V8C)&^%qLE?Mq88IHbJ-eA$;+qOaB@4kCGY;O=d8\.oRU%s[]O?\D\0JO98 L`imB\!#rHo0]i+02k9"4#)5:% A&9Q:,;sDZk0[o(GVlI*T-W[-G#J+-@r!,AF3NG6OhUcK-hXd(##J85GUGZp0Zi"9_Pj4^e])>UC3I8T+Hol=BE ([Y24p[<]CUcnR-,G=hO3WH`WDo]5;`=EGi3^d7aj#G.r2kEP6S];>ab[<d%0<kE7lJNQF+pboAUf[;< l/&<gFY(WgTsAh<:[8(i]^@\,B\?($"QQbQAXA-A._A4<^-d#ge4A#G#@?R%k'JS:V!Z[D=0j>3jh2X1\'!2D'sl/*+oZ8;&\"VXrI);dL<t+HS0(6 6!^=Zm+] :)3VZOi="O$Ynbc0]LR]d) kS=rgq4,mmKa-0\0l1aS!f?rm)4L_Bp4FQ^\)q;)F)jq3)XJ>A%d<WQ#&E4E@0"<Pf;*4BJ.^kfn>\(c?1; Qp]/?g?:.b8%J!N`31"B>Or&U"dHHk-+.`=27f%IT+#r:$0SfoeZ%p7[4RG!3GTUmO/JVKYm=5aC$WQ!X^@"9li>0s2qj)2G,AZ6%F&N#lHlaA (B5r5U5*TX\LJGd1!hOV`]YpD)_>[i*T,(*_oi*"b6a-am,)Y"nK:-T#Eq"-l>R9TSU310G8h?gm$[7DgXt4B2i7fB+IEWi`Hi<^lRseL7U>f@*+N_hAsH)J>f?[B$(-,E*58/44`l%/9]6c,Kj)TfCDaNAROGXnlUaN=WIt_!Y9b5@@?@57K-68'\0[`cnBG.YRD@coGCTG*&ChORm*2%a"<'0s7TKC!hp_`.A2Vi,d*)Zd Iq6QK^KSO9[n`[soc&tHMWD`s=K]1h ?BbJ?pH.n>A%CIWKNsboRXU"/p%Ma"iVi`mA V<a$IW"fdA-np8qbLV0P=UN(i]i2$cr&%&Ddb@`i:+jL.sb>'R)b^e^Ge!iqH`dqTs@a6\7EA(lWKVHW^,,2o5@moD^Q4\GN9YPjq7K4e`AadDQL-R]3/b89]MJ9jf7*2#m>=*RW&Dg1`A)dAjA`^&]^,<VUqairOTOY9@q%CA>OV.iYWHliSEStGtBgt71qQY86*KcG453U//XH5:'^R0<[=9h(IsMdA)$%L(R'mAW<m&07;O1 N6a8EqE'ZCXkqF9@DoE^UMGOkP);plN"H[aqVD0cg+J S00;(DcDDnA1VZ9;484q$C5%<ddiR+TK>5BRHkMP\5dm.:"J?0]+<SJ>b+=Eta#\XFb9VpAZU6;?1S^VLWq$\!V=./:2\+lnHEC=.oMSj/E72a7[kE1ZM$ h;m1=lfT#32?VZoCDnK?EAA*MJ<fMq-j9?f6i]dP-YM$I=jjPD,L4@GU0P &JV7^5sb=Dqjch%B<^/]`n!W9s,*1ctH.o>%MGB_l\<&/P:3%Me(rQ3N&'@,b@>KGk`4D^h)Fl$DmI>L:J>^1"MYHK.o^!_]!qj(Z[=!'3S._MeE1GdPrI#i^#jGq4"h\DZ,?i?jdXMcI\MtsT1:(PG8#?DM?Zse)Gi1OC1W:d`69"akP7!>:PcQc64aGhpcnC/A+d/nn@fQsG3D@9+i"GP?G9[f.3W!RACAqVrdn!k/VWJSfoh;f5C!H:a,M@n\7_]oT *>nDGh*r@N?2;9"6cV0-.e4](:FDEas<%>X fle&l=!C,q;6dNIU`;?C#[p9*d+'L<On \ A@egA;=o)["A"0,O0$C46I,i^LPTlqM_k-FYq/R*WBTBAt96f8:+if2VjTP#dh 
BimMR0sgI^+ik^71Y;h,d:EUQL?Er^O"\<(gOoi!]%`3+T^FtsK4U(\B7kps $_k_6g,VrYU%V?A@Q/&h]64f[L,l,!)YJ(;45?d/i#Xf`2Z`1-OoXjlU:2?e:L<E/*$r++T)cX=Mf*r`rl3c@i-NA6CC'HjgHaIPaqLG!)V-:;&Kca.21_[@Z4??N-4p.e!b%B3A TaB_*'27 Fh%&e g-9;"Yh:as$\*Q)@gsV2Ne9Kce'ANN--E<O`o3R<nDi\R4(<Iqp7$qVtVH+M7JHtZ9KR=YGeDh7U#[Wf27Fb9sh7Vr(GA/d;kF'A'!5t4VpN\kOc.gT$D\c-4[XIY4*jgX[HZHcAA$g\k*8%c@:=lfo;#<mCjVrLE<k&BN??K[hLrWn1$1Nl+1#*C8ZGl<bB*C@CfF<abJ^PQ5@ D H)MF;2Z\-6Fd9f*KUSpj>N_aX;a.e2PGRkcVPFIk3X*q qM`"jRY?>#E]"-`YLK[C:QTa/,8p=1J7_rZl7XX-\rlM0;$HGfZSt;_Pd/DPlGJ+E*aaO!X['htANedI)<'j+s!KKh@q #VO!HT3m\SF$Bt36o@0tc-ZYFs?VnN2)D,?Nf^['h4m#0;OLSfOA/dcHiX#L8b4dnDs-nV_btDU6gJ+[C%Q!&9kL^.Xh?JrO7jiCTc^3d@;\$,t,+JQOjjf^FUj+*m+*A4]??RgFBWlA]bs/CcX49ejPb08lN]h)>=g_C9`9sY'g+e3;A7At.1j@>4gM^Tk0S]E=;r1Q*p)8ira/O:I+9/_QiRc.\R<,PDD_lGKK4ff*<AEG9rt%s*]Bll_M[1B@DV\`VXtD% !+-2E6^M]e\URpb1>*n2A@HK=&?4IUAk2-S-[E E`Jepsj<LR3&st._hb6BfP)*QNLVW&3B,-:9kn>P*B-$@`KaMlrHo]6i-PBP]%i]$/FKR=$>-Nj9,m.RmT8H&h.Y5o,Y4)8.MPWLmnRC$A.D0pd>3C_sB(-p%7orENVmP6Q2kd*SPg?M+GB-RBH0'JU%NVt-qg%>-A-Xk'!V2sEXF*"<2[:htt#e L*[AMYK3B$ .lH-Pk85-mqpcW.2o$jlf9>%UCd%E2(O6)#kR1,1K6OPDV1[2$&)bJeFc/R@(2Vr5Ko==WmK80+s(4>Il.0SE XX= )-<g6\FfL;-8;+hA;sLS,mYNX#]=b't]\/3mj0lY&5gfCE&)F]e+SJLD$.]Zd7Fsr2qbNMJ7#)E3Ah>[W"=@BNT//C4EGCWVSGo5/iZKJ@$A2:5Z:02L1eR-1Fdh[ljahX"+-XaD&c2`=eod1g"_l_mIK,kVP!.1^7J3\YR%G0Qjr9N/]hgAdC3@bE$hb`$[A:f+Bp1rYU6TH0!KcnRN4( r9 ][nqNnS[brTD(`CEkeoLoKAZ-Bn227d-k/%$i?aU2kD`I!Cjg& 'H-/o"E'7bdaY1gL6bpAefnAqJabJZfnQ"^G@%SV)S.4 t>>rrdNp7f5?Rb@EmA6_q@(%OCd^_]SDsbgf+C488AFd%Kn@dmQ9D\8P9dW\^/+g-;_?fDr) qb#AY+T"R^]JkF-n8IP7o:;Q7W=4s&A8?IKrn0U9URPPf0Mj7^q'AL^<(D0:UsbP>HO>&N6@J1`s,&^j/Gi@^#i+YjnEf>G&!)dohal(\&Y^d\Z0_+LC$c6OFV>iPn9WCd`?eM:-Ahk[#"daffELAi^sAqbgKAf9q-+=^V#4:WJa#fI5rFrU'BM:a?b-&_-j&[T;,0KH,!:poUkAORn6L-U9VsCX9r<25r9g\hltAV79W%b'k2WC"'E6H*:EA A!V1`PnSba\kVrr'AVShRN[''U60+%2)kll<+f8+MefEDtmsXgB^;_O5A6cG5j8GpWVs#bGp(;;[oNB9tD,/D3[pcJ.@+@=n\F*l!CHa%KW@(&@0>=(W%mOj+"?og>4b;3Yl]ocT0Nr'C_o< P':8(\3.Lcs9NB8*WPk,3.[_29,Wpt 
`+h%:0\]?>$5s56aeiS;7;tHIgZ`5j_WONE,e`gJPR^88K+b VMQ jWI%]mc)3]1tOt5\/?;TM`=$D# d"#pfLjUn"Ns=:RX?gObQa2a[i2g&<^(jG.ZQ]>rJcqWpPjlA#EYif^<&.pAl\3=H05lr.=K'@dWb@AWZD .""MQ7dsXcE^g0t9(:TYUlHjY>DB6DgGP>'9[$](VBh&a 0t&@Uf1B9Bq4ef$]sPI'S'"=ef&b\/j<JP$9RfTA&s_o@</WN[sF=k\**29HY"(2ok6bIC0]Vcqp:ERXt7'+m:ASj/s4'EHTHV]NAcTk"4R2D3>'HAM"s1tPkh-^'GV4j"f5sAl%WCp[Y)9HJWC]@VNTf:o+'5_L5 =R[Y50##nJhCEX 4(!G#<9A?oi$pq`Pl&`lPWVAMrBDYaVam"L70VbSbACEAi-;]WP )>Rp4ep.3Ap&QF2Q[C!LI6WM4a*f)62!>29VZ6NG]7F)M'%H@Mjoq#H^+.XZtnV'Fp4H#Cp?055[?AK3LIt#_if_"9msn[+Z V##)Vq\/_>kM%I.,h=WA4[f+Ds:SVRAoHdn!o:CS;ejT"?@k-Qj*2!#eXpm>9RN&JS$Vn6kq%iOQ-g%Cof=`\A']IGVV%0O O7"qqeBtJ)Ula4Mm*86#QQ\TmkR/!J2G'?6e`)6RVq<DCIf""2bATAYWG.?PrG2ls+VR_b611R>5%*ItWq)P:5pC#X!:K^# 3 E-"eF@s5X9=$AZgJ>>YfZ.iq_C1t^JgZ.4D0]^88G[gsRo+=^/8d3e/(lr ?n4_4EL&%VX Sr$$S]@@d3SfX:\!>FphR;%P$R;TX<bL`VU3FNLfc<0osA, ri$JX-IF0(:A)PWV&sfiFpe8h RWBEeEagm+Vf^pnktf:V&nB;XarEd6A=6VqK<X#4L_3=5_+>>\$q=%mafk\@Z(C]/UZnI9R"P%@0U^_pf`opkp8.>?ZE/c&FVqlGBdYsj*S!Dq^8!K1?%mR:g:PCNqagL8hS"T(l[ ^<\J&9ZN%Q!A<P`C:*^c+RW1#c\_?*bnIY?5-%I;+aG:<Q3Nl)KB/H-YX4;;FR>tVKS;DDkbboAKT'nkBqa!Q90#LJR;..-te=aabVt@40KFBr8h/T`Zr"A-P\1`84Xq\fCl:USe'(eR>dblk.#l^YIH,*rbK4(Ws,:?Gp@d#W0_+.,2aiK5trRDn`KO(+4kemO.lJ?g^bOsWG%D^P5NP*$nf7kFG@%0W]MlP*3.$TO^l 9;i'p#OMAKdl/rKO+jU3F!JBFdg!qXAl,&sR<'o]*p7V[A\k[YNQDr@"oct>!h8eIH.QU5'6L]Vpe*-0XUhJ,5aAkK:6]UlBLMtm5--at  3PA/YT@^D9!E;%cUR(ec5*-SHVfBA]86_b$]gT@ FOG!kf!mUmpI5A\lH,^sN- \%?.W&ReT]TN*jJ;3!Z7GWRW'[\]=Sf%A\GK$6,s#'Hr_ME8DKc,HiE*H@0f*_TC(8^'qCZrbh#.`50hC]9A!oK-Ie?]f^gZD)'sC\JEmKPK]1&6U:A<A=HOlI)9?$ErSj8$\)6.14Xla@eN,lj5+Y;g@A,=V\l7$h&BI];RMK6(__^n\[Z6rLKo`oO\q-p/$q%AqSEB-X"A=:AlDR1lKCHVl.Cgn`4?5C2iS>&g)d"GQDg4PJ,1.ADoP36IMsso$*m!@4"Geh>QM@e]Xd-I"C)D=`:$5An$4sle" +\s/k".M#dG 3T/,,LipcSZYU%c_T&O_/HrO+U,IA.$a+=U GBt4BgpPB`!Fj6< &#:5M<??Qr .T[`"[Ag"O^2!9 
DWIQp_/7B.t6+aA/W9DZ($JcYDAf+Ne@iLs\;P`/>C'e0o,0](Clf:Dfg@-F"`ig:*Fr%>HIQ#V/?,t5H4B<Qs^A[l%88+d<$'aMa5Cm.fD9:A=LAHbpA3^M%5=rcnMAA$pCANJ0qYil1jG,8IPA/4.,b/JX#nhUbM%Ynn6S1]*=`pO^q_dlCh7ledKj1HfCU8$"sA=<\K=qKAfEX5+*?liR$rUBh"W`p8m(**5--fCj?,18QS"?h5^iaMZZI'V_qC#JPCr.`&0Q=n8LmNemp1Z32eBO;EACRQWaIaj0(]8dalOb@!NV2!tkfsC?+)\'f783/-1Yn4lV7c*A2Al9*8:;(^JSd#W69%p$jqQKJs8L,fAaCLsOPT5=;+2Sq%VmEA<1h#6J-KpU$0A-aA: $a'NV]oQkk^(f?eAnQV&tVO&a9p)A2Vf.ata,FqU]$KsTf']49JId2t'R"j7jO^$A;?9:0HiEi<scJnpnG] #>i(OrTP'Wc_G#2W[Z^L2AieEf%=k&&-a0f..E8?2gh7=S^-JY'KkHm;ABpb1&)@lZ!eQ*!<JE'>QXs0.qnTHkkO/Vi?pGje3%^0`EVcan>G`Xi XZ>kheR`oFNGZ'#pRA4:^2)Y).tsO%g\YYK#n/sA2gO*?MZLNpFXVcY@R;S:X3;9hHgF"N_JDmYCE,=lPY+"&qkIgr@t!!d?I5bMd^Jg2 IVIA=FRRat.b16dEc7l-V\7^Om3 LQd0I,tsa@fG\sOMF%P-K56+\82XDJ445em!")a.*49s@>W6`j)>]E$[5h+b`-ji*Q! gCZ01k%VIBlCt']*a&1 MUSJ4H+AH(BYE&B$7JT/TLbqtY9%K,r)Bt>XRsk[l]^9G?4p&N2.5J)4#k''Mq(1/FnAeCZrKqWASh/Te^'+p>)O\4rL[hY)F-Y+X,,@"#J2m:a@\Wp-]VbfRRl^$tA\!)('E-KC*[K27;KbY#<ErKh#`RYX>$*l")_lb'NtU:GX%n=[BX`.$6RN5[["/$pA^,H5/*/;WF/H.D'As+N+/6.YYhfjX-a1Cq&?6I48OVR*"tDZ=#p\&cDnbnP6Qe\tN6C_Wt2f@ATsR8]9[0#s*IUI>gA^be#nKI`4fl-]%0qXQ_VM1Q=bY1hn*,3UIBIMOF 3@QLV' )F>H>lfHZs[P[B#bRK_f&d-5_d)Lj\*Z\Q+aI#U)]D9TN'&!pG=<8KScpJ'WjU?^5&-EaU$aU5AK)E5eCIO[TKt[&WPomg8`m=S5j9lUKB$`i G<H;BqtakAhJbX"TK-41AnYEj2\$ID&PhN(sk5e1)Pqmp]!G5nD4h3P<Glm!Z^^!Tji.VZ aj]=1/>K5+qcW/>$FKQ:_rTG6K,#%PWP qM4f'E^ r;<_\Ja4/2Bo%a:f>)Z`X?)]0.V-<-=fkPIe$tRoPdI1M;;d0&1W5T$^dWVn!CX&]b8@:_AbB1c_V+,_:NtkthMM6m#Lh/PR?VEqb&lrCXFC`O!6+`Ce^5Z$s83MeK2`Ho_.jo\25P7ghe3XQb.3bfF&+?#sGeBp,?"*KNh\1XUqdn>j?V*Jm87r%U+ZCAgeF>\(9SPU@GI<<,C_j$QjT<fn]FFBGFPo-WqS"a@&T3!;Nj1qjCkfIa>>J3k +9`RcbX bL!&*U5Il$PjL&^W>:$=FcVZ'qg-8VGg17-o1"B:B@iI$$'h:F$5#We1rS,^onIE?S/@%_gc2oC_ AG-MnMZ b[i ,-#BG_Z/jh0;ebCkqCQef8UO"$M1cW O<tP-WA /US8VJNV<'+/3Q#nWt>"Li;97/9?Z1bH8MaXEi;3aAC4-2dLtfU@_=>R:[Plg8NkJF<4,dXk\WOq)tSP=T^)b#G+18Emjt.eHFst,G`]Xa(GBJ=5;"/>;/=&* +'/3`Ag 
kI-[i*Z/T2#ZK'>7PaVO,kW[20(nU+(BqGB@AaZ<4:YdG6jUAXB0CVteK!7Tn0Ai&rq_J4>O>siC:?#FqO6n,NY\_;SElXKtLD7WQ+*/J_Q19*]D"H(nP4,IA)p(M\X`or6MdT?RU5d-Mg8ZU)FIn4Jn4A-AF[mQ`f.a`;j[AgZL>3$<:jcnWk/!+Jajgf<F!&Ap:Hl \PE$KX,Jd,bnNr<$>:;p3reF6!Y!$f`&68$COR&#9<["a*O2dkf]hWRNjKkl2K/Ts+m=@TjE2%^/R8ST4?+*cSY`M-+`dNhd+Yn3d9GV_lTP/3#^LKM!h-j#0A-QJ98d8(DL]Zjm?',U=+O5e_$8E)ikIY\Fbc[PLVco8:NYq/dIE`bYm)0@7Y+[DtHrlTd[B?.TYG02neN7s=fF#HrcdSe#B'R\a(X%hKTUN>-Gl]VnX-i)cVVI4'TRSP-X3Y<7,K[KAqoOIWbhWD18$\*^h P]LL4s7*KdbJcX5HSOLjEJ:DY&SA,nZ]\\b&^r41nPIoQ(83k<:K^]oW940AVfHYIb`GW2,C.G9X*tPI;G+RrZ^c65Ar0lp3Ah8Pc2CAgZr=9DBe8X8JmT;7*%Hkb9tWg;Za)Lj(VUX.>9/XbC>GrRHqtJMLA,,Sc7X:<,SPAVF]iC?=7CX3(;Vq es8I8Yjb[(mkYc%"+j!hTkP_c+_K9"KLjHo_2)$^;:;G;B@`!$Q6(W0G Ajb%tsAI'YWIFM;gE2;05=$db5GT#s4i9)bp//Q*GT?EknHAdeND;ABONA]*fV&3'-.@ae oWH0dCS[?5DK!gqA-<H:Y\-sC^D0@'p/J1".ZBB@*(e=/@]1BN"8V:FmRc[pWIb$*]R9\:GYip?cI\]jofDFVk7PI+3DniX.[?m<Srr"[F/00.cCMM2GkTq AA<.FT5I.m<BT\s"I?0eA_G:9,$QsXDANFVD(0r/7!pgpp.qgqe83ZmnT74/%A@b'RWDD0hB]jo"8B&iH!Y\8R.'-AB`d4p-r3k$MoL)XHgsW/SA$0Wp?-fRfc[Lg"Z[Ql_XXdd757DXnED><cSga-r`sHH))obA3YIcNFIKq:<>b)I7!6-&ND<'@i ``I/YqMqY-6d$QCGOR-IiDk93Mbrcrl4K9/mK*5MP>AQZ/m6_EEti,m"A3X [fY0f"(g'ZQgE+@-OBg]=U CN"=?1m5\=&h0C2G3t-a.Zs5i-PaF<qZ?g?C.BjVX tVUNXj;q6;.&'M?hVCfN-G?462IPJtK2DIM4Q6YJ<I(LG8!q8#cM&c]YP9D! 
2TSA:ABUnM3[A>1;QnAAZHeio-9sq9\UJ6Gj =W^(rnNgok]mS>Fl3Nb##)6#&DVWKP=+U3C<YC3@p <Kt:N@q@KJhWGdM,acAF0<,nGNAY1jm=XGaDBMcB$RM9E5'd5Z&;KKADHAW*#a.MQ>WfV ;?<?<+cZSpd-+3V6o"!j]p53_b3KQUP0G;72`'5_ipWF59>/k\S*EjnOe,Ep=WhR9ABBiWS-%?F.6NQq>n#6k<*H`8U^B88+:-<(^=T,5qAp.OW%i5ncXA'[#2@72_tArB3s?'6=^faQ[ak/XQ*bqmJQ`C6SLM'E8i^Cl`bp!c^f,_5=t)o;\G*0(U/"m:)1qC+JUdt02caN)oS(WtK$=pB/A%qC6:Y`Y4E[UK5KTG47mePiqFRacGX9,ps\M1sK%@r@S,9fV((ond[)ZL8@.PR_o+2U"J[IADF 3!5sF9>UJIpQn830RX2VfpQ@Nomt_h]R0#pa?lXDjhQ/ZqWjnC2T-5bNF 0E'L#-.t+bNE!:U_W/4Vr?Up<\Kj&/K.p@Zh%PAtdXJNA@F Yp4D55m\p]-[/G2PUo5>haQq/^^Yhl*Tn?Fad=jp,=Xrd_eN&0?8G`n%8X_Eq98n8O$hY=\M,[@n$b32JEBA(9=-AcX'H3BX3\Xs0U'9m\E<a" N>Vs[(4)&$9AkXc:(]HX.dL"Kq_mb^2O"nJ_A/EbFNXT9"pqsZ!FCDZ5sg%jm2fIf2A:_A23WTjS^?P5ei0iq)a/#:jV8C7-r;SP>M\c_coqI`W3ripm6No[M0W+jtcAZsPS+)BdFVh>#lZ7De 'ora7XVAS@c<ZCB2gnd$%''3h05S/#%$'9V_Jm/Y46X^e:boLHs+2kIeF_(s#Q'<DcSm+_reds>AC?CCs35JjCXb9'!AJ-D#2U;MO](q] ?jhTskl#V9O-gZL6!ad&b95Qs-UG5hNWd%a%C5B;Sq;;"A'P^0mJ]F`SfkP2aQ_-@BpC@TBV#DdfO7asSY<qk)1\FHJaZ%A##mkbL(CR AC.0Q!QcHL]E<c\'XY4.94r_YQg`TXt%\[sR+GAAgHF69[2<lW08PrW=g=[b>'!De! ]IT$lSo-c;qS"Vle^I:k(Oc+Y4DD jQrFXATEUlm-3&3Za]Fg<05T&^!YLcL%<))1A&>1N.YT/D+):#!go-,+31P"MC;k0Y'p.<;CoJGo+a@4("!pX=?Y"$0E1Y"%UAZo2_]&KiNjH@*PKmTY@7I$_a)$1cP;OVU>I/$tG3RJ02Hq`X^c\_E<!_i2Agjqr\9?K.O$Jj!_W8A+cgJ$Ln2rB)8J%(Bs@A@iQksIQD<GH,tJ/=ekf]!mOtoW. 
>MCX8Z4#q,'HF/B&KiY"IQ&p+$@:i )1)3/D"*m=2MHX\CX;mQ+]((C]`%$A*c.L+3<kJE.,V&l<9hT#c,8ArOqM"&6GG3o%?PoJFSA, XdL >7:A!@A\66Fih9VRW5dQD?1'"t;fq3.lrYB@R>7Mc>mpp>g_DZ$Y P@VE)Kr.WAjScRpf:d+af_S39#79BfE00ct?DraP:=\l3hNQ9H9>3_Aq!lD^-ta3GZ\W;Aa7l&s`q1?"F\JOpAR&XW!QR\(AB[ X(t34Rf;1X6_%d4-20)=!4!TO8I.&#nl/#.P\CAYeHJr'c$b5EJio^a:e%m4tqZ$3H?%5r)E$$5q98rtU2-QbqXsLmFA`q7AjqOIK&X19N#beR,]q1>^?(Y*8rMhthFT1qsjsTCGFJ6BPpJCVZlWC>hpKt ljt#+1DDZGV@eci,19LnK]j1sA!g>f9G>&rDQVA7m#Y8a6;1q8f:3Gk(VAc8gKXh"f.YXb0(H4mc%#?sCOh#jX!/Qa^c]5@*NsC0.;WIMJ2l W/h2FWr'Vq),Jqd:Zi$:!eDaC'k;qG$1>?Fiq_hSJd."BEJFU!QIQ4TMd%A"5rD-*WAmR_BF;866s-s&Ns'D)fQKWrp%9Vq27(fZrjKA;3"NWDl_]WD+_Ji?lY"D%LRZD*7m+:(0UqS`3WA:;AG=rKUii]qJH[$+Cm2c;?te5#YeIjm@V@(E$U0XQ^-Tq/*V*@hniBiK$M&"3T1]hV3""q@-^7d+D! Ai5D;=k_t]Coa&&4UesRq(3!S5XQB"j`0B`VIhBt&O9q+YUUn1f'1GdoKk"H$&[A:bst\+V%FX1,,]m>k%b!ZY!]mk6Aq0aKQfmbA?`-"@&_si2ig^KnJA2i4)5RR.m[1C?IlAj6X cQl5)OGp`SE>4&=%)ofQ;O9.0r=.pcm)HY*AMC86%"g`2_Ac2n\?AbA^p<c8MX*#*d[SL5N9XSpP]O)JFk,1@P[d3EV44.@RF*^Z;8c3KFKg`iYA-G_K^a*PcbN`GT4O8"n46?A51lACJRdmbsNY4Pae1tV5+3L^ZH!,*4<9Dre!AhUb)T:f/aT_'hI9!S7ZE(->]!,HKj>g5scI[8cL<f/$?!+5iQVL'_^D6WMXg6fE\ImXTjY2pN.aH-Q0A=/SNO'$F6%#B/;AgcWY%cP?14a`l=2BoFZk<`2<q'+$Pq=OfF?s'e'<*@C$g7rm/0PjrW4<e6N;..#W\X*@s>R# 9snT;^9-*N3BG"rq q#R;K<`"eb+`<"ie:@sA:89MVQ8N4>2 9\UKt':KMRV7!>j*Hs++aWcJOOSg5'QcFV%E7"_I`k^G:Rj"rH.42PQ1fF00L`f#pJsroLXFr._M2]-J] ]gPaAi#BA7M8iB1e5P-FSl4j!GTk'*U3PnPZ)lQrQ\Z:EUS:9i3G87PB. 
GMM?=jr]=D4BWeWWtd.Zp$1AbeSIAr-+-GJDZr)g: k4EOKl8g?EMYqAmK0U&N7ogmHC!A,]kGG<>!qThP`+3n7Htdn9$#*WKPDe"Z@7?MH$6AiiS[RMsq+$2f-3:U7N"X"(<OHg':MM <BWW%-pZA6 pn^8?Ao?Z`OO`/XM/f1OJ#VRP0Ke[>1I?\="i]1(-JmY%Z/4%)^AMdk/n%=`%Nom@Nd13#(YD`(k0FJrFiI:a.F4#N]U2WcD@rkT!G:^k3t4DC'Rm8C]L`ND$q]WYe[5$m[4H(%CBe7.;,t+ArTW!=lTKmjn!i=_4#BdX(ds1#1Gok3GC;DcO5WF:7@BT7[Cqgo> B04p'1LQYEYR50PSsAE@1hi(*fjocJ@L),e>[3AC:_ )0G))l6p?05M\n178XlA#h^lpG)>\]DS^_D&mULnqgJJ:2^$AVa_lIFh^dkAPo[gt?C'B+H>,1`_gboi-M* 5 30GeG$5B*s:l:O(ZBeUt!.o#'^86\%Vjdb!0tQj5Y,IJ"(qUD(Apj-s3[2I]fM41Wj/#c@?HrT+J=/WlpU\>GUj]so=jIqEV5BA13AD"d#8hl[#^2siAb'[&!F_(fRH&%h@NAm3,>XE4&O4>f6IP\6%nTrb De@EYF,/Wbs<dB-r=](81:nZT6%EFam7fP pQ<\U]-neKhi2HQntoU_IHXWbAZD0EI%&`i4Yg,C'd67Oke%iooo[b1,Li30efA6r6sR9GM*r#06@81lGe(&+ R]@Mes05A?]SL2W9o_QDO,.nBjHri+(CC?ILngU?%t"/RdCUZ2<1RroLqSKk4E)+&1IXeCTl]H%mhW#EI5)c+Wc.GHI]cHh8Kkt5!mUpV9$to'0&m95/c>3%@p@E$YHQE&Y`Y_D)g2@'/$4]p/Q#N)$XnatM:<:HrPg6&F7doN,Nen+bHtE4lJN&QT9fl^<G5&h47X';`9=/<)(\s,-p,3tjf(=37aR<n#R+!*(e>P>W3j&Y&Q!kra_M9[192YW>R`!U^!AS^62KE;=[AVrPK:i^_9] A)mU%SMngF]GZ D-Y+5S@HbHp'X^@*/V@QcU*>iA1dgK e<""`R^YY^):rZ!#9#=ApYQo.EkJ4.*EXcjqmS*:kQD6gYdi$.HBnsCni^5rq+SY\0hC=:t:UH,Ns9+V&k,Wt$DpSF!3HfAM^fI+-A*"9!=G^+hL9R@4XRG3S21VGfZ5a  c!(:r+c7iel3<l.?HpmOQnKSC*]5j4H2g$%qk>(ZAIbQR>L1BsVnn3L :/Blg))[-&h`&IR6YGFCXBKV&G0)@AGTW7QY'$XC0/U<(W&*<nT9P_1/bWVDS)q@^?p\T&rQ:.WW-2VB'ms2(0L(dYMXM7N*nt?^EXqdZi;Wne9_t2`&Un&_'N)%n]\`Y6?PM^]GjL.^+1q&?+Dcq>1R'&Ynk*A8Gnl]3j20ReM1';6T_BK*9h@bB k83;"nq%n]FqqBQp'?QD4>6oSb[4dm.:+@YRTFMABYW3eI.@<D@,'o##hVAP?B*kMl@cY+AshCHMM4UClWn#O4tpgtU1a*"<OgO\Ora^o2oU`0k38E\V,Rf 8b&lg,TA%>5 cJ_Zt8E9640;C9AkBabe.0nZ%VYBYF8;kl`9^^`aF7Hg''q#]bcXKV#_@pj:)Pla@+ 
+!6A*no]^?$V8($RlJc]jXWj6]=9M61r*f;btbBT*(N5Fl<[ON+'4#X'hsGeGWpML5&6XAIGWjsRs]B]8'IU#-m$7i-RQ2HA)Mq<dd>NU?/b_KnsYSD7=8>[d.)*SQs^K/"+n1Qe2B`R/+oK/3Yb@C.1O,>kG9d%=D8A2'aG6Ln-P7nrtoBE_;t$aA#OSj/#dJc=[X9D#^-#i1:\rjR3A*5aZC"2"bQ$otgkEkES\MoU%*H'/i;T:`f(n8:\;"NGRgBQA<.Z@,7a3R=;gZA9MAV6!KOH:=tKWJn)fdoSC=ttJ*N,B5E'_L1?Wdl-M]bGpf@J6'[gRU,Y#edf=T[($O:22\<QSt[,8TOA6Hk4qB>4Mo(Zn1E85:6lg4+"%K3`PtR9*R$tlCJ7FJ,bcbd^T_N/nA!7*QO\0t7JPSrlf:-Tn=q`S7WXG1;7d!N7a5<_6Ul`M_eTE0eZbfb0%E2])7@P3hIDkf\S>#i$Z*[p5>s@ZV9\q=n_O/Y)G;*^S>?/NXM"t$M@t'am*M92] \VEX]^G_XTfDKL4\tA\Ls)J;%:1E%A@8d>&[Am,:Ke\'8kZ$=(+K[s<STch@rHkqO7h.PE[s!nf_Yp% s$7RK4Jan:"Gr3f?nSAD%^LXS7]RIO;VA%o8KAftsO4HE(/nZcZ69k#n`F49Oap(K/C=tP<9o%./Y=S?ENV9N"?Uk50BNnNUA_=]FHA4&-n34/o,B]%G6s$h[0X$ f-/1N[@=P.#PiQ5hb"* T(\LZ\R*+9.3OEi,E$M7_s.#h$UjV)Z[?PPHT4Cs^>)R]rj2X'!ID"#VO.>Zh?L"0o-=pY^!=BRMA5g#ZK8-/:ab3:mo/F6tL-jb-N)At;%VA["R. _o/Wb#;5C_(NbF1F"G<a4IINL+I3p>4N(oh`^$18bA8aL`8[>38W#c6t/dcEkQUoZ0+S;LS;G]a\>"QKCsKT4pG6dTVg@kPTZ6F$&*/p[c+n,XeFd6YiaE 'a'n;C@t".Bt*Z?Tb8A`2sErW0CXD:rQ&B?]Ys)+`St-2ikC#D<I!;ah@a=t7E H/^pR!=D4%K&l;nZ_+`g)s%?K;C+^NTL6@F(5Kb)=B(>D5$!LORR*jHj^^eWiQV9GF*EYnr.:RT<+^oe^,DR XQbpF\JalQlP2%"%Q9U(?XX&ER:`cSLl9-%78O7Ol3BJ")P![X?J?))$2T+395?;<kl$ponP-U+QoLVARdA$?eQ7T)l 5(-A$p(V7BE\3Od/+&U0<N8[3FZmH)E]:N3L]Yh/h]maKhD@7=-C!f1%rY"An@X"V(='5_:7M/\sD,+saTiZ?Z's`+Xf[MkM'eQ\A^,RJOh??:.g+m3lj1*Cj'U3\@X=o,mN)]8m_Bj&75@&ZeRtR?DoV2;Vh($W9'_Gg1h_Q)(/=<p_2rJ&C)^g,oq(20sh.-`i_MZm-Wt` p0$P*'9Ti%l#oNnI\YrtB$?/3.ke#Fht-$m;`phQcSNMAAj+h\fYbMF]*AWAMl.A?),A`,+nk'6^]+P9,d>[A5/<4kqm<(tKhPGf*D.O2$-J4t2p%5OFiQ8Y^1#b,5gNYWsb"e/t[`JqnF(*D+5aD2!_"bGEcQe1V+>BARj*J:t*MR3.\WiSNg8BP53,knAVNm3:pD)pAh.JmTnC$A1pZ_$9VOs^[n26PgFb5g2 WWr1^#5TJ)F,2=R%KSp2J!)?Q_b$s9/?lNTAr4kb:/Zq'/%o%K>Kt1$=L5Nroj9_[3;.;+ld=j/a0kq.LE!Nfn?[E<UZ?3f)(1DA/>N!1@:GsOElhN0=qVR=ik0I<"r/m^ZVk]3.Ee_3;aACWCV<;0thiq]hCj\^3/eC*<Bj:\.*EAXs^C&V26/WA"b(`m!Q!^fJb`Y[sT_;EY6VJjN0jFR^?KGXWYE&.PNiAo(l4Yj3Cp6QbAl 
&S1frIP3lPta1'^bM$t_j$H(c3br&C)>8Y^$RKh7^lBMQR4<C^MQ5RdO`djRIcbnccbY7a4IU@2%3!M9[6CtHIQV?&c2fZNkCnF0=dlqQ.l8i(c0p.1J?q>0A/7P8Ap>4P).gUKPe2XD,f7pAV:<goLf[JFeEED/H,<Zs%VRV,O..7=<SS[/1gZE*3 U=t"1KNt'%W7]n12+C_Eb*2"]'oOiU3q::&AUbZA[B#>aL^d,Un`am8iJ"+o;p7<(VIN"&q#g's^eQ1lj<G?q597IW]g4! F=<7I5A[,H@5GlLT"4dslHt[^Bl=0fT\3sY Bs",^K.Tdcj1H#Mfb\@cb33pkjI<'\dAk(se592g-ja-6Qj^(PgBUL"iSGhm/-rs1eaG2meF%8i7K0besQKi_?L@54>k&LhTHR!F<92ZQDZg0"GKS&9^eE]$7-fad<V.48j)\=Vn[pl66',-7VMi2!_ "!3ED)fsrd_KD-RebP^>7aLC=2]h.4IT'`XR(_s&#NEptr_h5^>5[2Ff@O[gNZ?\Pe5k/AO?MlfOmHL&jk`ei$r9hArAXg."tEV1+1F^e6WeTS`MjTM4;/SpUt,0&O+V'41Mr%%<nX+!H!Dr=<GB12^olgI4BdbG-lj5#+;qV!TA(7dnFO@?#fd',Dmc9g_U: UcKOXfsJfU[_Mq I\W&,1j'GSsJADDAeSAAn&-ct&YE_Fh'GFl4tl#SE)#qf"#\dW<o/6s`XWfl;0*0j[?T7'rdhGh>ZI^t#gk]<]Q[Epr$VD^00[rP5lA'OdjbU;R1FO&*#)e _"N/8KCEOi)QAWFl<fl EAiOc7]:JKSgSACs5aZY48\Qn)BIT#^^W4E:ls#%X4bT<_`<Zt+p]%aTf?(k,MdoMaKH!rV0"/e5=_]rAMhC)fL&VkPNc8Me`;Mh@,(;.Gc(?P9$LqWrWq0@n8IY/$A]mVG#;\)Aq';\?8-@W*i-D_U)*<Pr!IWri;2J%k&UAL"k1qQ< F!NbLt?PE4/.k@A'$qV7nrh:;+G3b6T>;47_k<[$PA0RR c)HAB52JL<.+TLp,!2sJ;,UGVk&L#ii!4V\mibWR3Kr6hp5;e0<h^W36s,-UmJ)#a5pGJ&*dSNkNW]h(7E]2btEqDX,Qc@9Q*9,d)c#kIZO!X-hH[>>]d.X<d2C'GkH'$?7X+K7)SHl=^],S?dbX@PA)SFVoI'jG8sM@O^\@c;F6\3Bht)lm&s#A]9'6$tjXNNWM-9a,2.p)O4Q65;2ddWA]?V'D\"4qgnmA9-4\-fQOI8g -<.]QL7kiAtUf GBU2K)jG!e3Pb3G-m1hFt%![VD:K]g:WU#qAWAD93O/6H[&8i'gt/sRIrq6%aCBTc!:WK;bm>HrKMg2*#IWT@6#f$JR>W6U#BnqeJC.iTePg9A?T<B(&T]916Aotfn/B$XBV3_D:a-KMhL#(BGq4&r^ZDnh2EtNcH>E>cfs1Ip_N#L$s`X9aJN\Y^,2CTllW%8#r@MdsB^@7&MEfCMpFW0Ze#2!:S0Xm< +S]6lCPk9L%AFM$<.l:V5hF+eXAt]i>%)C8:VA&QY]q]Wg[;@G21#mO*5"3ACRjDO24Iq6pf8e,L_JWU )8_'+&O[Li'IJ#<&(IUnHnhP=7q)kf`2lX4m;fFq0!>)p9k+]a2 c*:'^,>BWlUD^W-63A_O&[X]Zkp\da)r !WRk#[j2H)N^4Ss>k3lVpgiLM&iSe\L4Tt"oO>s=Np)UqfAY/>LPQ[r0L=?5lMWFS03H,XM2J, VN,%O5&2@msR\m<(%sDJDPAT,`BL%sk@a9.sq7*]A;n5iHY^?&ail;J?0**s.^'C].*sAKEn4[D\Qg.gSg>jXFfX!%6AhFBt@mM<^8jOr#Ni6#U:icok"'d!r>k(=E*5sQkX?^<^-4R)X:!a%RCGU83k"P*:eNCCBfLjtA],k&+;oKc 
D+`slC\c.m8?LVsKJ*2Jc@%FC9Ql1*o%>!2hFqnKD0cUh/Ap(_4a!AYA(gf6?;L0"H0R#AM(Lr6%q@A#qfDo8CaZppKbk:H,UJepP'L" V2pLY@eNG`;m!OQIE!^?RABSLA_pb.FbkNoA%m_'F+;^&AfA@m]<*J?*!$NPKr-=Ze%kRU\gkn^gDsB0_?mk`QFsHcpJt3n;A`.0.5)5Y8NttXQ;]LW G7 b_3,XhdYA0Ak1Go[1j0K!-rC7X+1o;;U<UF+c]GFkWifp/XqsRTQI5PeL_1`b2]?(gs4b2PIBj8H$Yf?mq1fBcB3W8^#d90a2jCkkW&].9Dpt*)&3`=L_`i0UsGc8JEBe[HKqWJ?da9A)*hm%Mg!ajr[tm_V\/PK"A)4?>*9iOWE,C*tkLVZ^U-olaeOWM?hkQ2mFLlOfV5Zqt16o&,C;<p\4sI.a3-1n%"O%;]?@&&Kkb'do] r,Who/hQU<`Jt ^N0?d)pHl2h+,,m3o7\4k/3]4f78Y(=N"JRm0V1U``6Y&rBfgfp?'FG!bIFZ"_GefA#O.b>(LlZbfmbX N0q/c[@WlhTsEsWG&2CO=@@EFWdL0sAsk99o## L;[f4-*IE>7;%X+:A@F(=-U6o>nQ<#./LV7FPb1q!6aROsa[[\@I_F:/<2(7^TAL&8@Zo0ZcR.YO<+'cniABAE'>O:"iq<j/-P-VV8+7JALk;f<Qj:r9cA<"8IC?X"hrkV75ca$;EaA<_?`LFUiT(U2U)'iFY.2F2<'^;rcU'^[1MjdIs8pKcW\fo!<=8rj538'K.6"UAsU)IZHEs/G19N&;U!(ir@,0BK`O3@7\h !X%])j;R!h_A(NhhZQ4g%<"L=8A?6A'_Z!I= WDY4;>>^=5NB-'3375YdeL hRCPe7UQXLT*bcA[13a"@.TWjViKS^rVT.L.`-o iS$!Mq?+K,Q1Y %%94)m#8g*i QMdVkn5X*1=`$&8jNs?/isc=b,*SAmYT/^MWA]"@SM(*Je7G;7QtkMfCo416#Pl:.*NkfSc]T5J;)!ff0m&q]`7,nC`<t,>bMbpUn\)o7=LTfP-c%mTPhm:?*![U*"5((V?8,FC"$3JL]7n`.>D$AjCl?Y/E%%C,+<A[/;.9XpXCUb5G=n@Y(BjcjpQXUO^XYMqKb\#W-C$I]#n>s7\Ee&Ch]^ m-tmoYADoQX6Vj>df&lXVCc?cSBbdCXEVaI-2;72PVG b$RB*iBT]\O!)tOH&fTbp&LMY bAB$3,m$SO;@Y>&>"SqE=%gLQKq@9A<%'_f6r-9V]`eC,9`!"6cEn:@e*W%tI Oh?Y[j9Hj#"t@.#K5tC<rGR=lk;8)rk]BpIS(8MCRU1c<=oq%&ra-'kPVc(PdFK5,ES"@(_I*)8\"M7q^EeWflUAVCDN\M'MJsTZi+kXPX)43T$.aW)-s<:#o8*Hq0MD1=8B23Sj:>J]<f"bYDo]":.r;'L/[WUbel>qVVVN #A0/A#CMrgb2'slW>-"Wr4Dog[$-j]eo"P=Q-cEVCE.4_/'QR8rT+8=8T2h,A@'N/\NL8[.2K0T\;OS A?(AS&o2:S_TK+b`G:bm:e$N)[G=# ,@;\KF:R$&`D];41=E9aA)c b[Ks+K9l,3r>lZ%LBs`(AciUpF;A*$"^Nt-i,F6d22+Qmrk9@s 37'LPk3-dHad;Efkq$nJnleeLA$YIASX_A2)bq15M'1l@W@=7Y&`bqfWO,.6p/pj2^8/=C._bId'"Td*Slpmo9JY\@Fj7L"(YN!jAON2CAtF`:rDtL9b3JQ7\ZmWZ1LnD9bi,liT0(e 
HY);_0Re%r1=X(>fl\9`[Pb_.O)3TeSDF`+h@nQfA#3SZa@__C#65Gc]$6J2R(sB<T(r=cUZVSQ'1&W_r;I:-G8Ero67>FAj)A+VL/"LGY5Par[F^bo!m=sbTY-&aa^;LeRCD=*JNrF0H[p0U1*U@TL=2]O.?iK6,$T<`@/;Gme^Wd$\<S:W0S=//-Q@mim^i:JJ[7X$ '6Q5=jnGC/53rr[[mTgo<^:;Q)%PTYcli lDS_&F&"GRFhV?B<:&/\T5>nN_/b8Ra]pZFeq#g,t?D-V]M5$II3(^b.17rU:(%_I!:.j3M1oooLBP2SW#(V 3I$9CVNU4mdl1JpP&oq>,`mJJiIf5e@\H8i@IcP@8hk3]O,\;43*k4?'q=12)$A6>0DB:7B_el#XZ!U.5C ;/?r:qM]QbO3I&gA%#.RncpfG@UGTU#M) /SH+"r;Z(mJ+M?i]*CcU#dkn"J<4510Z.$YcW=Gs#0N:7"?0X9(C$7,ZITf$-1<(YV)i/8it@m;42Hs1b1JZnMC[t!4YHU] @*!q^3l;bhQ<F&4^(,TTgn]IP:Oo9.=?>sQGolboCh0L3oO\a%,eHENt Qc&R24M$j0Q;C?b^=I5=A.ZA>_j^GY]Ds/D"E(jRf@p%dkE13&O8AY;^Or,!?IYUH%F5k<BGsdi8df)7&IeBj@Dl+:`fgYdo(4A_A184r9q8M3&$Pf/S6RiOK<ThnJ<KN[CT@MLDJsJHN`D(+P1lMaDc=igP)Q"^dESaAE9I9>_oCpY#A"FUWrD*R9Gb8iJ<p0:p27%XHpYNSL7b#?KfJW5 $STpEPQ_9N!_P!3GT#l!rlA2o$D/U V;H3sQQAb&0c4\A902=W8ZYX*_a3hqL-5PiKYPp-)R^`2I!'\]_jgBKF!PeprB]:,EAd>Oi'HAKZ/2:RYhToX!qIIp+`N\Y2K@ e,_3i;9f.LMmEO.Pg\6b$tjF5[sQkd,3XHK&HqVJJZ'tUcnK\gikacet-hQlH&pPAD5TM[ehBh;)LL1[#-O.e@-*DWq?7aeI2;LG%UF;_QL*osn[`TKs^EqZ8oaE'#:p23P7h>HBHd`#R;9&tS(& qKeFHh<$4'tG!TdG[L*-';>+RKU J$aNR2O=E3I'B__9B_\BFolMh"6O\4rPq$f[mXa5'3Tj4I!5t!D1L4dF'rbiUdtFj!(*fhh`L27$# PA-YsF*:&o+7%AoK!"T^<,Jrp"WIr8BAB\mEBeg,&GUT"$k!0(Etj:T=J8W%G"1YGqYD9]P&nkg2-887t@d^XhEJGPa3RAeiAi9,Pqobp+Xi<0)h)BgrOI&YW=R(J^;;_HiX_f]aY_=8@ZQ4-&AKh<q!1-tspf-U6At:!pAGO"_$b_c3coN49QQ0+ZA.lcM\PqW!$GY-/XB9a$S<P&FXjFFdOY!d)10".lDKA1[\,5rP)h!Am?f^9iTiKBZmA`2kEUVrPFdK@]DspH-X7?0P#%RrH6O+E57JR:-t7iN!A?'7'L7ZelsZ=QA^WGa&&kC/;D9U,"9fGW1kf5@,FW:[b*QiZotGbY5(92X-mp7<+;ES&(::;M]$q4D4q=gWH"`80(f;MVO%hi/p%kl1< MM)BmWa0-BKtVt2\&Ec'KNqd5BnhZQ1Va,5Kn3Wneb79Hl^fd5/4)k?&7I\ods`$"A6d]cK-(,mo/<)5Ljii&jOH,AUKI0GHfkTp"Y`.?th.`eCEA<,@RLmsD1!I!oApIl?+W,G87jU\\qN:;Q,T"e&An;mI@Gt!4hMEEF&N1rIQh.<MKb!3r\CtGT,4J)E`P&gp8;#abj,gCc$H@2]j3=H5W!/li:UZ/7B\IeIH^9a^J*9tM@rgce,HO7%>*RjZB`rL"'rMnZf';Lr+M/E='=$VCmB)O@o6T/I\!Ut]RVCEM>o99`ZHXp]81FIsqa_9ab:+Z5-MK2ob:"c16C 
[p.IfG",\GPUBAA1JROjW.c,UpK,Dn`'@co6AQfr*G?TN6kgCX^dI]>Tb/H!IUWA`nU@Neb05D0$HQS_I`'Y4!dTl-&:p,+CX^MMj\W@1TC05 icp/=Ce*YVqS+ZHb)Xr*U[2(oN.=CVI[H"!;diAT_Fi!M)L&G1p_lR0NFYj.aB@@_X`sB YX*J9.9'r-`6C%ijb `F]Z6nAL1X/nUas1A%b<C]75JGGe,q",DDncIIX/AM2=,rC6YCZ>M@Ah3Bj]d@ ;"8DcC%orqsAT7)h9\QA=i4A+Ka)6sG,SY6X6qh)h$&*kLL^#&;ei!;()!dVO;>19A\iZajHd;Ihk)I?$orOYU:scq3Tr@<+&$E53g#k@;5WISMGOicsOOq_6n1D_Op4F:7l#&EhbpQO(r6r8aJlJL' d**Nrr/\bfhpI`Q_>q/-3M)!8b<seDU $<4 []6";L]3Og $D5SeY[d/Fam*c1e9g]qA:Yd<>+ZZSF[Y?: d6ri::(sr%,.L,8L;9O'A5cq`KI!$9b0O7A"\:Nc@3LYA"eK3UgBJ/$\j%OI_/D9<;2m.DUr`q4#eleY"&UL!AI:=8'g__)"r#:VN3D1#<CbE+^0Zb%8LF>Y82mEA+Ubb-\g,AaIN&>?POXk#(94`fIfA%eQG!(A(;Z2`mFGs]c\qmY:nXY6#k5"<7bO]>5A?(m?^3PQ^C<>Yd;WUFRs8@D\C9l+ GS9RC/+R2VWe!Q3*X5=_sj!3PDC=A-?"SM_`CG#^.j-E'R:7:MVMW(TL>lO?l=M*4rgI>:052*0b%m[dOD"f AI.3n.MjLTn8K:aDTH@mTqP'qQ.HaB=?6L?b!ma5:"A=3tY5C)VRF'GIdIEa3JCfKWnQQB&^e7BUcZ>Q7?WIP.2dP)Q#T^gd0&AS-<EX<:^Q?j0(W2#REkN2PO^hGcsBpR4jkrnC0,r6(XTZ b>0L<&@4\LB6pcW "VsjB@0LPLMr 7qm]ga1--#P3;Wf4FVM;l]I)[Vk_^:IiW+KG>(,SZr8_%Apa0>SUBh<AOT:YAio-E:q1Qr70gp%.i<*i=k poXAGj8qNT>AW>VY^3\JA\$$3;<KZ$4.saZo8V[QCi=rn.`hE?$?5]lO$h+`0]tBa,dW40!'s-!U+<A1"VH`UJ'_7A=$9*$QNo3.im_qr[5e[W3A62TIJZF%6>RfIph*9P\MbY9llP22N1%o^Y3oU2D#k)*W DCN&1$Y"@TSp+`4c7 ]t0mh(IG"/AXZ,D?i?_-%l.I#g(^#Z3%g'RmB4Us-T`G]:Xo'rE5ZDGl5@</<6WW$M%$DTW>,]3crC8_:^HQF6h>'d&!%3['k9O&gFO)r7SEe=_]=6Oe;DaK:XZ%VNm$o%'>;M>q5qH8@DFFXFU^t@O8DQP-J*'"Fj+N4Ur5[?7h6).#+C[:J6I39>Di2CF+] Po"7*"Ih";gA/nhPJ>EP_9"?e*m3*UUAT##9i7eS%T[/m$%Ab, S^!^X+DI!tf?cN9@!t*L[2NWOZPRfc=Ao E$AH=\("8?oe15>dT!B]T<ro+:pAF1`'aH;XMr*$U;)p%9P 'Hkj?04I]Sl;j&$(,S3IC8,\($%XND-+i IPY]DU7<HN>s`@PBo?d-bfZ")mOi#$b2J.A3A]Pk<c;`eYeeN7\kR4>RtQRo Fb6G4YJcKB2-i^Nn^t7)dTOdC:/WlkR1IoW)GF^*:=YFA>tV94KRfkYnFApkbb^qLqkc`7 K5m4"$[\RIIYJ-Ys&Qd;P&RmL;fs#2=V1LQtJ[K10n+DYg&ig <Z;* 
d48"QsJ>eYsEbt4EjPP@FsaUjs+^[<i4)Kh$2fe(`<n>b%GqZr_+lgQ>idEMS+7'V)&=nn_m1%=G6ZLP8XTP'rNo>%[Qb4/4seF[&XSB\EVi%!S^SrtiD:ARnB?3As,%L@_OFhm"@:'/=N*A*OMESS2@M1VpaXTd_ISUSkUO@ERQE`SaJm%V0BX$#WAd9Q>_#-]SV'[:N_,_W4/BeP(<,PA+A#lQc+RF![]Xs#_A1Y3Dl_A3,kXp@OXT(i'M>m: +#b9+IMkJ?!94+p`q0Gb<M341AoM\MbiCE6La`EcY'[j'= =k2>rae#sVoTXtlbmSFt(^3P,?jpp#&H/f#9T8m]UJKYT"N?,36RDGgl(5i#j]% # XE>6l$5^?0&kKs&9h#P-G?$q1NMUXc"Y[Re</N>1,K%2RI3D q8?7J6lbG(>jT??)E/F8%MmcChF3Ie1M*d21#"p0K<UMZ/)o[8A5qd=Xeb`3feWnq@2P0a3JZjdIIJ[;D7DLYK-a1KF&Nkas?7JI_GYrK#aiX`P/sm%a6@#+2 BpJ:&D4D)GI.75Hb^MA #"HRlLD7em,F[bXDP.qXNo,`.+%YMNl PKmbRE=Cs5btD(M^QOP#Ka+r<t]Sb%Pd2So',(=Kh@\_(s@T4H'7(X2Ss$N9g7(CjD)nd4f8)^RS#:J7L=ONW'=1(EDk#t\-*fYQL#S$BE*M%g:XWfFKmX9.c=d2p7A/m:g/9]\SqUb:p`p(TX(Uqq22R`1AQA$571UcW7EJ2G&,%2Bm;K=bb<Jd`3YHV-A3h^dK&F$D[[.'W>n0:Hp?8/XRF! $T%-<0f8c&+DV[ZCV#+;a\c'<+^"e`A8+9!<03(<FU"Ih]^.m8SAAI4gP#Cd4!onbCf]W)678X$(TY"C0=VNoZn_4%\4 #J-j`+:M[!LF)feC)R_1QWW4G'0`C?X9TAB%A2h@P<NhDD`%R&*ak3-(D:%-UdZtdCBE9C1ba]'A$AZI05+N>F">XTr0,hl-+pZlKA(9a\pCQt:QtMjYYman]4nI]-kD@`kO<D#+'42ij=b"N1]!?*-XAEZh0;3"n,.'E,-2RDKfs*$*pVVY=AmkCl-6)GJZ1,O@lBL W6Fm&"J>VZX3o_`Jp#@\q+hJ58LT_8El&d8%.%=o(,!f&"Kca$Q),D<F>BJ)_FNob(/BD5Jp36H;.R-.(fE(^HXHeUbJkN>X(Rb-<o!=P5l,68d'GSP.OGs3O$VL5?a)]XS.5"j!CX'=A2*O.K(3Oe^AU3Q?9Ft0Llm1Q;%"4HM; i,@h':b0Xo/icY,ZV1"W B .c2gXih(2%D(Lapp>V!k1Lth&XdW-Afg$@0'@[bcA52+=at?gA;4W#354d(Un]l`=+^_'P+$pA/4&STiO&>t5#`HJs4V%DTF@j<D]H>/.6Z&U<Kjt"Zc:n`0l^#]OBhO1]jInmJC:+nktQ$d,bK'NBbgX9`)A<ZrZ 9c_#f@*MA$DMjD;qL@(^.:$U+!EkPWp+j=jAMjAFn_+t!V!d_ Fa%Aj"6eX p28I4Qf$FkAfr=X3s3*C`Pr-^re=d%qRsqE;rn4fk[m0e4Q`+kX)&I._QTN\^pV(0:TZ&i0-d;(m,_=:kYtm$^[#g$06k(l+()tKN19A#!QTZ>>'*[da%>TkjrnN3\L.PS>_g#)Gk&fXrqGUJBo+7"oS?U5UeE<n6NGTUJJ3`1O:JKBdYL+oC _`^>'@A;Jo`;r9Tc#)Lep\scM8$--A2C(K?SZlXU0E1M00AJY+?XnhSE7WU?L l!S98_XBA3$ddc"!rtUcTDOe9i;hk1R+`k(?a@K1lmZk5:eSAo*Q&n>Xq`<`I/sg*Lg"=Q_VO0=pgqX1+C^rO-.V& :>rMD^#Mc@-!f--t:KR+ 
3mC%[4&nS5!sC`E_W,]A?,5CDSA]i)k0pj`_ne@Fi9GJt6q-d]jJ-jh4PhR(KbmDgL9p:KakJZ8C"?iso4%Ar\5[!Um,i)lgXB6Xl-*U8V.Nm<%7Esb_`?*7tTMqhjqTGh0.3[%6+@dC8W+h(WlR2;27:a6_O$5k3gQ_o(bh*IE0H#E<PskG]_#m1?XG_+L10:\U#Ra7,63fKDN#k'pfYc*/[F*UE;dV j*UkX#`(jJ2@n</rWokA5L'ldIqt/[eAC80j*eV8!2,.]>).4b>9bm<%NEI?N#&GDtP6&N>cV3.C"_:O.=!23?!?3LHd5m> NRA)lO3c/d+\1A[QB`C\2U+57OpQEqSc&]@0AX!d,\l: H sm<"j5'(d"[gHX0Y"O#`$hU0mOkh01NWt90&rAen9)NA.io5lq#do>peBfp<>A1BeiI])fe!\R)]1\"OC?0Fc0k 409tn!#qQe&N@*E[FLpPR3HoY,PS,%8A9_iD@Znc! Kp*gIhA*nI"`>;Tsr;g[R`1cmt6"Fj/Pd#gLr!=8XR\:Amg \")eFXG <]JL +Jg@`MOD7a9Xe)C0oZPBE4ikP.QT;AD=3#d^t3sn1il3qM)!2>\c$\12;!jkUftXecD"IYgtr#2tR*Apj$rRT:*-nUY5]T6o*fFNMq#I :.G\ah3sq#`<QX_A()=YA$;/ 2,C43YL:Gn()AlA_5L(Ap0WpptgOlcVD6B%\Ja,4jCg?5\PsO\m?%bNtAnK&!$dR$&kS]3$S34A]Gc!qFE q o1BM_IsZWE875i"VIQES:tND# Vf<hG%TCMBWfmhtI35$A^]q49nrbnaNaomsDpZ<E@jX&so_PKS*_)UbsFT\4A\@NNkbN"_KA7Qt7LU-TEte!XE?iAL_ft_;,rT<cJ+)U<j^TL%&!Qac,@ET$i%>7mT[;2tW;U(Ps(ohs8?SStj13fULZHM2&-[9#/q V\fifI8dq+Bj:NCjEpYa\g<1ef3>cBJ\%8aklFb4V<-A3CW/+L#C(9r+XoaMar ,C:t>6r[KYT3XC#5Sr1oVW=q'/@58a@qD!/X)A<dg@Vp7"<>A_LrB?Z^V?j=WD)1%eD7=o9)Wti:`O3"]r+>KjIK/K+3+ki11&ciJ4+p>`lA#]p[int1`QTg1/K"d:CmhYG,h&@F3#>h2dZ$kYPIT#Bmq6m4f,seO5R)Xf1Fiq$Dk[kHP74lN>^mapAnWrCiSb4HsUYAYr++\**e-%UVjtfP1S7$K)Y6 0 &;OPXKY]L6Npc>SRHd`?n#A4O3[:tCR4*!;&DQ5c0`lL!])^HsDUJ #<t@+c+TaG6ScmA="V/,X%g?gTs'`acLUe.`kJ9!*$=!YSjjk2<=@ZDLlP4sEJd<T?dhb\%!B->&ZO1bXbe1$:q;/Cf:bH57oUE]H%iJ6H8^em60DEeAX6WjXhV0_MiJ]tdAaH<oh,]j+P"R^e0]sJ$AWa?XnK]hGCsYgk,ht]_7])nX2*,+?3eSeA0L^.OUq6&F^[IdF%I#ofm;/J^W#N6N,7(>o-k8\6FSN(a K(6?T($;dRT0Po5\MJOXqg^pVOI[.W<8\3Nb(s9g_-P](("A_3$Ra;b_tSYLb W3RKP!_JV9],P.q!O9>?Z0d^53"Fi+J=bf/0RFnPa]'f"iY-g\QK+ir!K?H&MK6K'Tm:[>+j7P?/>mZ[s9(/$=sU,q787+2bY U#h8<,qnUc6tt3nJ[d372 (WA&*936B1%+9m_]j8m2-Z4OOHppbR-GXQq!s?HhI:JB/4,AO1+b+CtmAAS.In[)=AkLOMbmcD ZP%sqqHrj]F*BeAaso=.5F#BQm7[`h'3l\3Y6&!S2j+pX"]Ma8M; X``jhK(5<%Xm_(DbU^I@1m6qGk0`2hXH5T4Yaa/#m`+cI4UiGc#^Sdf 
.">A+&!A:gW_MVb-TC/S"c^r;9bbHc6TEtWemNqNn8bbDs9=pGi7A(SoT2d&=Fmo6W!o'haZL>GG`9+DebsocV9q_T_g03?Kl-&'WqE%>sn`4>cA?_@-`p64Z.rVFfH>t:)d#Pe7B>oK&Z1]D^A"A%2`kh#>_[&o/HXjhH2LOC$fW$I1(.?r@VIH2sgDa4o4XM(1VTm4^!Ra6`AD6T;U9d@$;tNR:]4k^ 5l359gf'b"AA&9)^.r!>hA?)]]bW5F>U&Si$ss'7_3,6[`]Acck9AC8Ca\OL3bb@fdQ=VS(G=ieci2BBb_jFq)9L8>o0J)DAL4<]VG'E\a!lI1aEaG-$(3`N 2VrDrQm+!>t_O-X`Ze2 $mI9WQ2T+U;\1P;aAgAHl<i"40L_(Tn.U/97^@]3!EP9.!cG-'J57-Ri?Ms0')8A-_4WV18KJdI%.6W%nf!0I' #\A\$lKr Xsn#T.rE)1"1MW1pT2QcY.Yj 5YW_?41a`<H&MZc!9V.LW)J-8R)D:i#B:;dHF,O9#H_m"U?3D(Ml"BY$M1;FK(#k!8bk3[PBiB?Fq;4Ka]FkWHOOL*kcE\-<on1HJ'1<>^Jn.;-@TX2C.T2J\R9A*--2kjc_X&sk8r;^'$AoDON5=bbk6?1,0rpa#+R&?pGAl8f@_])G@lF#N=_ 7cLY[8JYO'k%d[XS$ LdF@lE4Sk%A_AZbP^7lZb62k3p0?XR$q74j`Pgc\Bkdt!3CODA5Et&8@!:A&Z7B4d3q2'ZH.EhAGt;rWYYo_e">WQ#417A<P=Q.$/)#Z;W.->Tk%'Ro,pS5;`5'&E'<PHZ$\62j3+>\XK>2o&B??IHl,4Uf>95a)qR5F9;Z _AS e-cWA!;?Qc,UiT?iHdY)5rQgD*:"-:* /l./Q%&^b)_`h@37IGR*P)V0Wj0<r[]AKN-d>.RIS+t _UMJW)d^S[snLfAocTL(GIB6ts4tr3%Sr6^S&P8:npYaAlVC>n*D``6F)`7-pdG+L:e_e7MnrfSJntJ GQISBRm996%%X?c<l?G1T'A@'Ih!1hp9U*[Rm,Y<@l.:'.i#`S0,nW6hKGn'\):IB#Na@$,5JA,VP5DnErnN_\>tKSm0);?L&`D0E`[;\Rc-9c\ihT/;*9\j'(V"-T@#>lgS$0]^Aj04nKC$<9/D(2K\od'XbO(J5F9PR6=BGFp#3l[p"n30&NEQUe(OkEiJ8%)BAPiK]L"6eM7Q)VO)FE\bYk;*,R %,K$,^#O2eX7#dEKk>]5(Z_p\B_Al`f1HMokqnGU)L8&AWEbl"?495+.`>IO;o0$d32g=b%W=n[>9Bn$iaB=Lr[gP3ZU6;htm_J^EpNtWp;;9cXO56Rro-'<r!L?`2D:K(/b9_eS&f$AAB0t  P bh0 %^X:fA#W=$$qOZ$W_O_(DVXBMrUPA^_iidVn"5CH%9`bmGMC+"C>13)&C?tq_CX[g'QtQ8fHeQiarjm3ha^q>L@Fd`A'Vc$i!FiSXKDh=*Z4(d6,`.+K>Kq`Np%`.4TZ*d-BAI#/q[Ns+(g(]0s:X,A08)o7ALtHHCOOL&@NqhmikX 8,>=;]!MCea7RC'0sHWnW'L,7E7'+o$A%t2Ff$$a(pZoc5_76#cs76&,b$n,XO;@M"5VRUld*"q7D+s6.kBY]VbTIQAB3PePkO9gIKTSVnZB&0m,AA^?_QRkk*\;:;UiS'6`N:;2.H85:h2ptSR`V24@ H g&F)&@T]A!Z3EdA\L$rnd &D9TN"hCrMls<r^MSD"7SKAN0D#NPe1n<<;e/%fhZHOrV"gLe$1j8Cckl=Ei0\#tpYJ?*pIXc7W,j7r-25<+K`7qh$,Q?DGdpNA7A+Ph<NfHb+jC)UIUt4GMo2rJU@BF5F4RBK>UiJ'Y9Lnad?8r^;^;t6Did,l4\5%jH[;fPN(0.ReEQfRcidVVY^lX]F:71LYqL;A`m$M*t<g]c`hKg 
UrJt>5(]c5d'A8%"7%!r'mtFDUj.+g&S].*+?7^DXAL[s+/;f-'#&sk&Z*"b\pcchVBdi*7k-Xf\+"-#A-AlbL,NfbcUf6">]^*PmcEOGU4SKjp,1&(]?/.rqk%Jr:UQl3H!X iHpMOr4)0ZB"-0jMqA!>21F/h@%_X$&)Ze_R?_AE$YP6o`,\6&L@tnB#Ja$n,YOF1;e2AZ6;>r9+&IWK5N<3Y'5 GrJcp)ZK>r`TXr%8QWs_m80GnWE[L%geqW7P_6XIq!-'(imGMnKg,ZOUf='#d0[b#&L<TB]iRhVkR>F%'kK@`;/edX)=pGqsjOnGG:467Vl&r4] SJ<NVU8GJ9IL[)HAh6a=H:C>%sI%/VQX#K363QfJ,*YGK(1hHf@G4'lTTTL9>>[T%'I3>rAG-:r=GnFE41HgIo=&PT[^BWH]M-!;W=)Ad!.MUs@p#[:>lmd9&^8 ,T_iP9"Tj1ItmHC;-Ib'[r,l?RRV2$V-j[#;h:HK*P8?l+h,$#=t$D,%-Xo)t2#)RZr:>T[i(jlBc)E+>iPIP+6h<Xe\\5FDAX`8@]aGHjX[Uq2k`tb<DZQl3Za%Q39bpPI'RT?O[M(/UAdRsOEqF6A=6#&_G?aeNHYItHQ"Sat2R+jpA`,RB#H9'WVA(45m#,FAR9&*7TK-I$+UgGmA+R9 PC%^hcZpYmO4f%aQe^"a)DC 3@cb"p,>k74K<ht$pKfm #fBm\3J6"!RUb<=g)Ob9@YV c7;ZZ$3=eDM&`HY,"J'7Q=`AhG6m97g(n=P2#e>WWML4<r38Jt-n@Y+)@h%'XYElcDEFe.dBU^J2/*[$3'I7>*3Lq/<L-5;cdQ22>#6[bn&_GsnLPMI-+cl>[mNdb"nl-$W']\'KaLPNA ml]t^WNOt\W3aS\7]"D5./8lS'5g+Q+<?pNBpNe&M-^g/fL<r!@@43)&t_PR52XG,qil%WH=^&Dqa/^p#Xq;aiNgpGH@@k%'VIOgF=V(fbAWg67@ i+)6W^*;nB!sfa@,"e()fk=""j0&Mp5<`KUNmF*bmWZMT"QY(^3'1L`g&c+,-1JUL#^_4:S8a1c^h#_8!K@hUa>hlZ&mm;OK;f"@%s]qkT1n$T6AHtX0FM^en7eIdsKZ<60;(:X<hIGAP4WN<APS"*D5E`]0n[/nB^)AJPf>88gEg1W@9f&[Y#j!;R_\7K">Y"35C0IXdi0O/$/jm(!l+nmON8=2T8fe&Tpf@YYJi;BrB9Oq&5gDl'j@9F4>/"5J0/Wg;ApWb::fBpp[B'MCUCsKnn#7Uhtp7BqAtS!9f(@Tc\>pYD& Cl&ZY*:5mWKZ3jDVXPZ2( U5KbP]\)#qAK-^NjVd.b#'qk>#F.Gt?ZK@rVt9FgG3mQ;iHIe&m57(h5G9i/8n`:G&HpfXLD:\g:RXg`P)X\k\V-`Bp8Q/[UP0<?RBLNT5Y(!"2fq<(-/q?>OBCCMHY$i"X61b>q.Y^-7K8ENOXTF%A92nmd>72e FEo(JD?ZNYM80>_iD+1.R^H;pK=AAqLFXnDD>'@oV5,tZ6rpcpAh_F-$A(ljbg-bS>N#i5 j%dT6I_[?_$`a-*)tAjP Pf$e]ak1";1#G/\k)%+c[/3P7\[qQ:\cAE+4M>^;lACMfV$_d hcesHeX`,E7a$_?sIZho7V&49_)a<JT* s9EW8"P_\!"QD)_0\bjUYH!80P)l6:'isq!t4c(Fi6Q>>X9]mhD""gYjf[9K1caC:Va$SPl_KLHR1jo?Snk[R-l$`'aWrA@_Iio^_b!2ZsP^Vg_6Pr`8@>BdRtLp2_G\#iSoJiI^jAj3bm[=T"<h?:%J)4^:NG3-SQ\'hOC/=s7mo=CGC[=V""af2R'=)]:f^0gf:e]"Za^>@Si FfG?o&r(?"gb>% =(jheYkR(BWC_H^Ki=1q-aZ=g*h:Ls! 
=EMTN[-n2 [MC9*d$s2tn'@0fr%KBG"6k784fZ Dg%OTX0PpR7Kb O5..q`kZ]CmirOg6$*Es#<5VXV,Sd1X*$*oo.&ak3N+g80Uje[H0$l+\1I]*lRi)6=h?T9FOHk+>)JT#a))fl Gd#/"!24O6G:6@PDQm/?4k_n7%[!Ei2Ap@VXmP_Agm9?=F:7WeW $NF\3C0%aI>G.ZNDQLQASrn(hjfH,.Z(kp@:<e0sjtMrSpU8WS<PS:Dng?D,8mWo._BL^XoIMU0UA"D-TYo=&pVOLi"tk2X?4r[GV6j*I"G?@4'&Xolk'<.ORsC79L^DjIj:!'C7q3'E`CQm>&jI^.?T2rLol<;8<ngkr4&1']A^?U1^<j,.oAsdf-9%3"P:%f:)koC-FMa/<se*FN+Y31l;(18;>o[P5tS#U9%6Z0UJ!dqs?(d7Lnm<'K%_)m6n^,IY;-j'OTCZS`A*j_^b-G'56kcZ'IGa^kNJF6#.=X4Y/VFIG[19(A`"`KjoTcQHfL9Q^0jkRNZK5I>'&YR#g"6.(A3nU?OIcnd+QL[&jN!<!g'`LOUoTWPO3/W^R^`#a]'En5)=N,?D;TmBitXdG0OG!JD=5;HkkOY>AVN];igf.:E*>tRBO:S[*mYjhPCJC)8Qg2$s+0AKeBd4(s1eZ3WD#_bVo'6m.)\&c-?V+f9=&A_j4L?9ZQ:-c'pW74U%r.m#DEH8f6F7H1BLDqa+eIq_cj&T]dr DLjep+oNt93$T10 GXAaL(Dsb(hT&(:FTX,OtVO\9I4#2%boc,0Gh5p(f]'bU$L1[0ZBkF#?ogBjUm2oQH]pTinSN":cD_FaEkA7_M3\*:Q)^WUREFZde1APMp`McO8pe^:-^Y&H9QR,#+n_JO&0Np+9re11>)YM]fYAIkCACaA\3-7fKIFfY7Xp[s\oM)f&*+p)pFXkpjGUalhQsV6iCn2iOBU"?19Ns7.m-_kh<-qI8OlU3m<\r;tM=]]b,/.]'%Lm*Qg1%jGlE4AfWh5TR,:nCU3ob!)XMF@^2%s<JY"@\>Jdi,@/Am2Pqgsa,nZm;*'%^XK3e9R!96eVbt'2GkO+s,%\F1Vcg^W"s`NR8eM$?&'MA?]^Kj<@nS75,jA-V,`;<J;Lj#K>=V:=p*@L>_Th2>&7TI=,.m;XE3142Z#0$m$8&(!7EC%J<>+$t#-;-"1EVJAW,`AkjOtXb:;IaS#q)lDk6J_DZaJ-P-7?]MO1XrqU4ad)G.lD%aH4m[:`_Tg`5^=Dj_nNSfQ446ffRC<t@?6VaNR/1fk*@-$qb%t#j]aW]n)EG5XJ5:K*=M.R=)eAgsTL@OQ?m$#6t@e-eJ'_J'1BRLF=gWN6;3S*CHM_iBie]VGUeUGI!.0*DR-\-[9L\mWgN8ebjtP\$S0?_cR??[`HX',O=>c=9#Y.j%AtJ4l:+H2oK`*5;d9mAIl_J5LX$S99sY#TA-=nB*G^m3/#T^"sLOb4Lg6(nGa_'L '7$i;bdDq'j@j5ME_s1BPYWDn!j2Z+MtB8*Q_V#ofZ7Z$?K\)g2Nrm@-6S@C=^tSl)`T$bm2"p3jj8a^UlME$JD1#.XNRk/$,A^g-\nj5@>&!-mhJ*XYNKHps?' 
jG[*S"<piI0HQ]<)ob]%A$>lgP$H"CGQT m"hkHk@U)N?"dVp!I(]]0$0;kc:)Fr2DsMXGZLWUFT.s&U@fk4$L5F*j':N&X(DQc.P]p*0ai)'5=^6r4,)%Wi]`O9,L`mPj,pJH)_S*IoN%X\[X'A+RoetZ*q4b<^;2I85HAbh%l5kgAJ8)+,bA1nL!H:8&F';1!V(6D'O;KDE&PhM8oRQViRNV4%Vb@Ab]qpP:Q^i6iqW%/'m&kBK:dd=j58ITNR+CRgI(g1.69cq'B+'+0Y[5[ti=]$0Pe9=7H>s$oZ,6,^iNoNJtg8,SkCjpYOPlSoZ3oP^KF-s1ME@)n?U*KtaWCpCSAFK?C*CGmr4;hCMd3]bLI2%]m*4'aPg%K5He #TLa@#%"_!KItS"4:;9lZEoTOSI6['*(_I^@qFf0+Kc=f3E.`(0hiK&C!jq/-@HQrk&8T*Y4\S!%q"-1H(_:-\nR?O=jKMFfFin0LqO9#prAm#%HHt[4jXK.XFEH?G;""Q0O\5>;jWL;5@c6E;`61Ne?G'T+Xp"-n7pF=#N=@@ri<G^jf oU\UK05 3*>kG,/Ze-3hB==Zs@:OY\AVX=_Y.UotK-X8H;Hk'\U3+dg?Y.W$_Cb=KUbm=j(AYc\CZ%82=JWY53$D2p_qITtj/.1M+:SGC/b'RJF67h7X=gEREg2[Z[9LRM$fRZ2RL4"r!`AYn+jVpPT kPZBHSJUq?AL!fBo`\ ToH1XVtBL0(r50hV['=UCt7Fr%@&e2K^5md;r30nn^'BL?Ra^ZWf&Bo#Xa@_FFUics@m[;tt)P!43p`hR\@O:eYqdAjmY-K<D6ZP'U,:A&jRGH/a7-LA(_J$-Bbnp%Qd#WEtd1HEq@6=lLghDO-Q%CtCfn-O:W*L1T-QK^ptb':`S)`]l7o95VbO&^CR`teXGj\:PAG!p;&3$_a<:a]JCgK5qI]TE$ipgnNJPV'VY`i93SbUqEL.t$O``NT<+rBjN V$-G5Dq%mO^AtlXC5b;UpaOPgUEgp*r"QGce/3?8q>7';gQ6Fqp/rKqYn]BsMO.-UaA$)#=-b:di#ZnVaSG:)*]AF]]q3L/0n<!AD6bAM1NNR9J"WjH4-^lnbT]Jo>GU"]2E]MLh-gp4s'QhQ0;oq3WOtNCi:ZQBf,_,"3Q;T&] XFgE'rg*;?MD3`*=N-7$#%nj9sUC%XN%,h#CA71ZAk>q%mKB0mO2n.I]h-1I#+!Y)n,R:rdpBbZ[BHlAV!q2cs+@PSb""Yskj.A81 A.rqFgiB6gJpSc.V3N\Nb&jJYI<(+%QZa7dA<m2MS/'R0\Bn!8Zp^0j-hYAjA?+iLbi)*rl,.XN57Jr0FZ7j1*o,#PD":h9O=tD'REC!#CpBP&Yk]MWm`>TpLH'H:F^3FS;YdGY25_U PeoQbr<>J<+Sh(`$P=kcM"0nV\O<OZ>#%lEcGnrN_=X<pG_^5Rfk@p2UPaY%&Q4>n?XBT13k=qq).!E4BsZ2"pqrGU\;l1<Vlb',Si]XHN</7X'l=SfIUMG.RaBe,`$fsp@Ug+H,f<)3F`ge1sNk^*Z9,cAS!T1@RJes"64iq'p;T2+J<'Z59S.-6<,[q?gAL&kC`I*?:RmKgCOsIAW+M5)C8-POb8BA"-JYggeM[-QX5I<8F#!g2H:8LDTcR4b*K_!@e#!!SXX-@='t5BG7o,QJ26>r:re`W>b'.nrZL2I_&S3A4OnVW6V\E%14qSG]FQHi?&2);SqGFPZ)q%bn!O<A' [7m*@$Y@IpG-3^p6Z3lBq'<rUH%^CUir,Nt=`ikL*it]gsAn,Cgd_SBW\s?=l=%D!%^R]Y:]_FBf2\,Y?@Ssfb6FKNrT`=]lhPXN/D] 8kTXTJ0Y`WR!jSp!M"(9TK,$s%=e9QT=?rQ0:q-; 6K0#@pIIYSfO3P_&Q7-,OaT?EjZ 
6i.5lY8Zt7-0?0%D'MD4\*XE9!2<6@qsp-E,VkmX<RUUnTG>s(*`5C^(*>0`8f5.D*9CFf8,C3mMpmdn=5E!X+9Kp8'HHc  [%Llr?t-,*RX59hS4_I7gl;;k;%qG4bKscD+RO#go605<^JCZM5pBcdA,oJd'75+Olr=aUe`l>[rL!36J" Boio2$`)Kl-=Mgge*+@Cssq1n:7KE(pZ,2$dY.AK[ D9qAKrjo; ZeU:Xd+q/-KAI&1Wa<VO*Xc:Y_QU:N?sXKCd]R*FW0\0L!O"k,?C9*r:$5f=oO'g]-o>UhsJA4i)R17&_1LViJ!EDpiILQn^sp*1"$s(^+^&]57\5A9G,rI_$&qFSKBff/)WBe!+6ka[k*YaKYG<0RLEA,t:_"g.Y5A5LdTWl"/_UisPLQ:O'Y8-_`.4&gHYIj!1#PfbL7M]g:=hT@M>6<i;+gpeZj&DM:n.3gP/,>%1-t,!2*nXtltI8#X#<)T.L857G*F7JjXq3:n$5q(Ad(<V=+VjV0A<<0thM?eA0a!=8rrO'D,tpXh+GbQJJR`Y&g(:"D%`V<_q%Y+s;R3d('E*]+d`T.ALoYAJ]U5nsXl#W[F+[\n>8>7J#H=4si+]U5\AAE2PP/h*!cEG?JM0:8_5aC1o1.cI,h'd8B%6BH:E<2"]rrfbOEZ+=q4t$@iU2hF1[AmRg@>t=r);Z 3+Hbd4PcHV*i@Nk_EMD3?Ct0P?d<#A:5W_!F+MG"&kK]4j]-Xd Q31oql;?O7Qa9t@bNSe6A%)hRO12E@VNcsSf407#@?#92KeEU2=b<SY4L[?2mL5*80pj.=3P)9b5/Ri0[:`lmE_LkT5]gLKa`\V> oes)XZnaDg[eh)3PG(*G>DB/:^GsR^-)WK<&BA1K*2i8gd6L"nn2 ,8Z4O5[B22Q0p"r=T .]Mr6AeC[qY?5W)<t:nIU#1d[^3i)?U[nd[psX>d,72#0:\d]T4LM-2d1)>79[4qAVkE/Z^aSB%hq7@UL)iN!Q1=(-U+2WIBqO_RU\L$rBJ(q9oH53n9kBB5lr[D1*/fie8Q?LB\M^0dIih3Z*^J?)Z11k@,sa:p59]G""sB#2Q;LX@?Y%NBt?t8?p5FZWl_S(g[!U`oD,=A^_]3?8n'FeY(>,kqnsFAN)9nE>;YpIm+#\0lD/:mU0jDZ0+Q8BD+G'oAMtV%EC>FNl6Tn,714bS2_f`X!lJK0JikF#nqYP8DSi&d0+,%+CVq+V4pOd>Se5meYchh2-f4Uksj'3R[m43qh!'/.Z^3j]B$#CJZqjWb+XLUM&-a`^0)b>g2lFt3Ml%Z$g"D(i#H]4FbD E9G+BtjS?8[1ojO:";nU1s';M]F[8DTpiNP7iaPnr(/&1t,EW!f Ige-AD?U8EUET:3YB= &,E@JT;%7^rdl"-1A-@YLU%?sXNo?r8m,A[-H/2nS6#jEPYnV%?8s[ZEKI#;th?Gg`=[r!O=(PIh,TBX(g_14V[jS*oaF=I$]1sbg[.L^U^;gh(YWfco8K*Yo_1m:Y<9XcMf`cd>4B5_)2tP$jja#:\4!Q&4b7GY`NoO5)l:-NSdZ!odhsA<0YX:T`=U,f\%TD&t:"m`> M!V92(Df#!`WYAoBl/A#NLH"XGVXhdmrjc+[)dJ*dXK,\IGLEm)lgX-\MejV&!G=Y:j-ck52qU]f:?rkh:i7<OD;/I$4a:cAfF3^[WHA5&SP$$p',\i+GN.q@!-$`L#&XN\^sT-BrlDh:PdqS!e7`WBg9t# Y6rq:T@PT>?2;1Q-l21cr[\o+JQb/1L2)t5^A(K(9BMK8ZALi4=<=tND"j]nXAgtA(i`T%!"+;A)nFJFVQU)$Mi`Z:1]_AA[aVB@XD>;h9_6WE(l2Gt>I:nBrLcj(nb@GM"7=g=S T\M 6Sqi_o/_)cOr]O:QDt@qk`:jA4tO^+:j#MPV<1kV^;k:)$;)_?,mGqe*AY8#Ago.k. 
#OY5g:>?g`$qSg+7\>3k:ak*qmj; gdGUiig&5U,BYiB?$QXWArV4Kj*DB,%W\1*9ES^lg^=HB>8qa4%Pol\nnS8*!r`%4Nsqr15g[*Sp[--kpIAq&^N<OKL/OB'(rOcp.)'AAAgj*FZ^flc#UCg?%BjJ+>b91"\Tp_iT^&m='FeOUT&X*ni*rD\0P%LKUetBDK3A0^^EX08qjg+O2Wrc-@U"`M"3R291mG[$7G ;>pnbijT<Q]2lJnBc@@ra"WBtAUh"+Go 6U6,<Ns]kheJses]+A>)6F(,$e?88$sX>=(P;!]7e(ht0^MgQ``%7KjDdCmVbV1SLTeWg7PE0g[:e0Q+,4VPfHg+k78Hm^0UO:Ea$>AV\hB0c\#Y3NZiA$i(hPh>PN@s<X+e4SOTp\_o?B_7C9t=LAaM>_bKW!W_El%bNg^sA<)b*p66]'!R*Q9(K4'&YfSUHY>`J$0X.Q.m'j _%.LSMKF2 hcael/5(B=mNh&#@7pb'ZLR[41U60[S2L\Fn<pn]6AZrM]ip)-?T?A7B[dk`7?e.CI-9a$ejs%(&:bs,HV$@C?.=2)X39dM5i!`;h#@hn3AO/ pmFMOpIGA2t*@>@eRM_2X+hGYM^-7[G-^h.^4m_d=(m]5DYm[g 9)kF0/=]Dog=hA`@.A!0&'kK'<dg]69t`mqqT!q]K347>#l.2,T^qBde&[Wq2F.bl@[(-?d-,l>[][34(NmM8*/l,m/fjif[+b'7gn#A!8+H)tiOIfY-VqEbFZ_*d+(Gi3n^cWGP9"I$gpO)Tn\@m848(OA=mL'MLD+#aS^eo.mA0cDK@Ug5O`ig%s&b(t*&?T"O3RE;'g33#cEE.Q>B7Dl"&`\[DF;(RL8R9@%l'e4s$(;7Q3(Wo@Dg 1?GW>,B#9f(48d;E3nkY::p%B?E]4rUIIl/S>_Jm)dkbsPX]I1iedL@/lhDr+!)O_):A4=cK\S.U:*t%V<cAeeA1*5[q>?A3hk!"i-Cgl=KA6Xo:/DJ##(3"Nm(Dh+j>tg;ORJ.t'W;1=k.C5W1sm^T?g%slq,RG37+!"TtcTI$U3UA+B+=O8'F`?mh^5=beG\9,e^69I]@(&%moD-N!12*d]<.7Glqa!*ddYLqWLO5j#$QbW0oJP7XXJCRG,iXOM9&\=[HQ"L_I)^,EpN$[A@0&VdqbX`E[`UiCN%PZABVibPN)U&/l+h_E%h'%!t^k?NKmL9?1i^nB\OT?-_+,`*X4'OH8h%\#],cmg1Mr?M3elTGrA&po0n.XNh9Fk*>h$[(Ol cr-K S,)2mYCtm$HqMh,d(3$Q-Er+=6S\YWS'2tE(6">0]g3D.9*7t O!rO1C-g:JTq5Fa/>MoASLgTj9FA%b<sBjKpDQPe8HaMe4(J70%NpK+A&_>*%I\Cj1.jgkX?3n:,#WmA;@EUAEa&bJ$?=5(D.[*4F2ReSk8pq#oOWCW-k$1ks=<o'B"1DB_32:OW)fXK+hR-BpGCdDA5iXgdIa9s.Od6n-^L$A+J,\X4;E(h$sJYXmDel0FK_.ng-;-bKZlY)a-=iG>hN q& (PUA[]iq(elgSlXbZS"I%$iOcin8,_h<*!@@N\raX+JL 0Ln1GTjgIj)d,8ZU#A\<84AiW'@s%#FZ">/ #JaU0hgBgGB0!Qc_RR&rQQh*7 UR3Z9@k-(lDP)XnsEoXt"An-;LA:*4KrF*13/-n;Q&[\U&X'&e97jkU',IY7AoppX5nK<Z3k/^J)A_I`KVGU+X%&) ALWm11a.&;#V&7LYm?]h^"?$IblT7cE'ioPMr7APS+?iaY5:SnJdN?i$;hYAooWQT-oT>Z#s2JS_na@R6ZF2m6pE+%-"$6mt.t5\/:rcnO1B%KC@ r4sr`p0'RoCQ=lfNam6;^?dLp<SJ"di_"pHp\=@P!2`)Um;t(gRmFcqVUc[FA"*9)AbM,-!Z/7Ge@Ad8e<WP=aP#SS, BfpOcB-Sf1NH# 
#_,DkoqZk8?ADO34bJWaG*mdBVcPt@N@0nH&!!=3:2'["U_[cA4VYtFt6tLQs?l<ibB_:_fi[Us\%@^p<AU8iI<7<YbCKP^ghShis6,Ob;I ^>.'3SX;-pZJ?b,kdd;V 1#KB8WX4`Yd?ta$5CGBdo<U=Tjd;KI?tN-6hah Jj&_FdV'K]M$] r-]3t(++pSR&Pg$f8R<`Al6=RrCR#3K 59r07'5;";`;_c6&0HR [,RSAZ3%<1G1YdP@="),VT:Yeir44h=n!-Br-"a,C,+E@WQ$*%]/Tn`?ZX)=,3#$LNQboQA<!n6%H"YP\]mB5.;NX1pe'?`\@$^BNj<tQ/8-PpUdj_Ck"=&FO8EV0 ehmnK(Hk;n:fq2;G=_Yg(Xo8sq!*VSd5qfW!q6M7@nd$5ASh^2mm/[7rC/k4]5^>1" P+tR< ^o<i@X5f)\1:5JUnj+ CCbeK%jo%Ne2bDfVj;i(;*)7J$6g)BtY9<i!_PI@HYZ^ %[kkK[hbds?-tl%f_:_lgeXGnA5*JtQ&0t<'f>q-F=0-.J7Kj6)nmq9H>5(BWBc)jG%qIn*INl`TI/np<d'%:lf__o"[jOsqiWUa"1hK+"bA7`"/*+/6%?F$n,^rGfc?.&c\iFICkTtQeBgiR?_9g/G9:N$lKMdnpil^^p==U5)5?7BRiM0l?b78K"H]c%)GF^dR<4\tM"qfhWcKHcJHPH[i7a!>hUP<4<@19*b(^]?:KTZ+P?\/GE`[QMki%nfAV?A6l+(g(t$O`\-2pXCkcU 4p]AY;-+khk6PoZ>lUDbnGJm2``G<h@p$t+39jh;9h+?^4%B=VJ<Y3<</0$i9Fn,H>7^'I(_B0^ok/>K!iIe:rE"5UGl1c-;O@Z)ctM#Lft\65Yh<@c#js.!]J"c)jQn\GBaetQ[0WIGn$P(Y6;&:*QW38ffW4DA.Ah?;#-mJ)gi1YT@!fA?7`]Cq>[87HL)rgZ0hYj\<Nm$;h7A)M#PS?$Iq!oSjVCqpaS#WIi(J5=A^c*mX)/#S=%UI-LG!pl -(!pWW2#XiqffEASLdN,(Y*]FR36OZ[EIcY<q%S\?dC-]"I.,bf+rdB)RmB%9I7q8>/q+_t$%+.E7=XB`.PGg7hs7NiZm2`p)PM'D]*e"Y]@-a/7+)DAH[KJ%1?rGgE-G^B1EKj&'CE>-?`mdXs#"\b,P3LmFqOP^d7C:d,4OT2ZpAfBt*^]h"+Q/:8e.3AeW_9;4.fDW@]!!^!tIss`*le='i?_4.F=/L'N3<Kijr1"d' AO7(I#0NIjW]*O-P(R25<&`.7+qm^kf4tpUIJ[rIn F?k"]9mF`""k)%Nn2-+#Xb**5@om2 cV$KVc((.,&N(Sq081q$cI]"U23aRXeOOa7/KcCs3;IXj)1jYF?AG1\YEL=Det;,8Hi\AmQn`c1O&acs=]*0'DMfdA&s4DUl&qt$a6G*/B?'-_]%Jc%!gW-.@be4R?k[M\ji] 8$srTOm'U!jP;W<$pXL81bm5Vfme26?m3as_4?M/l6>(B[d$aQkA%4%+SF4Jr!AHRTWkLNlr+)_H',e:6sEWnF1YqAiDGt?(5Kgo?&aP/t"ZkJgf6CJ)&ER&^44_dnjZ"> @`1L<3f,*t5Us(Pb>bKLBA[*n:H5MA ;V='WQ@[iBiPFaWcj"C\=/RnlUoJ-HCTgEt>F,l.*f92ArUma2 3m5oT4%N^BS$=$_KW/pP;bL6E1lDUPM6$F8OOcI7I?.d>*E35",<tIg/JU%C/T!sWa4:;ohiZ0,mXUjpC9@b?TJpJO>9\>4R4>^8i=/H@%_*iXi!@Rf\akh(dl+#rsRJG]H)B5"+QEC[1ci,I8f`(QY+>Ws..DmH+/BDV@9PTTA5!?nKhVgAF?['JprQQEC4^@16rgRqELddHHoSl $:X3HCrr Sgi!AOG#TNg&*?r^o*Ms@< 
Ooi]j28T)^C=II-tdag0=:oQn&>)E-d;+mVrWM*mH1d%m@Jk<*'F$I@KJ>.:25Yh73gOQ!2?<AM\d+TY?PA26qtE$5<8lpqreQIpk1R2EAq=?+?P@?2qkfTPq^J[cJ+S],:AU*kW6A.IY?s8sQjTo2^_7m7llAi&:rrl+M,bKVM#J%RDlo;I^MZ^Ab[HS&)'JTm]U71fW=;!e<W%9P=#/#_]NRY@C)&W%s^4t,*rFtop.4\I&$/NK96VXVJ?aFY'A$!<$f>:O?2a=/(G#)\X#_rg35abm8rBj8K&OF7$QKH0YCp[D*ROI8F^$\"e[:rFN2J;W0a"-Zqa?;(Z1CU5;OdaHIULE$`^O^m=A5t &Q'\"0U#i\(EL,j)oGOKmU^b>\Td=.mn=9\EcLH`N9&jWAcsOE2!HEkag7:LefbK;MkAA_!<e:7:N!-T?ORM)mY!*^$!)t'(_,Hr$AIgU0@3EJ-3%hrNtq]Wf`p544k*WPMq#XB[N7] V)4ej[Bip(X(gJ?=`ArpfY&OK<?(6?UlD3jR ;,-M ?a*#>h?iAWBN^Di_6R[>0n@[ FHg<_A_-&l:HNP!A_j $<r<JL[H9]&IC>"]s`Q'*7dRl'd%F\B<N*3c%8>&P.%Q?L>q)\]B+ce )qT\KRgNNHQ#o!-e"X/S=(I2bDE3)FV&4:fKY:?&9;nS!RALA>q^\FBY'-4[>lQ,?]#3/8F3.8;J/FG>$0,mJ3dJ&hi$,n]iE5P?Il*"mr3FQ Po-=<)id_<r5!A@AgD3NF$+sHqgUoHj55Gfr2rGhVSA\Q9[P)2f]911O_5)*cYcY\KNM]V_YACfZ1$!tRADrAWc>X)e/ZF!)qCA:#0<M]`/ h33#GTP%#3X4l@jE\SX>Hlh9)R0L$P1O3s(O5XjCK +$QRLM&Lf#\D[tJhQ0g2%.Et.1EhW-I$@8PA7ElQ<D`F]FTQ^0lC^&)_qn$R2tCd_-pY#7B#0d-h?2mB*WV ?pD%.">FTX4 @Z+$[0ER'AX.T0A&m[J%F=lqa,sP]_2,KG6J;AN,EpcbjNpAM`HP,F2W%@,X(/9s7A1.haE4Z#FV(Ut ;DHrtNPr;p_V(@9<`YlTF2\`U9Q_82c#!(]T;r0&= kat71d";K$:+LnaWq))=ON$KV:rIda\(@Anf_Z/I:>&-k:i$jn-^O4!j#NfEsGh[#"Vq%5 qn3s3+)b(^.Xd07Qd$dL61bR(%V$D!s<dqo86T"f66 c'q^/lJ[1/"o:$pLILA*rA$G9R'#gY[OJB=(3\HJlA1hMdPS-[(h!9)dbDp/MOl,_:ebkl+V@NbsX\D%6-_dp3-Fn+k-,P+SG "kA-h3C;<Z\/ae2k1Rm1?g/$NDZ`Of/3(V].(.^j6V>Acrh"JG'FK&r,d%[thtZ1Q&MgV 5Aa2n6jV+Mp"?& "i8h+dmKbo@<'k.<mG16:-qL]OlOU7ICRT)""d$TBd*&4jfgFUV";gGKRK,dNB_GL-JneUoW`*#EpEVi5."M\2O_<OLAMV@9+C&VDX0Yh!*niX1@0Sfdf4;oIL<HYP#!#"?$mn4P>trdH3P('@$-kJJrI7^.*$"Xl'Z/C#eQ_7@8dEmsQ\L<(8lqhf8OlRSni=T$@'FgGqBD=Xo"m^O8bJANBdn2$78Kd)Tmn.Gm.]f'S4-XCO >b i@K#>bL!a gme-G(e%-kbhB*SGXF(&bkF4M5[B 2ksLC[]j=BIm_C^n5oMk7>mXr><^?YRqJ-88)nH.lYeBA(TlT.Xsf%bM&i[iAqhD2nk#JkbTS.Bm tnE:?h, HONo<Z.<h2K^2a)Q>E?+(+t'q29[5Gg7D*0"EgODHeaD'[O#SDbpNbO%h#Z6&7*0AW3*9%gZMk8C,_#4R*V5Z!5Aa``/PAcpnIc7)q3NakD44nF6.r\P(rB<cbE0XDEKF=G;ok@o@'+]4q 
+-[s%"?!t9p`A(/YO'["]$NZ9R6kOZZ0r>!`cQ#$>PE6rn,Q'jq2Z$.t@clQH/q)K@IjIV?Aa%+.qQ^o`6+`NT-Ac./4;LQ!"HJ&@)]4kS-dVEthjcMWCM[?Uj2l5`!(la4LYpAt120S[F,$+i]MY0q[j!bOcpABT<*A"Ge#%>>">l>J!E=#_#pd73b\J&81(+-=b0;ofqW#DAeENO26b=Bte<=Q:LbC/W4L'NOJi*]kC(<1A<.ne;_k2K'(KA!CN5IYW'_f>XmZ3J5E+YHf><U.Ie0TJ'A'9gpT\k79EXXKF8b0ZVM CHlQ%oK2^ZtaOQUMLc&hKAA8A3']1)ZTEOZ<o=W$t1q14YPAlP7>66pe(+W%MeTFtGO2C^kjd;c[td7:f"JErc^WWW9P<GpOJ+8?4&i,+SfH/_G4gqie$fM06`*M;nVL`BQOO=mGet9)$`o/]'eRnbqEgs+ee;b'JNQ1'rRh9F]?,`.?M]s7TbZ.-7X=gI^Jq[V;aDnAa+XZS%+X?sXr BK,.G8`7d<p28WBnU2:ph>8X"EGNb`\P!3@kc`]ThN AYGM_6jlqXqj8RaH`BpnJFO2G;^ _A$Q0Ol#6 m4hSdOQJMfg_"j>o5Z$7gnSf%i:A=s5P`M/6*>oZ72@;0\\O\NI!bo<H>0IG`/1l0FW/<@lnX6#\BVl'nb\s.l4/KP60orniFN!MnRO^2`l'KZHHne*0:lA(Ha<*Lq3,M'>&fo5';oqXZl.tbHU-`O(sJrsD\3KA^,LhM!:=YmKqDRCN^&^:L_#H2SUS+n0[(\SE?qGOYE\CV[7UEIk<i6-*F` :&3X4r!O5&F1#Lb(A ,iOq,bM;= KZm*4\8A+^g,KTNo7LM!.<5FW/IbH/+%m'bq;5@GB""IAA97Jt_VT/K;9EA^t4rt[;0^.88>jR)bPsA=$%P:0jo%B67\S>F'D*Y(onLJi''3pJpDW^$Pd4\e='d[lCfj5m"ncOQ;FIGS+"*#kB#*.@ tW\Z&!rZ/116]lhp23o,2?=qpc"]3#8>$/QXtslaYR2e^A%NGOS6?^h"1-9AS%H[/tUH9NF)0cKjLAl?WT85G:_\%28?)cM#J)@i0ajWF8h#f!%9d68&O2;LHS`G5&8hUra(Edco^\Af_fNcgaoXaLH)b Y9DI[]_8B`=F*8"H&*)hD;]5O<XF'lZRd:;TKHWGK<QEmb fq?t;J#0M32#G:5/%Eq1hD97-%5JN>!DI?]VPR+`VE/.L[QFc._d!kJD;5.;pG^`7\?O(tsh$nn62%X>5h2j B1*\can[?.t5>:OElXG]o]BE2ni.m4GkL,#AM5(=5h'o5P9n sZJ))QU/]T!3ngkVGW2:Y\\=A2A]VQ'$-g]taXA8jY6@8c"He4j4ai [b*5G^>bRXBCo!^f!8c54&.I R!a`b C(<=.`Lhssq:nHF4m8`5BU>Wf(Fq/AnO:BgHbAXd4S@TKWFscj3@(8DA fCt3Ap7Af=]JYGcIC$k[#,%/jE+X8 f7<$!kZpZ'sT53W@@oTiKOi*9Rsr1Y-JI;>BOIqY]S34A:*s-hhkQaL3UCsK7eGVcodIqGB<O*0@*(fgmF@A<)Z2fDN,t82a7aMX ^,o//oZd$Z&ZW9A;r^5`ZVRbd:#t[G@>`P6\(M$9>q$OYtD-_`,lF84SFOAWdEcsW8p1fn6\UUoi(3iXNt[M-7'ZDh*ij_rhO3dr_V*E/3[iGGl:G%9 DdO%+[L):QVF%E9oo6_+j_o=@hg9c;nGt;7pWCA5OSEb)beNGrl7Go*:Ab9/La (Vc,CHTp -ed\QeI-L%^$'Vp]YT"Ce[HsM3"7?T#e,8Gj". e06qYPms`_'mp:AIgealhh1Z7$o5sD[o+^XjmC5-kaX]kGZC,0Lo3S!dg56eJ$*G#U+k=. 
<3Y149a=1@ _Q8R!DPMU^/Z-WKoDM0@!E5@Ja+&)`I]-H$3-@*_tMdI7;A!Do'78BOZ#9RXOGj0Hhq[#^m4&75kF tr&mo6OD,#G (Db4"\e0hJfAj!I#aecb4UEfl>.a9tA'T&O5X]j]I=ia3HB0158N'.%h"6j6Ip/g,nke$",.('-%UQYjCQ'etA(]lGP!Cr0aFO1GBX\F2;Y<n_-XTO:(bsl$bs2]eT5QZ0k3:cQ0HF.0c.mG0-$L4B6[SQDGaO0n$ed$_9c_"7%E#qms2S3=%BP+AI3rA=Ugt!&%=BlB!Q8rl4(p.W;^ABkk.(+%3Bf!AF_P L1iF<&gFYhbO-0H@Kt$S8KQs(YLVFF]Dpe%ljBO'#oY%Lt<M(0m??4F99Ekc"s.tc=OtYXP"Y+0e381+5=h=e9QG,J(QV2?1`h/>n?j ?DK"]kVYLb%\1XZab0n:7\mt1sHdf.pUOS_)#B+lat?3L:.T."\JbMJ,EPN^2 6d+31P,TJ:\TMRa6)*]lT6iG=B/W9l1)MmQP+oBH<O"KFZeR,sK"gFfCD9&Ae_S>&&"Qcq4Ak+f$jphpbGUc\f[UZ@RXh7[G?O0eGJ AC=[ot6X5ls!ST!+^:`01dKna?8=_V0s#Lro4'bgM52UR1)!ACO4Vf>4'R:r=K-=6Y^j"RFjjL_R ij?@tpa(EQ%n5.B&bCY]];Qr4P;LHZSm2b)M?:0rb!f/C457tRmAq1Sg\=GtkG;saf9Ag[AS*qF:FYp7$rm;;ksPf3Re9egW>eLT_V4L(a<\LAGjmkD1<FX&5DEpmtU59b]1M32#J(:fCoWI&!AfUgghsnTb2m_.G.C68?*,PF#cd#^hO)nA@Wj>(so:)mCI\4QYXoXN9d_8R>+@& tGbj*f,FC>+F`'\Ep`J1Wcj*3N8#nm;c1=qIi<N<!(`%AsA)Y'-NAOhgnpKXB W-3d'RB?\/T?B8Oe :73r&Ac.h g!s8f!7H@)KAN'.@cs01Ac;r#,;l.%t6s@gW!&miGDTWS;OA3GF67]!7!4,;cpZN("8(`[GR*GQTOC(_)tr=j)Adh-W%\'4\LMU\e(W&5$4c'IM<- MUj\;1O=iHcA\9D:Xtd9^a#Ag:[m`deAaHbQ=QdLhAY;tK/J&LW Ld8W+GST@TGhh9c;0,&ks $TObV`+osdDbg_c<>9*#HMbtdk<A*<mEeoV=rb2Q#PtA,ANTN,=:#= Mo5M\;Eq:9lHi>J!q4Q+k?VIQ(NW&nU]DEA&8pMTVq@6Orq;NQ[h6q=)?AEoHTEc<DsXBMqqARD&9O!9EY<7qST[bGk]F@A0&-Z*KWTI4ip@cRr.SW/)A8/'3=K^ZU:EJ1q@.O+p_JCQ+7s\eYra8^CCFs5Ebd+Jas^Ga0H*S"X.:2@XT)8jHT2qP5Q>QN?DYD%F8N=7F75UsA=^DL(* Y:odI+^'3"c`OK.;G`c[8;?c2[9bUmA37!/EEfNIQB*A"1#<sQJ*[a0GBV=FH9VN'tD5?>Y"3_._Y/'Ug&C_`&5V'')6[t.4n\<b+X-a[rH8Ci^:E5a3b XkAjD]&HWWVS-8/.pNM/n+-AMj6=1P>+>[qn #=A66c3'"a?_'R\QjPd\-f?c 1ga%M`^3;c+4RUe+8J]Qc^c,Ea6qR:1N_@#i+BgPD^QC4$@[7'<8Xi8^F\gEH7_,mHW?ef.$QcN6:C/?-W:]`RDCKN`d8GV@@R*k=Eh Pj1@94h%n4D#m<QdD?a5a@ 
,Ws#A8G\6AAY4.Asj(l/\<b<NaQ[%F-/%InEjS6E]gVdTLjBEAS:mb?H"$l1XTJ<(d1?L&64B[RAA`>A8Rt0O<(l7;R4(sg)EZH?!/(iS@WX`.fro5+$Bg07]NU^+S7&3Jik'IOK_ZfeGma;rK#$ZF-iPa5+d2LMWJR504_EW'/!&[&so,'GJ]NJLb2kEp'S87"_VCmX<USO8M4!6AQj>sKc8&pDg399c8Zcqo+(^9VV\-4QE'79ir/;2eI#HTb3i8nXX9po&?=i+75Cg$d8QYB"qAB0RUGUdZjq)EC)99>ednIT+rG2P5>/"l<$^#62@X.OP0<\l&t<i/aQAn2-K*bJ"JP1SB\44d,77@<FL85Nt_B8$\B#DQEni@^c_nX^jGZL]4W*WHcacrkEsZ3<0jWE9!JWMA:@4/B+sE_Mj O>K2fqSko[-G5+WXG/4i,lt3m7j*gp-Fqg,2;DQAA17Qs\*:UZgH@MD$7.Yg+jAnA,_E$[ZrA(SbGGRn$P[ Pn[$Ak]Kf1tm&e"7\3ZgYWh+Dp4r<`'ha;]^34 ^PTd8$Fd[_r#L*tL2S(O/TG34Te\7Kh%WSk`CocYt@2tI<ZU^"0 [J`;l!g;B9"jAtnG6gqjSh89#2Y^!DA8(lgp+EboS8!Q%)UHU)BW+!XniJb_gjI7jZnqj>&0YIVoa@#:@iq#5"gL79n$[Y7Esp?b-3Z+QjrVJ1i<LG]*UenM8`='sdi9B!)+^Zkbb,(R.+!H31&=XQ[/X[)oF2?GrLV\hl!`?Ack@P0fp=kL3o[g]__F-]<8Mp(2'f&YpA#9S3\ibKX"9W.,&RM@'k53k/I[M;?T1<^2E\K4tkf.:LE=*,0>B&m5p[nZO&j\7BHo@k(,;`>b@A]b)Y:*C*%g/ab=IG/^&W`7eG?"X>\"D':(p2`D>QANkSke_V6MeJ3X4&gB,fes+mGORj3NApg34f0\+L:%JPm\PA=;Gbh^D\;[RM9rV/!pB$qNAA9sno2"bCM\QRA.MZc4&Q`?^OVQ 9'DLP-S,'U3la.pjBK1\g5Z)i>)S33M& T52iAlVo0`A76jFEimi&aafI7>pOU8]_SroKVeW. 
,lM]!(:Zf1=V2%@/"ZPF:XF8TXDtAC'&rc<GitAK!RO@EM33MoBTP3G%7U9Mab@8.ZF@1/9(&S,!8q1at=5*C2&57I?phjEH scF`K/(=2raBP`AK,!0%BlY1d0L$W)+mc+6,OtL?(t&qr)pLS9b]9ECFm!KZS(V*oJo:g8_X-rhI[p.A$OT$#D]\PC6G<44 +5<rdIKBI(.d_M8/`;3,m0.!V*,q+WqU8pBL,>%9T':;oLIa<&P(D$MF8KDsKMAGig]UmRaIbOn<YbsU=Di9XooN02J'B#YW/BU[J9%WD]X#8R"a#%Ffon0$9<a)QBid@:t0_Pd.EON7:i"'/%pEf#AKsrZa"F[De,F[#fSZacHt3BZj]))[jm%coClL*)lTOGrD)@>P?9.Yid]s6::3`e4[H*l0Eg)\At:J#o38<4!>Jb2/QQN<H2`Fgh65@1n<E5=?[e(P4"qIhU)C!JS$a(1qN9q2TYBkh(a2B[)15mFb)Y#dN^B[.^tZ(+XIBiCqOaf"k^hfCdt*[%O$HU+9*l+mG@a]TDBHWVZIb.HE%:KCUkgf;0,#WH+-E2ljPK85OPnd!<25`AK?5[p>E*^9An:I%-bm,)Ar'i?0,So/k-G)S_(M)sh></2S(%ra7cA!&,Y6_n-N:56nsW#Hq5V[9*BXWGM2qr3QR;W>TqJH:_2T9n^P\A>g)[Q:`L 9X4[k.#X424]_oHPg.faGtb,"!s8\ PqGBae+V9R42d9qQ$#GC?>fTUd8KG!VIAiV$MWr$aMAf>,e<2jfgkB=i@\;CD$Oro^2B=FN M^o+ n8@07?E!`ebB^@BWN1?IR];O_f(sPdLnd]6n;5"7?n>2I[N2L?69OH.>.iZq6&`;^iTaCK !?Ai@S.1%n(A2\$;/fJ!6P0[YL8XT0@:Q:i]C*9E[-S,t=Jqchq:UW[AKFq%A6I1R(F0,'DT3UZe5PbG*?FHaRoBd[MNja0sN@4@X2_T_Sd.^> 8Z]A(dD>(5K54HM5@h"CK:`F/4='/0+bgQKc >Vq]Ff.HK*Xm`Lg+`L7mk%1"32so5Nq2j6W8d%.QnNZP`t5eV, U3Jln$]5^5]^O4lT8@SgbpS"F)7'p?SYAjBSR Q`opb$BV`:"3bt>WM%_CT'(Ap3P>i_ C$rbf47bdK760,a3\&EdC_]d?:\Na$)_5XF;B:_ENnF&Ltd"XEF[&0JfAsID)k!?*21!rI&:Cp?/tkUh>:&:C6KAFqCRQe3h;nNtD$kAqj>4kJ#s@L'YCp-mRRZ[R^p^Z,BUq=6=`9+!)&5AoJEg*rWK]nnb@)'"/S)CsYO-<Aj-pAC+LR>X@5?dq.MF[PWEU5pt5F[]W[M#rZ\Isa%#;KeQ=4t[r%K)mh6b1o04L]N8jlEB-*F8gn5o$TcJ>T#%;1Q',oiXOQIi0Z_lm,Dh]&0e6+I3Qeb9O[YEX)VB*K\_1_t'N;nNc*$I_7'0l-_if0pYIRimXPt?ZGZIs<\>Vl4t]F!X4E&(rEEV<I'1!+9: S$Mg:/*p^'SN,W4_'-mm+F9b[pEa$Di1&jU/,n&I:H`\6"=!Fit"f+W^Z%s:b3-2D5,]aeD5%[a.=F50nZb^Rn$>o6Wp[Ei^=+r*q)(lWCFrUk 40p(TP#ATs//KMZLGDmD3G9nF]A>_E+*bfHq<nB]hO$KUAYS6%B-I:_]CN1PPN );>A.A_-X')92n<S]d^QJk4Wp7 0?YWF%382[T5>DI?cj^nTCXE+J]Jo\"$=Q`l-,.blk58Q3N<$X?RQdA5B6=B8pEDR6:KH,ppA=jJa+K@n6^bn$'$O/)Y>J?c$$I-d,#3j^]j80Wt41MWs\?WJE,;COD?4ms,oD.sIRQ9a\!i8t961pg^r4kF)6VXh%&Hl9b)q6K;`hc#H8^n2 
)AkTmRhAMi3I"RWbVkaFcN=+%5FYH[*>XUn0/b8XsF!+f^5/0ZfClKWRFbo)M5tjDPecUD2m=''7eSmM/[mLm!r[f?JmZcrkg!Lh[1dUfN?dA@)F.J-9M)3$.Y20n8Zg`3!(UGd<GFWZ,OLk3V2A1'We#bKF&(.#]1H@&LS8c/[_#&paml4E7bisULq`>(MW(!#5ZTk2.M"2cSaNAOAUktY' nW2%P$'I>U\FeZ [N"$%gr1Uh=[6AGTHBICP3:\*l#_Y0k"Z+i)W#8i_:O a9$IBpl>:i(VYHQb=`r"6 _fW:gmJ[^6*`E9<gJ-2_6B#Zp_p.DK]LC%0K3col2g3_FWKZ9/R ge&+EMk </5p>21NAIAZO^#7'9Vb?oI&.\S7c%#^#opo9IXehk=\0$T0,G%e9#82k=g$S&G%6N#^S@rDA!+rrf1"bRe%AL (FOT^tDX"( `1?C:Ip4sIW(W%[>5Jh\UWaBn>X<mJt+@8EgG_lIbM+)O@NoiR)FB\fX=XC3_G FPq3:ECsU# 9on?r'd59J'^/>."Kn[=A.c7#a"C)<:kfN" 9JLLN_S@aRHtdj<[DVF"f*9?)do<>pD;1kP>]0Vi..`S`,5.>9fq^pL]A0`k3Vs6,]857=RYbq@!UHV7#9\,F6f[O8Rh5,X8b!*h[.&6o?BmZ#9/Z)=[Ff"EK@J`p.i7?7s%BA+n6$I\-VL0agqa7[42#t)+a<lkcQ,']@$RIUdcnnW%"l1oU3<4gNki&^=).I@U@3[aWLTn`'QA<_4$3VV\?9Za5kO_/U_>`/-F(Rs,.6$+6As/EQAETU`7_`*NQJaYlW38t<[FGQ,\S2*q!7mZ]E3a2[gBgS.p=<s2JStK"Aro<,?UZqPR$-1EOp_b.,kgN9:0qI$RfC)f]rJr]lPr`WZ,/IZYSCM,gTQIJHE8GX+[UUMLf_@FA=HEQ5W&>`(O20#3ZfCMTjW);J;1_>6$@ZdU &m[HUg>Q<tgQ_!)k_QE`[%.ZUB9]k>^^db"&rlbj?KDiifhg/ZZ_APqgXga]<I?-DH!WC33EhBEm:UqNh_a 9 3YZR`nlt3FRqXKC@5e-T8E)NmP/H)gSL%bdT^Ar6Wdlqbm,1;"d]hL]>(?jl%0\"OeGm?l*& ",ptA&]q;N[%KIrr&p_YC/BpJt>aANfsB" <=<80jfO3+pc`%iQ[G?YO6eP+]>d?nq"5,^/b+ZacXQa#(/!&"bCEbSqpD'3WPdblq$F-AJi5)2(B3>5`jF[9LDU@hh6=q_?L`R9H3&:TSj,hfhNA*J&%d2[eWol4`ZW^)Y0^!M8UM)Fcq<;610>oIQ6/7GKALd+o.:$:3kqk)Mot^Ne0654ciIqn%h+O@\$V9D+tt.B pB!1fVL;MUGE`s;(<\*ZK3'.?WPO;jH9K[/G&>P$8Z#-&E_i1Q(^(>e+;[-g0PD&.AIlFo?\RFs!T*Z?#W<d2h]oN@:2`R>7aB/.+4oOgQd@].*bW:=T.qJ3\j#%5^!N^a `[!tV*dojYTeUId.kI>%?8Q&5Gp#IL'^Tsct/5E\imT>p*oJL KO#5Dr>1.1mRS2eKJ_rZ2_cS+c>rs;kE3a<[K3[T*-%R^`@*W1e<i?XfV#qp'#7 <5n1(M8Fhh>L9&_&l#=3NW`+Y^]KVcg]ZqYG,l2Z,j.7,[02J^J]!oeoG-5:U23"5Od,da430\ZNXIHd.rJ*;jUt9d>(t*L^GPkX]ce Ya#NC7c#MGa)!7cN)j5LF%&X0?D@Jn%*/A"NAV$G#mI-+mj)#/^ecY`^!WKfRk[)Krjj1ARmUip]/P#qG4I9H!<]*fVBJ)k*dr3,gI*\'M_bb4C8`ip<-G_<?3qSr00rZ`%W`fA*7Nqg6m`". 
t] g2mdFdW]WAc9)4=#U/P>ngYq,YmqEj,$oYIi\#NF(>ZsEZ"JWrfdhTGDVP6tSH?`Z";]:gMo_QQYXA7I4Qg?Z#tE>?_t)*t](SE;r;J8cS3EP7.@3p'VJW<A)kt` t5"(Kq(5$9cM9(?7cZIbID&b=F%"\"=?E5&%5$i)AKY1&3IY3cQ@:\!:'BsHm)f`=W]7A(F+^glU afRa5_Q<KWr153#1Q[&B9g`J;O'7"?MJd-7]`)M/8$QBh\;e237ma/4!D9aN:FJL;SKAWAkDIt2IN!5'TFKVm?C_SA="(>gD(9.).N'LTH5eA(sfaM8)K'8QK2Y8*pNK9KN!%"6'@ho>PL,fHsD%eim[CThlG&SH>eTd-BUo]MC?D=gptat:;":Q(X;YG*5rU7Z<K*kAUA=/oY/qE%?ft`LdHnI-j// =(aqk\[K^ohBsC`$%LEs==T$M `F\]U[5WQVMA3oGd[p8l]AJ`m?-IoSR&JU\c\!&?,##nrQB:>HN&[9[!)eRGqnV#8A44nOnh(0Q#+G))c-nQl1Wm/]t4bfpO&A(p)H_8@!A_!rP+0#(8NNa\53QCFI<tk+AQG-.=cJ-#5RcTP>!Irc\#NmmKX;AX:haa'Xl)haG'N+(A@hWfd'QR(Xql,mX!#OA.-?k&Zl)HK5Lmf6&Kp!!d<Y\8W<][Q 2.K&b(7`4nmjl5ELj#PiZja&G-`!fXQ2q<VKb97D44;7i1bTe^JcPJ#;Yn[E>S[fh:rabYhCSl#c#B'F4ZZ-UZ\$5XJDf_X74K]nr>T+"jem!+*`s[Pq'$C$oc'/r=h,'6!O*=3.1m3.SPF4D]jS(X0#&@ZCo)[GG_?`oddGD 7s/7[bL2 fN#KQ:7i:B5(2q$;s=n#^1"c$,5G^6$`V!H?!I%/M.t5n;;?%rph(p0YP/[heg5E,:o3toP@=.OPOPUSnd%RH=7k[O&Pl6c&kYdi^%Yj?<BgZIA@34tffA;lIZ"1(r#Jo4`KG<nIp#A@^/9j)c7(?:@BOJNeI!&E=>"FEl-A>?0isP3KJ*=k$--I5Rr:EOJWo>1bA:n#($pV_7VoIs)GR2[p>C2C\p5A0X]o J?QjNBb$qIeSQL=@62^#QhfA*aYA%f'$XRl5l9gh_G,LRb7a7O1Zg_;r&A$tA\PK&B8sASCYObeOA=(FACKH3!`[DHfpf1\JQnEb_aOk@UbO%?'3-WfSV?b)DIcVD^I;eAU#9`ZH(rp!i.rf0tKF[aO9<,A8#or;^g50XfIG5;t:3<)i"gG9)7CQ5;7MLf>)nr9EY@kSD!Uis*c0/^Y[aIYBlKoX#sQTP4p2+]$ZA1;ZmF%b.aUTSi<"*+@A?G.\6d:EX2%=1PfD'oeF!UtZ^KJ"3JnSh]"s^8M4* ARL^S+oi.Z5-$j`6]%?H"gr;YNAr)-rNT!E6MgGkVN>8`9e#$ c#/`"FR02ME,e2)ber_C8^$#USH:EaV )pi;h=oT aW";R&&9B Q]P@K:!1(A&"G?]L4_c-j3lFVCCqRJo*<$3$!R56l<^*C;)1GBl.">LC2QnRi"AU.,%2o;E9>F.TW4rCOfo/.iH_\Fk'T9b3bR=T06("Y=C>A%7JNc(`.G":j!GCto[<hfC]9E!NOA@<GN1\fWZb[-o5W#!>aZnDR2LTk1C/,A$%Qna7/lFEjJ-dHFf6c3`2OQkYBi"9m"YZf+<l"qPc8%X0=#8ZIOkEN_EA-t\Z$)3bCh,A;eqX%i&0[iWm$n6Gpi&?]XPXZrMl14%H3+R"i#4Jea]3_dQ3 B1jsIVR(AR"r_r*T%l.b?%++-G4O5B%oGDg6 T/Vij GYeR,'#q3OgANWVsVdXRo'2Vqq!"(T3U>,Y%.&o*;H<*=r0- UT^ gp^c8J,5cgsUKleXTH^l?kDPC1 =*l,qpe@n>0enhaI!4A?OR6jb.ja:/ 
qrO(T;_9iTMME!F/]rOD0`%V.]BpcQfk]Po:)0kN;)d%.W>GCY/gjb,#Ce7'mCOW2FKb3Ea4ZJei%:a@^T`p>D9Q(;I?U;F7(Q!eV.$(O>l_q#olXJ!=B3m_N;0QB4^<nBB%N']-0si5Y[<U5:nKi8/BmThY,q/2!FE3hAA&_F^'101\sILD*ogY]CoKW]N*8j52&!F9'QEg@h[ d^\r*(rOm%+];!#)Y996'*PW3tq[A.:%=r705f?6a(Uj<prOQtaSNINUIK,7(;nXIp9c$g@(ID,N9THl6A[Sf!QXI6nF2A"ol3?5Z-X-8j/3gcm<mbPr3Q0Y&_R;j`XdNtL1Fj2l5mlcS6rf(IV=/dS?\\(fgoEfL&(9Y#*<F $K>pjrmRp-4.#=#)Xm382Yi&8jA[-a!(S2Mi6VZ+>Q lFe ^h6P)_N+0nsbb9Yk7R+FI(WZE8\nN-.eT;105'(^QCe<b_"eSJ^es-BDt@O!S"QR#o"NPm^&nM<: SpI.=ksN300qBAUQCV!Yqbcs8AJ%C\(dN\YEI8,h0OX` ,#:T#flVIA';T.I+cCrOb7IN8;LHLkH`)ri:t7WR>_E$J4e\/8ThAI:CU`R'&>HPL/pt(n"Qd\A.5G3r_>th%lS_^sk6H].jfmTNpsJ=",8$"TStbUY#&8Gc-q,Xh<gse$deq<a]F6FMYG2g6W`_oD9-%Ne;0VnpJaM\f-/obn`34jB1&Rd5sbl`6<:D[.A0[DStLA=[F9.;+ 7,48i!56#br/X'eOE3^)cJp$0#qWI36pjUq\Rm+Lq,P_H*[4Fg7lb_(8VYlaIn!Sf$/VBg(WFALPf=#,PpMT3< i2af/k]$=%[/W1#(dh+/Oj'PQjAU!4H*:KO1_Z\?;QL;'N4 "k-Ee>4]h.p4YZZ@>hm8^_tZ[@AK_h$Xn>47//PMT@Y!EO=m49=?CO_;(2+nS`3AgXf&][c_"$N&BUpnj+SFhTQ1a<dhW Y_NaA+CH ZWoE%e-%'3Vl!rZ"#_g>CXsZt&WFpNl66A&2V-Qt(&&""=IfljF@"'&]Peg$ik5L%TKo)_^7m=H37bb2(pOX-APf_IlRmiAE#c8*Z4MI.bJ1:``C@pSH):W8Cdo<o\co+>SE#mbo9G5c5"b!VC9IZ\6GqgGtAAlS#S#U6"kE@+$`P3YC$n>J52S5t5P\*Vt=Woj.->>WOg`d8:iidhR\F`Th[l?;m<,OAag'DY_DmXi-asq^VA6RW=lpHj*"3;J_dN:(AZ>CB<a*WrK"bUc$=EE@^/7k+o+L+.'jffPTD4B5e"^+Nb#tr:\Uj?a4(S&?O=l/lK$`%Hq`7cNh*7Pf;g]5.78]P_MN`PfVk'KWUL%HSgp@(Z^@#;nlJMXpSD56I^c0'V!+Mdi,pmK]L'D[orM<s,bTB.aEia>gWC&2tqrf]/*jl%:Y&MtK@?>\Saf.>:cmp(-\n9K>#5DCA$!_+k4kJN'Th]3Qq!dU2Wd!=QlT&n@^eBd#$`/rgc1`IYFjP1KAh7!RG2-%I\t"ECc`n6t5`sCgjHk/rMA5])U\7hMGh<(UAc0RP1M*VaqH9A;2XY.s^s8-KeAH#ipk+5@U*DF4`#+h"*,>3`6[cWB?cGR-TehldI1#E^88TCKd\m7&IQ=?!Tb;DM_$RI/&8;Sc%m: V4o;3[48$>5d0R\Wem;+US0E)+`G<2=]Zb*L$`W0[^dN^N1jQAWEcA$)L1sM 4YIaGB>9<s6n'rUAE=kM?OEof]"ng9Dt7m-b9LbR;Wj*a=^-Z]Z2OB\Uq)qS0%AUeJ,(?aK6i1$"RdFh61VO5*ZX/0,Js(jo'&ITRgP7nl/6>,e+4m4C1_[g5[s-S_5#L2f-ZkCFE@J"l?s1(&*":A-X'Ki@B*Z?AFdY5jlk5OaT,I^aTQd^V]iH2P9GY":ph=P4oLi-)3P&[LS3"cA.85a Vfbl5VA`#[(UDKh[fn8+MkV&B?]`6D3o#JZLa9Sfh0kp%Z6>$MMN.!ARk$Q3og 
AXd"CNCM)&)669T76A'*lB&A".N#Y0gsJ A3sc #/cL"&a[dC;S9$.C8R\[Tg`Rk)nmt @AD6>sk5+6-8V=]F"Y5`Z-^L>(2Q0_ pcig6GKqho0/5dZG>6.U__ S6,!L) sp8eYZ;<Dd#SYY\mmYt1BHO7-B=q>F`K7GK-]ViPKqkIrhM.AT%3H%eH 'mqa<lgTX Xii$IC"#_@t*KGro(4j;9"V.`tPX%3"#gt/73ta]@>4bh>\.3P)Cis(/;W<%LVXeQlQ5lO'31IB_&Ue0`<AdoC&s<bhdA4$^;-4WD8.=RCBq%OQ%p[d64UT^^9`9.ZYg Ak.\8oR6Sba!rLVn3'C@/AHHE90#GsphJ9Prh$8.%21jTjhN&l\#8G[!.3C4 ]d4J^PTVJMm4bpl*3PCkskAktVO1(8&!h>#KA;A*1oGdKf268<lLEFP"9K<!CA\qo 50!FH[;MPjtiG^N>licsVtcpU,5<#jnr*O`Yl_fTA<gX+?SqA\1<^"&9mb;q?$fn/tS#@<9nPp#b3;=]OGb=Set9!&A/,e1?C7&XNr4Od,OAV_;mRNEAgcjr(00X]p*n19:@inqWO3Z:3Oa#_+l0I@`J$br@?6E^]m'!$Fm^BF=CDlnc>aMrlPDU ;d JH4g1]Hqan Y*"qWsiA[0&h+6EY .D]4&<d$ t5-T:-@/Y6!A]7;@_#k-%]A`VO`h1gDR A!i>e#eCRCSsC8Yg?gRP9rhdg:@g.H!X2Ab RNKBsQ=>mde.t3)7&m#DbLG]qkm1Gr_tAc["br-7-,3#r];%W$ 3m>>K%JgT;,!@t`IH?]R9]CEq*k)At7Q9<@nKbX[8m!:OB)BWP#<_AAO89sON[g=LMBk3](F]A8\RYt[VmMs#,iQ[)eC6JA8r""")gW?Jq#5!AJf7nAa)R,3e2Q?cA`nFp"M^)kO[))qS6lq#%<8%%,N7A`#Lk= >kAaE``=S,r"EQN\3JMMR)bF/8!Wf.O%A'MqIg>EC$s`6%: >3]pHd[`hZ ,,j#dH1>OWPkL6'^HNb).YhY16B5&s:!k_,"&%A:!ZRpNfHVTO3q<\QNAt](F%K,B03a,MYh`k<ba]7R.,pOCE-_j.Y].?Sg%L#m_q3*(AC\rU$!1:CoYgAcF96VFGlK$:Vpn^mC0'Fj\/'HY6-@,8Z[c=p$dsqd6l2\Rj;3cbM$5][jhgrTQ,e90d_)l"EA[`5r1P#AXjeanD\%\&he1e!J&iii?Eh8'FoZ6[djVAi&P+c_4RZN"'SoH6&kb>^jreYh&?NCqF_H8\99 tWpjJJZQVqS?p.!-:NrIC7*[7g`dqa.M[W6i<c,VaV)Q 8'[>#8`I80ie6GGY+FWfAp6Oep^\=#=B#.rA8:X/=d` rP)t43i,s^bKkPBS4G?_Z]Vhb=HA)+7E0M ^V,&^`c'M&kS(r6b#ft(F*@rYg5Zb%4.gih[+,r '_V)d07/8(d7Algo)GEhW>.g#jhJ\`C\Z,Hn7k(41`o:. 
`9 pHBRGA0q/%<_+43<Q*U?Dj$:tMX)Y3SJ-U^\X- R[^_0H2hIR/T;-%@2PtM]c\??A1D^F&#`H'&*00MjMX>_2)lB9gjFH+OLjVa&:GrqR"$_'m0/GYKA`FPio<Z-#+1qf1/2YLSUW.:L<+ti7k^j(tVY131(Yf !Vm.&^rl"=OR-`hA-jlogkW]M.k?U<d_I@IXeR[c>D-2KlH`fk"Zh,@sOG30,e]#k\4a)47S:%)lhNd2-Olmb,r)hWDW^pq6t3](2('jj8V0oDRe)-r/[mfAHol)'>"R/5U"5;5lW>1RDc5TB7`gR<(%jmgVCl+.dLpmsb-t  I^&@.lA(99NA=eL'j[ZS5iQ7(27&-JjCTlHt&<4r;OorA+Qr/1-3=jc'g.*$`Ap>3)O'_M"h6Q2?ghgsPrJXRbA8T._&_)04!0k`ls3/]AH)Iq>s&aGK7fU55GEQLD$J9'W3'.`i2=]F7q)8e<UP`RDGKG>tEl!B+M5_Yo1bN-B6$79"0KM]CsnY6;cqeiHNdO.9J"BI[;MCo>R!rDX7T;*9[_ \s&=*p7e7>4L'L>knbW(n9K 45(T_<mYo`7FDa&cSJ.:\o<&%^t@OtRqnY^lKUEE\_]BSZQL_4;Gb^#C>G[$_4k3Y^8Yof)550FJ9,^6 #AXiQ/tekn]OA3Q_IG\;nbP^-@!EQP6n!h]`-AV*6Z8kUQ!<fBR-T"csCmN,b;8O/mpeIN-l.AaQ[Tc9X^.i;$If4_FV.) ``9,lNI\Mj omK.YB#j D2SjfaaW`?TKi+CY0dOpn^##5oW3O=YN')QI-cBXb-5EO+Y.^,;9nr2n+;or lepUg0'-Io+N*$1^[saO$<<n]Yo13^k`j+/UZ_gorfHm!tAmWiO(<]OcSQ1.$:F`bF&A8;4@MFKJe>g&dL"\@C&EP cN/>O<ElY c`Uk Zj4G]5b?+KZZr*m* Zl.pd= MS3JH!rOC%E(JG1DS$U&'YWbGn!^CL$SAd51KF(kL'ojFXsM%Ck=AGIA5J5>B#A<J7=\Oob*n38>dfA["=OW]s::[WLCfiRqa(9;ldB[REY^Ep#//28eMZ4<W%Df*q*D5AM*-*Bc"9?fN67E<P"en76$U,,!fhW;<-;?UV"$Z(H?1m-n]Q\?dB61'\+lKA]TCZ9H:5pQ-6=Q$G4729/#!n[s<!oUB@0Tcra$e.Db@dAFh2Sg$3'Zlbmr_oC"o?]>Q<L6[m#o[[HDXS>T%'6ZfDBlE[KP.^V3ljM3A%=?,>+/-P83Mik2hsGoB.a 4dne AeO7i-mqXo g"LQb)4!G_ [e">4Rg[Yi('P*Km3K\_=1(4IinfR:<Y6adAG eir?6UOAd'Be_bM6""?H;QAN]Y6leN`6TX(I3f$::F/ra@fAjpA63Yc %2\3rJS8b;qsC@]c)UT'd!A:Ak!'\i*P1 25WDDb'D)b^:so`E4l=?8F"lpb2nAp+\eDT21''$m\&l#h\g+%"<dO'#.aVE>R0lr8lfk=94PLDH.IoSBf,;]#nK"3kb?Z6RY\s],LZi_"Sd@NAI_$GHNBECT-grSAOcL@WX1,'FDeT2XL8OK;d-,L)lP,3\ngKVG[Riea`tK2rist,D&OfRZFD#p9]8CqpYIPq*n)pUEMW- FEpiW;_(< <6To3Jb?RdV(@1j?r.$#2^>U)#AT]+t?=FcB/aO1h',8peaN!_qE'CM;aLTPF(<!_[8;'07l_`OI1?g,^]fb=;<k6KBS0f(.6e)<l3aR>pF"b'`kk[*G@%bJNKU<a]Ij([HC'lI<h\JcMPARB* JC>)=]#?FpeV#28^Ffb,1+mY6<8J4+=$M'!7 
=_"1PP12";)Zt*MT!V#B7coB0lRHT]rLXEY+3t"-oq%`gV)1IEs7F?Y,67h%8@6,[H"ihY9+5:sHTNc/qo-esI"_nrmHUZ7Y$!Gi*k[.TFrKaagJ#.ZYc`IZDb@S;$R`T([%\otr(7'-!-8_P/?#1qX4e<^==3L3codji\,sAA\)"NR^]KX38CDA_ZRk9g/*Rgd'dIU'T:UREr*9C]bR^"e)&Y6nC(fr]"=0?QX:6ldPb8)\o\Hm7b`:k_PNBYO(itiV"o./S.s\Q0g<I;qQ>(!@%qKX:32LDk^S(t:MBCXA<j Pn'4tpQbgBeOX(V5?'WgC,7#Thr-n K1VU<VKjE,23!,G2tG0'?.c"#],,K'-9AgQ>6NN1<q%ViZMW\OAk;aF.egS6Y+(g1at!Z[?N.IlpD6"BG.!J'7?;l3>AsNU7_-)@(He/,ltg[<=IUe6,KP$1APhQDl*O&lpe@2At-@SYUb0%WYt6-,Hg!)>DV3BDO:9I()T/t. dja7JL;Zl3@ )K7@*esQl?.:8Mq"YW@DisKkf@T:jA+<\AaFYJm>1d%;m:('"A0aP`C?>D7 eN>JIfTEIAS1Q@B4kRn<Bc9h?'+#jYqrEWN+n[.?f0%cVj<WmFD?\!EC#[;6$  $h)H0WGIS=/j8r,LP1tY<2ns' GtS=GOG)8mBnXFbFbVn>\/`4JId3a&'^,\n0XGE*Zonfp$1?rO3LT3Xol:O`46kib+<\F,ilohHRS1?<X")J'<IhYe'ZH:\e;C]m`Mpn65`r8Xs=tj@;<b9a$c!0c]<^_MQfZi''P,M=50C:Ac*#s=5p3mga^t=nK':YT=rAacMX50lD2S*56F;[: _agqTtBLO?&D AH0/]S4O%E,jjpMKkObY.82n:PqAD[9+1UUkmA\;HZ>ElmM(#eNE(!'Ip;L3O"t'lr[VhZQkcDO\5*,0d]1PD/( <I#:X8K:;9^m_[jU_EBFF$;<lCN@PCl*S\Kll06=FajDN0PgBT6MDqLoHQ_SC^:A(VAK6;*EQ)5^8oHk\A!0%K3SmDf_##tRS^c"t.3J-nV.&]lcC/=Icq,7&/p52G]idg2/s@[O9##!X'F^dmJ^Y@DF5>mf bcoCVi2chgQ^I%J1Qa3I+[#k&b;Rd?PUo$Nj_:*[`pPNtob%3Xt!Vm],\`G<V`]QiS<!l_a%AQ?"5D9UaVZ><"[AAYl0n$k>dQ[o-SNbQ<qTEND[<a%4O[?J/G_!^0[VPXW 5Xt.QDD/UG=NOsr?`q[?4.)QT9K'J^ipt=ek!g-9>'ZU,<-T"<B])n:GE4I$!4rRGAm8V<.XQUsiHK $aYTl0@1W@5<a% !pa> Y8Zp15qL@=Ao[NY2M;rAJYMn)X8p;c7o6*ilL(`mXG:aH3KO oh1=t_*QZ8?L6Mrs^+U9h%O:b^WlA9QiiRi'nZipZ._VbMVI$4rBg(R5pG!DD$bn=2:!*HVUHE(4!TDZO(A$*knR\hoWjk8Gojroa..Sb]Md;t>Ri49_f%8q6(p0i1+7//+5<KJmp)80\_m[M.qmCl_st06T0YKfldg$4N =/ MQKIhpmN[\oqOdb[L$A#m6_^H^bC'+t;`cZ,QRm\L48e1KgoZO,i/O3i9$)];(;=8r.q;i_?PTmK+A!Zq 
J8%fdh8(m]L*6?2jT8j<%!O[("MA3hX=Fl>P0Y+lso[="9V?)#\'mBrihJ9`0NG[*'KjpP(<ApQPWZpcG>*Q9ofL^e3h*kb,B5YJ'M$#^P>&Y135]ci3aD"d&9P,"%5"_YUXpO"F83JU))9PJk=?tp(f0.AO7SO$K>B4AIa8IqNl'OYbY"jA;[7n%Gag_9W+egY.f8(k.h)q%LB@G$:#C1FA2XM0>TkgG$kF5[gPD#m&`je4[-'8PrCC"VL!)sf;7E5Kbh)r,&kJHlgEAS\-`bqlN]0Ya#ka2*Mi;K+@EArBZUIp.E>NBh(j`U<+-ntk>t-QmF`Y,jA*I__nA._k/S"U[ql^[Ctc>e=Rs:/oE7/i(6A8^c[i6'\tlQ.q[#DoBe[<BS(GRMR8(f)LTWWh9pAD`R+Mi80C2?APkYH"nA_1^+osK\_brdlsdSbEetG0E5$Irk(:m#r_7Eb&'e\l$TJ/lAh%a7(;h`=rHol<5aqhl:E,jHcpPoY9sE$\,?A%! e>ZHk$ Vf1W5e?g2f\6bK*SSYEZboDaLqNrBB8nX(sIU+*Ca+!K:&/f1_`Ym9rMWU.e,r/gpt;_4Pb!\tIW"m)f=T>UrS2+ErcW=!#LbbR3h<Cb(`3n(fOSX2!jTA!i9\K1K)AV]12k!!Hn2@8FoqfBK0)5kisTAbMb:P"^HogtX\4q!OeF^!G,c;boDYB/7$#cXUV,+0W*GjL[3o\#(:a?H'BqipVm1-sDj$=d>niPS#J>3i61;qWL.aVmT!a_qA:%LZNW7(]=F^p^D$$cELa&_e:AACH0D/b'BMaat?hrA+9tsc)66"5OsRr(ckAUFL=TgA_4Da")KA0@)1gp#c1d.P4-n*ng*.XG1f.=\-;(f]\s"sE#sfSX!gY,?]sAjAOs^V_&b4c^?i=?iDjp7p%^[O+jC$Nb@l9CstnbkrQAb?'tRRE!KJ?+TtQ26[&eU3KW(L7`L)O>8E-)GN4H6/X;Pg7A7j,$]&[?KLq1* o[o'mI<maVTI==r72C MRP./A-5V=0mV`^0+e;.h-&JPGLn&gch0'#@-?OKl[p[8G]8LRrOTC-+dA%3LenE$iss(cg?/[L*nH$A97h"WSt<an!'ZceZQ-ZB=bFs)KTFSCA^Mt^I88`i3hA<7L<!e@jfOZY`?nMO;D70[A=ngLid*=&UXO71R8<(81l.Z:Qkh0.RAf\+Tj4j2aEDJg?>\iP_m?ECe=AMh)Vd%?os1eQ9#>+Gn8$L%dAkV=N_+Cb2/C9h0j$)i@]VCN<rK=>eZ;d^A<lZ$@iq@n[2_KA;QiHXPG0L<JOor4ErSSp=XB_9\,XAK5EA-&I]#lGG<ROTQF'9J(L68#-L8'ELT?C_D2dEd_t-b"Js&9-%m2=85% 7N;AmZh]aip9CC;`bM^N75Xr'$e)+DG(,\%>,Q7:Z.?]#DV6q1SsmMT$pqN""p#`5HjbIH2^((E*dOp\UFAZVKkd2S?FM=n&[nHMJ]S2*aA%WFE7(e"tQ!0T()5T]?F[RQ\R?S[Ms8*-G<iaJA"UDtom+[I^K9p\9snIUt(PA[["d3DbTE4@3)7amp,$,W`Pl?A1]Sh':`%;BR+@bK>[fP!_J<6MbE&cY=$;1src9GdkKiJ+ZlR3m?M(;Fe:<_D`.;I HI !25$:BS-I<^1SPj)!NEmJccU1`0 >o-S!b7hZc+C(.=k(LI66g-"WP>Kpn^p`EY)T^HB?nA[UW[HM<RXbY2;"kK-9gH[_MbEW_ET;R!M"?U"p!.+[/c'e FDZ@is+!2tF%[p3igUjIqIh.#RiPns_JhaIE=Ei(T^OPad#52d 7*^8[8kZT MZVc7C'tl8jA=\.&8VQ;&[;n(8*2LDD$#P/YbPde;c3<sB1>D6&,qcNb;($:gJmLcLa;mR17pA0,I1lBj;sB4U>?t>'_sA 2<7&`HACF9IMJ?HlSCGbng-J[$dG+&ntD26WAINnS$ A]_Eo&;AJdci;W?kUBK 
JbHDU2=G5Ef#hnf1aTo\AsqX0nBCH'iF)sX*->7G?;;FSkT%]^U\is?>BNWMXGh\in!Ld=b0g6OS,41jpN93[OQa,1.*kNNV<H.sHba/A$1Pl^nr)9d42AO3AU#^V*5A5EJ1Cia$gp^DnSGt%=B[=en?!q\i&A6eiad`V>Yc`IMI?gX_3YZ;ZfF@+o>a^JZn+W6P;UN0U<.WdE4)=s.m6_%4fQD&t9@Q)9>F"O)3*oS An?632I[`68Gm$rdPAKMtHb:dU)R2DX#mmVB3GYt71=dDX.D,kLOnNd"EC9mP%S<fD#&5"$'r]5O"3c#bRaIGDtd%lb Lj?`fIQjJ,e+q\>9Ln0IA&)MMa C@1.a+:"J)A=>LimCYW(1t+9,4h\./VSQHRJt7f4UJFn:HY7bef%/5=-n=;=Ek$o[Q( .:-a,fQG5TE'dntYs7hClXj!)CQ20gEE,3B4^hFrCOL$;id=;lml.kg-]dX0[ZiJ?mZISK"L#H=4T^osPQU(P/T.AA/Nq=oh4%n:m#mK<t%):7?>M!D;QsN+1?+E^>iC(5$9-K?;J%^E3m.UIL6WZ>i[bY-sDtO+6_BDXm1tZ@%c<`&b,g 5XVjH+AdPTWB"+sp#^=hX^'NnQ;o;<7-c_>l]%:O@&2ei68IoJO0+h#Sh"'0C)^1=-qb>C)SQm?)ac!<MIS$K[R*=D&XI6BSA=U@&%Vn*4p`Md0OV`c\ Lf%qmJ#$I?5gs` ((o"]c6J,i:^!gAb+Q4rjk9;[SHo^"&d$@=<^N%e)!AZM\$n=V`_gg:.;k5R8*aOU$&&p/-Y-IPf:j,XtJ1:(HmD"_PfOU'Ao@g+M!s(G[f!<K!rfA\R20YtA!&B8d_ PqD8@I>5DHA\c4sNW@KQO]c_FBO^B>5B('FV0`?C36-&h=ap6 ;KHkY'28PtoM00:m_!:h5AK8<^)O@&27VPRr>s5L6CFB9ZssE(ZfTdeGPI$T;hX#ksB(D8OY[:7_8oD;` ld8QA#ON4+:oi(Q75g]Uh#'YfDJSR "nVWiP$q'>(R[-=Z8T#WI0rWo&k;0<$\9F]B8O5!baW)45APX0PTd4ST^J[F5moRCC0InB)/t7Qcq'tWL%5tX!]Q)E`fN38\8$hGDg5AA:)e9cU;qGf2AG'2Wfqk!!j.SV2EYn6a'_Ors;=[F^6D5B+M&9Oa3J>E-DAMA#[q4::0jp[l)h,:3Y*IVcb/"pqV(EjehdV:&\X&]'f@ffj4P;)'W Cf;_2-C0Ps_tJ$@*ZeV6)LfY[a2U.m6'VL YFAG-)?-D<`8!bYab.GE&AiTj?*V(hAsHaqNHUN&3_M%(Z3.@NB;%gfi0.aH,XVl8UTlG6gAlcZsgYM\7ZoEb+</?@ekq<l4'CTEAqKW`rIU1iJ]_hhm.8Rh$ah0@HqnkAG*R8Rod6s/Ve#ht!ak4RMo`k`8N*3:LnM/5lQs*/^aR"`fKSZsCYd7?#X"p*A?491oA#Ke6id0*NV`c_3lZ.21]AQqH;c+BcX!]XXfc^.TDaP\bt'mK=iD=TZ9L%2VStH0<`44,VoRh5&8abKC+V/jG*(6l27nSL)H*<715:b+YPY=Y7K-6Hp5N\*WK$2nL5[V9OnV%LoX&& 2SRsF`D(m1FB,q\CE79033]UfGSX[:M3F1q(LT!hjNATto:QQPe7Bm.6A0tBfSo^-@l27HS3n;":<K%d6i(5CDMihB`nr7,0DZo5t@?1>FW(OB+m&g8AR1<!tHRelLAP"?FRcatTtlCCTkWM6V3V:HK"l#ofVd]9F!:ga-R5#a]q 1(Ao$.i[9*X1M7_MM_gML^Y2ebP*9kt`<8TqqN@0VeW*8c60)(dJ=Zq#0J_^&4c,;NTBOb- .=D3BW3frn3i!-h&r=EA8*ts>BB`NdPmTd8FD?!9*NHZFQ(*Hq5kc,/oJ efAmpWGYtP+QHa#LqN 
;bGd<F*nq\Co"7k!Fn?7'BG"MQQ11%T^ID'J20_0)@lH#bhK\#^df9*=9tWE+oeR$o&AqWe!ADKV%kn7RHUNS;sn:]J"K#]Bl[pmh@eT.1=k*pd2pRs*L5IKqbi9t+hA#DK96Q'fg1'fC.3*h_(j $7Q<`.<`'>UOep`3&Q*:NafN6@+-q)%l=Bh(AjZjaIsF!32;'aP31I';WtBo"_DEk<#?$54O4mOH6YhRTp\oUZ=JTEfDo9*H@I+/A`/H$)PL+EVM 3R4nR1^jOc!_^?`>8KBMT*]QFP4_r9a&M-A/MM%9&Vqm58C;IJPMR&!nXR,\6'D'mFfh(TN+.q#$=k><h8cRE@NgXi.L"X`OrKc4ApGa+ .1o$)nkM3qJN^ FHr\P=f5&kdd`Gg$i3GS@b/(2Jg7#5>T?5t9H?$nYX:@=DEFLVkgA:'j<go`s].(N0lDA-f0Q.^:oE6"^!H2()\A"<9*0W$*7o+U44n,X[Z g2\E,e[d6DV/;`@D`FT&&,":snlJ,Q+3siD&Yp>M,?dGG!AIKHp"$0!KA%2%!d;(#\TG>ULXZ]'24H?Kq-hK%Ee]18#S$nTE*Q$m;[-:@7GI]r3$#bNg26YaDRCdb1H&G[]@f@&!#%]7o&J:RpN"J:Pt5M:2;Af00/b"F.7!?_r6gHYI-&&l*42`PqXHAO&cK1S q-A.dej%g2r_N6UPcZAj'/Q)iMYQoWA!63at](rR8BrAmc@5 g>XK5U;H8nY)jn;jRV$p.%cAZoJdZb0.U:dba+h,[E\Hhc6PF(</$Di?7!U<6-@#i@4h"%0$RL9H-9 t3KqOC$lhECjtH?#CF.Ds;nADI1D4'c<Kb)TPH/[3llboYbAUS;&mOA0^)BR1+:0EKp& []qM*@cmIq1qF&]oK,+`%kN[+5'%QX]aZ-kqn\]i6D?WP2K^_eI/cj%/pA$OI'7RW;:T.Gle#akm0_);11H/o01^BBKkh3daC]5nc/.S6P0DA6]r13E(41ZKh.cOJ(>h]l*!O-%"kg9[A3pWJT(CXae412[=6,Ahng7YjQ<>VoYsY>;?.56aQBtL,B+c(9:4]43_H(T--b#jp38=3k]5PJZc>mN81oF\,V))#F!FO8pMD:$BI,H2C\n1(U[r.G,2."jbEj4"h*G'5CQg"*o-_,o9Yq1"=MFqm;enNo)2me[s@t.b1,006:h*-7kAL!,dmGB%p3.[dh=meVN/,;r/n^Are4-1`MV4rN$"q,n-M0s[K#J=@/X9JFr0/(gle^TC=,-22CM71id:$F"'M4hWk9mU<\GM13iO&/+F'>'p;UlE'M$X)i2^')8V']\)W8i1h'=]!;D4sMj$7fO0=[r/O;G&@%:E]DJaKDVX0b;LB>1H7\r0+C':R5aq1k_kUe<Sf^G&FYW\ n*O5$F?B^ink#a8/QOhAlkDiQZ<H4#J8!YL-PtV\#\M6im4W'^m`a(4G(Z0`$dYfrTiJ4:a3L0"9mR]<,;rr,PJD=lKb07ZoQ7<@tigP!c2UMGq6.6k"18P"%/c8);Y-)?4n`pXkMiDD.AdCEi](X!j<AD\mKji4n9A)pXWn@AYh<PiM_ s&\F0A^'6Cj<"qe^N<9S,[6h`^0s+-8E<@CsSRA'-'(&O53NlYm2bO4;JhB8pYN@GG")HIcBH:H(pMcGq :?F[04Gkp6$A0AN(!a-1CpnN6U[\[#3%[,FB*e80gs&BX$2pM^C6nk(m/$BQY*OGtU%5(W_A0CAm1 bt&4]UX\t)QHE?0Rr<eHQO;7@o8A57QLJ=5?s+4N@AfN%n:>f.BhFW40Y1^AN!bM][3j6#88q33ddAoQ5NEQ]ZZ<5A=(/cN(5@UZ&7aZos:0 pdrGQKe+o!!c !I8N0Tq7N*p/S;Hpcp6*<9ZEG+V%;SWHk%eQ7hU908p$nboO:5E'(Gn'BG4)&E/6C?OMF?ZW`(lq7QCfPF89#`\SE-q2]N2nstM`&iN_ 
'`f6Mq6&8p+*TI/I.!kM2c)Ibp+iC6Ll\!iB+2gP/7b)?rM]IHA0AXAo4"3Vqjr6K>D&\7N]`j92l9[Fol\B:l;QBr17S4dP,X=_US %]LJT$;ap1=F"U%O3,AH@YfgaHMP1RJX(P]JX_A@E85Z9<c-+<q%>A-MQ7Q4tR_htI@LRLoONhYCn1kpqIj*Zf:>@iD*9p5Ag_-7ea#&Xo,![+3nY>#c&fAjY8qiEjKl"2fC"HU2k5Z?(VE]lc2o&$N].k9o?*/;TZao'g+`[E/gpCbgp+2[B8nDGdtiqU1t)HBNMn4Q'j;b<4k,.V?XIQbon()B8)jnL$)s.JGUo><)]=+#Ha=+dLlZQ[<$Hg/@PD7fAi).d5*Z_F^jk\K`7&9#>W3V'1lE;N!`N-<0-H+R50#A_GJVdB3>@g^XX`4iI3 KU\6*/:BNo^U-YAlY0L`(BS;EY/rHeA^m[]t7^[dMT!-70VfUgS5`W$mH"OL'h5pO2640 nVA[6'rDE6sDJ5;U?[52?Y03-h$jT#6K23$;7pDLR\8g?SNojB!VLtchs`MR-75n.`SZKf? ?+4lTFJ;jE6*$mG4'$K>BSZf/=5c/IK&&Y`MARfqg/E ?[3n+fd&NKKe-HkFVch+[3ojgg?4H [5<=>]Pl 4?Q9Y\4gsUq.AGsHP3G(#^]6nILm<Ic,Sqp0[latj.Cb1$_<'&M;A33B=!?E GtR8J`:*m5AabW1TAR3tA-,&$UO5*-.H8"\IWSJ3A98-RC:'JBkfU8$Je)M0*Z2(/Egs>;9dR)I\sH2dM3UVKE7l2aTN)7:iKG!$PB>`Er)j7kP:HW[0=Ad@p_(52`>AXOcgkr0aM=UR7!KFs8Q@06JF(.0sSM-*#Q-&+Qp$q=JX3X?O`EC4eAtJ7fP^DFRR>4MT/jnZLqnWafK\VL\!Fcl&j,?;aO@WCJ49e>mJ=]d]hD]qml#mDsM4t'Kj9j@@jWp:kmmo<RdjA67"C2OZXP`<GFLql<Cn].q:kD4qk#@AEhl(%f)5JX.JUo;8ENC9L.C*=k7D&#Xitehq^?AX8#W1V_IkZNllg!C%\4iPh>SU=7Vm"A74oA+SAe^JX,<BmeQ'$+h=c6ao'Y]]T]6CgM3dtR%=)F9Y )&*k#PL;s+'H-FD%-70!r;6g-\g!tCPFK'X\#qB"&:@Ge9((AWB,Z,04$^f4_QWY4Ao7C1F.HAg'Vg. 
$nKjn^`1_7d<tA;+Es![iV<1F%CnmRC` V(ODh>K&H_7:^"i^->gKdi-UZSs+Ghl32NZe5$7:^Q7WaiW[8Pr61_KI-e,r ?%4n iag8!+,Jt.A<aEV$7Z0]#$?#./o^L?$W\ 9`,EHa*+f"N^:H5)UXIq"Xj't<X6n2b EQ+$+H9bE?[*n*:1dtp<P,!^=Yk$mWnArW-UoC$D9N6eie2lR5cX_rV9%VQR52sc#!:dH<<;nWji^Dj aQ+,k:2_[0#o6KMKL,Sg?0l-4Ps0P,Vh'S=5l2@hRi* E"iSB$U:1>*:COLpr_HK$;_ga[H3E+('^")O6n]N@#:;dl?]09dbH4g^pS60c^9!>RF$PioDhJJb14(oF\AbC&2$JlC(;9g>DniH(`tN'>5aqA#3[mT\9(F&!J5L3?F.RU6M9;'=q eV&>Y1%d]5e9?ZE&7D;>B*-QJSr"O*WF\S=c(qe8#N4FJ!7lCIpn69-<SeqIt(UoN',d0Q:T2o;>3B;W^+6l4&K>;^U-R\@b13VigD%Y.a/c<Nhp8[*1 $JgFIb$*V487n$7/1&/.TKP;Mgh!hW))%k*T$#L-ar2':,!Agb'*#'gQ`_E_,b@Mig8+9?fN)lBNUV=2KRA*4fTdl/A[o=b+0(c$Ck=r>[N?d+4-TRjI,k`m.LfE##>#lt27lgCeL_>(Z\W]$a0Oj-f+t5/rnJc+dj@d4d!H(a3DK9lq7 .mUI$^/HTDL[GncYFbZ& R60(<C7NZV@sA$5>hmHPd5FT5$[BA9A"!n[;#) (n3#5oC#q%f+)65O =tR3\(*dIjm5\)WO)Qn+6,D<iLnD.[VmmcH(KXASP?dr^ToW7;f7^hTO."4]P>9<WU2b.bj$&_K5R)CV"&7ZYTa5% f.^TFUrF]P:@C#fbBbP`1J`2a$Xk(/Aq(/]&C1.3bSV%@JoabA\R_kSQq+hH@MW*l1cPljPQ0Ir3+*ZTXD8eP.--*q@m]q?hh>'P]A9OO-V^f7Sm:__ $=L7SgZ ./&`hE )#T,R$mE"Io"_VLU4`qjO)fQK=2fS,2!MMY?9]=;1Osa^T rQ.OA]NZ/<@MWrQ4)".Z- o1,XjZK\3EB75DCpL^pl8aiUeI9p1EcH16ZQ(^V>NiA</,$R\&J9m 8I'qh4l>OBKadMKOl*#9nhBa`?_:+I]1@ssK`%AH&JhK>(L03mPfr# J534:BkK8 ^QD"=&),c0%-^WY1ZmWg8Fn422;M+I0UZEA<@-U'&A/`2D=Ni88)t\+F+`l569P/Z`7R.k=ei2;CLTo:'jGb;"<m3JcL2@NXo/TELWo4K!)*)8i3rAB4BNkbXSVOA\X"R/o!a-D>SaLdBqBHm=d;O!5Aj MCsU(AKW7US]9e"_9bg,s:??4VZ1-6HirJD"67_)%Fh7MAN\S\BDSbY3rmWT3k39I0329e<2o;0`:W:4RMdp;mN)AC+>@=chL?8+XU6t3`iI<O'jjBo:,RC5pMQOFpY)9c06hS%@<"`N_`>e2hL:Y9)!NU)97V@U(tRck@5i+L,r+iA`9@A=MF%1crm%2(YLX&.ZR6IIFZ$tjZlZm3l&1/[.B6Yl8Ug7V5m$;/ >EB4\>MOL<0%&@d21bOK@_XNFh;H[J^hQN[?9b8(QG7AeG0NmSAE"LC>?P!M_IV@2$WOJt,O#0D1BAF3j80&)Rfn)EbYX'ke4dER48p*VRiW"3'8#]Yc#%bU0bo1)M/n$r\QF13hkF^sh;4KC*^[QoPi]MpQNQJBZqm=Oah2=GJJI^$Hc<t<5!3r1E!-!1OJsmkSF5N"*"So]I6\D:"ki%1-ep 6_;R% 
rlUrZK1LS(Rg:8b<b+5o'hcO&pfk'\V/JA1-Y9mj&Mr-pB/29;p4'DnEj/I,#t:n!;94qE!2eaa8t`l[-&PdfB?BGSSnti/lgIq#<?KpF,N@gn/pJ0#%(GWoJq-!H#gXRmnU/n`,Rl?'9]m<ac@?VqH:453Hg@8Rr*^':?"<m,_3\D/GbMTa^D6A^r&%7i!4Kj2_m?F'PTSTXBmd'mc,:"GfAph/Oi!D[",k*'?_o)_0nb6MI*;]66oQ?[UDag\01 np6W)-Y/2AVn/=aq@_V@8HVe>K\d%_e7f$6=mP\;_QtEX/_J-+F'^%4Y%:n06T]R]^Ch<t,Ab.::*leb06B=aUaWsfoNO,7fVk/H*=-lta/tQB*VUWl6mcH2k70T9kK$;f.Za<<K`iQ[`ND$'OU5&'Z9mf5DA+5UcC[P\-B!h8:=&4-d'^s3k1#t ^"dfn;p;nbf>sh)Y'i_mBZ^ib6^A%aP\sFa[MNb@GCXC/S]<'jB"jG/)^&@@_ADT^bht;?=t":ZOi9b1lJQ':NR@gU%VA0ROb^[Z5 #Qk5%cldRbrb6$j$C7.r8p<Na0r)^4pWN09k)jZ79g*:W"R[\8PGqd2;e]t5M8=)46^I).[CQtVofl@>?.q"pm=Xc" YLPr[:]nh TK-qka^LA5Q=%ktd?W/cK!old>\C1bUIL3LeNoFO7.j+*Al<LTRbLFoqS`QZp`5N\[r0`f=*k?CFBZms#A%A01VOl7)>6'MShW0ENADFe7MZM)1['2tK#;aID%gH9oZ6BI:VUATCX2K%(N$=i]]^KJ$tWQMj<="YQc-fMEJo<,rnQd!0LO;C<Yn(Zk<^)TTF3-$HaD15>dnqdL1Wc] .n@Ni2psT5BK38n@h'%PW,XRq^gH#A=k7t;4Op\]S,e1+mNgj(:9 IEO0$T&X1G^-CU/RoW_O,F(6rN<6J?SB#GY%;c=B;Glb>mm*&XI@!(]!7>sT7&m_iqo/#QNDkj\p8VTmK@$H;(9CWojQ9@ejK;`nfgOoYbH>E'MQ7MT$;nG7Z'9)^WflKHU@:Y4O;5I\FLUb5t7e:gkN"q(EOdOrehq&r!ROY3j>U;iTK,p1sJ7s1?4jN2-dnArgtpW0D+eC<>efRa_Kh?:aZ8$K1@ABgL_n,kgQ:\n*%tW;#CZ1'c)Bks@W;'5P:hg,iJSFcD$P);k=6mh#A:c/DSOpG9./#iTg57o7rU]Nt,2KN4rW/'!gVA"KgN8U]_irJQUP)7`oU9_NqSfZ2`dhX,ctR6T,gS,S^78>W6hiGW%!rJEdqX)#jsk)6(^M#cAp%@d%o]4dgqLK]5d@jg&pt.dr)`$#$WECm[Pp 4Q #0f]At_hMl('aJCsA2S/M3jiG.a1d@c22R-W<q16KsS@NRK%8A$<b*J4S\Ge$k7S<b`Wqb-lDo;ZURg.?7johY_n`@O.gA.j"D<EDl)KaAe%ce^h%jrPe5=[6sJ5(W,&#A"jr4t,mT]k5fOSl0X/Ts-X&\\_KRGm5]U:HBUX4pm29]b!&Bh*IoWaMcA-q,"Acp:6QQ]B!ntcGY<o]iXK()20/,=>Ib3DX&S^pK*qs-(.TbG^_/E/F'_=p;U\Ji-hkMS0:O4.&i'a+9MObIlfMF?\Qd=#A$pq;N+bh6j>5;Qh>$5 N!dT&XSAbADW4T)`/;j,t%IK=GA\SP??R/f9!'E%$7mK\tcioalVe2g_FeSCE)qLAstRl;mOLhksfHc>@d&\h&FsjdS/n\d@aDA_PdU-_%fV>3++RG=qc>T:(0&_G+9U;MXYUK\AO?H+s3D!R$OV]mYT#dE#g_RWkM"q(I\AB81n7S%M!VB,O\6)l9[Cl$ F["h6ARRBHh/At4ah]s<"b?4;fL[9GKmc75:A2r\0'CSjY\ 
?f@o]pWN)Lo::Y,-#+jtJ`NA7`[8Mkt5T5hqeAf:tkTP[K_,GE%5A'jM>h3J@h%WG5oVsbDHOr5V9V+Kq?iY7-:EQ)8fAS<\s3WlBPJO7O]$o@#L@.bC&hDoW<#!.8j.<deSSBIlfMlFMSV$)=XP]6_r([V2O\ddm C27IL+'0ifqoH"?/,Ji$R`$!_tN4=Z\^BU&!(PKnsrC8W$*hPSh!JZ?:j5e%A<c"[4f=Nao3gOD:VjotqmB%&gJS,+#-\^8Tp!CWGBdAAD2!,%:H=@[`S@R*WNnRU?T:Oh"Z%) frXRaX=1(2!"r$/b8E!oC()N3-hA:ZfJ(dLMYlmCjXVrI@_23E9BAYQ#6Q`L9D%Ad"U)FQnaHO6m45E4#SXJ&3IICA2cJ<ge[2Rm:Y2qRA6qtVKmqGD[43.!'!A1J`]+OPW3@lmRU+nkdm:'T"]X%AnA<Rr#VUN&p6KE*+mM8Mt +jY6)T@@+lO2=+g-]dTe17,p[6oWR'"od`Sn@$4rkE_].h2mZOqp@rQ*V>/YVOL?[po`CD(-at 4c#FcK-NZ'E.V7+pq,(3OrT$8%E>e37o57[BeTJDYASSD!^;%ob5K)46%67J"l.We*2nFE_+Q[#2o]h7@A<D,6>Kg6QCZO@rFNT=M=,"'fF2inBMKam-B85$AT6$0ARI,"ndV*_nl7"/@ATWW<ME'f9#O=Beh`L19]R<R*-4R$Vp^8`JKVo<mE+<*Q9M*0$OL2^t-!lnW]fOoSca(L/ENf]M_6_<.\3/"Fldg'WiL:?*re8S-Af"tl@:Fr[8DnZ-/U2!1LUB77,j2?9\K!HN7GAGk9C#At_7\mIHZN`enlks.BT]m3?[SQKS_dd53bMc`7f/Y@Jgt43P"PltP7MOD_+h /:ibi:^[i,NV0A@6UI&?iC@hU0iOM8OG(PaF)]Qbm,l@82)4hd2;nqsj@&6sdP]K8_dH?MCL@kP XP_AA37%*a?U9sh*Q7JCo;fi!OW"Z>Gg:k39m(>t.AWg&?f"3>/Rp/ahL3dtB `2/$5?1bYrhmJLfcMKte`@/n ;s\PnT/el`laD&EK"5+(3,l*jjPWJe(E7Ikt9cJ0%Fc@mm4c3s^AaaV6B,<-O/m`>G+(.&:#aZ#9-4LM," NefR'XeH@nc1AJ?![jD]Ct+iFl[%%TUY_'I:%<*,/&hC@cWUg-2(9>91LXW^WRb6Z1Mpp$-:l[ DQi0AoqIoO`]b[G"Vn:+jA;%kO0-<5Bag=g_$P\tZ\Ufl!4Cii ;QkA\1%*dY(s(sSHDMq=oX*gK?G>A!kST(H.)`!NNb&A>'kY/-)&n'UKd',g+4$qNIQOCHPYBJ7!80Lrt;"M=>5X5G$;DHMNYQrdj!jJjQt1Y!DI?Bq)ZRZH>nCk\c44PAC4^5PLIc\g.285(6$?(c/!0j8:NCC0RiF*k?AT^qQatHh0"d(C1H4W8P>>B:\"^Ui?A\d(YB mOnh*JlA 'VqmWe\0q4-=QqaMbKSI45>A,Z>_#qfRJ#[:c^r\S2+6le^O/TCPFI>HZ8=/G-548sSdom(J?C3[$25 @:Oh<;JJA6)#$H'B+`06\Pr)o%;\`#+FA@:D7CMl8."5'AU5Y0UCNL](BA2b`2 $pHjf\&jS4<0!`nLbYrA eMZnU`,o#Q.[: )3-IZ>^(MUp29?DC.a!)r7#h^d,;D1-coAB:U;=))#Ng!GP/>"]q>9M(1`FW0a#Wr$7"cB$>#E  'eDCTt]3.FS0roi2q]J'B(JtbZp-es2]((SleEpVd4,B@X`=n,LT_b+/I;b?K )<6CS7;lA/*qA"_;J:1O=mrAgeP&JekLD(Z$ffEi5O(E79KWOfd]SG@7@\\NM$^nV_B/US&U7s"kM%G'"K!&?gb:%Kb82?&'"*Dt^bXt>k9E'h,Bl94+.Ns=el@F/FC\1-sA37Aa&<4p,BO5>LLD/c2N@01M\pDte )*nCBEAN]bo):3V r8BdgCl 
Jq6TUiF[>CJND$=UnJftI5^ApC))lfM#&RX*lFnJ'h5c@/WQ14=GCl'6o=;No2"C"JI/TgW7SViQ/(cjX>M[@.K8?RN([-2HrgS4B.@A^?AP2??N`")AN+U<@;BRAOX5=9<)\F/B!sid \]R4qV(INR$/o\<Pk&-gc?iO,5B7njk_6]3dW?1XXpe#RANAh@2KF'\:N*P\b@?SjE.R@::J-L:8[V`gH?3Wmsjm!_Z'K.H:mpQQ?UNbDjf6*$;S#6+C2eItQ.:Iqr2% CQ`VJ7t(&Z'[ZWcEZ9YB[HeD=tFDYl^a2+=Sh!47W5YsN\Y:cXME$Mq_4SE:Yj3_ T*'/>*\m<g[me*gbD VfKt)=NW;/-_4*S*J;Q_(j5]i$Z">(_8eC3ki.QkkFlPkVSK2?HWAXqMY5*L<Q+V:HU=/[P#l*TEt!fUf5..0q/;ANor()L]\1ZR9YZ!^qaJt4dWt$AoGk?B/@jn?;1LDiLXq:qX8Ip`ciQ>87TOE`ko<3Dr-QBPV >cE<1T([B4^_CgZ,4LZf!bt)t9iVR(1kWXpj3-rrq(aG_SZM&O=-2QU37V@gQn@Sn,/!HqiO-gccspXsa1$%e, ?Vf'5=@/e_,It<FTB-]AtMt$Xq0jHV.>O*U2`3mYcqB^eZ+#Adk'>A;.&2osb[=ZEO@^U#:ifJ<;GgZB^o!>D5Gc^/Hq9XH$KVb=A?c%<Oj4ai"%dcm*%<RRQKfJdi#<hb!TsQ$9`kBADMEk^oYa;l5&4cFA<%+]A%<Ih>DI294lh+]p[^cDj?&J/CH>p>,l/_1E#i?-YZ-)._(QrZnSc5?KekMbBPs;]SmRbJ=(P-?I!AedL*C@,$dqDm%P&*/*( g*qmb%q!nn8.hs\;nt!UUlXRVsAQAE*#qaO#Y%E)r"LKQNJ@dW"EfX)+\Xf=5.TH3/>dJ?9W tU9s#&CHsFT =c`3DHLNJLkA1cJ"D>aQJh%<X8.h"X.`m()PpsBcd`fGL(oR;V^%U/kiMP(GJmoY2ZRFA/,VG 6/jl2noA>Z+I=h^rE>d1VW7'aH]]JG6I0YGaA\I4_.$q\b$R:pKB;n)m>%K/0l9LG^(bW1$qiQ\NbYONAA%c. 
;:PFQ++0-E5P^s=7^MP8$)rr47`":&$='>ihiFb/>A^7^3UO39<>#]l6N[%B8M$t2jP@%RBeVOYO0l]pOnrFAN&oO:7o Ts,gtoYh7:\1!Bh9)!XOeCHYE%9C8pB10HZh'L-a0$c3AS=r^a67gCNkVht<`c_]f>R4ZAitLW!L;og%CIJ*^T9RsA,q1:c#X`UI`K>kJdN]*A"qD#1.Ua/!FE7e?JEj EGALbU2cdRpPf&=$do/?2),R]p 2D(HtnNsNp-J+k0V;gb6l<gfhoN:'4`=_XeXR ET>2;k$e5o:JBCXFf.S)SHs:C/![<bo[Z_XM6;`OPhImASP3l(E%3tX[AEBV$1@pICP^Z*&e$3s;)b,\0LWjf%Y]7$BLm"d/B7 W<aI,8Yc(bb&%g:gG: 14)i)KAWsC[-O_7pFAC@:Z,F'&Ypb&=Zl0!Q!JOG&_S#&[@+1a@!<\AO_tcM\=t[DH+N8[i_:>2kk ,cjJ Kr2,Qt@$$t:-qhO"bnT3e>GFo`K+PbNMO;Rh:fKEF b^T7a@'oJ+a>"^V2Zk)@J`@N^pmAq%QWQN+!+M+bt90mtZE0 `g%qYf,qqqpKg+tr6`aA;Of*\RbV#Frr=&+-pqWsEFAZ/&ToT/sW&kO_7Lt4FL-UA:phLq#DbcirKJ;:J!iUhKdJk`Iq?!8S=dGd(kA8d;klrp4J+:88cXMP*\1)m=,E9k=hd6*I*90X90`?JA1dXAYfX!D16fdaBiI2gA'(;n"cBp4P\XCih-r#GAN3R<%V0<q2I2WtQn8%:*V2D!F@KPQf[A1`^V*qr7U(D%1C0t hd!_.2J4'jr-]MrGV+bo'"EoM:^a3rR^#fm*eQ'T$gA/d'Jp>5c)Ad)p-E(Kqk78CE#O'\:KZ.$GT^d*>)K3MQq&G=R]"2OGEqP%;BH#9<f.r!D^/1tj[ik8tIpRlXq@#7H8R3rJB;AKNR;68#k&"1O)^$.PB0(p]pN$R2i1>-%+i4*lhab2R3M5=b(`WeqJPG@;KkT,dQe)M*WF>&D_N*ib@0E#,[WS(Ne3YPtT -;'m-BQ,jbp'eo[pBjAF>f#h2b+qRd>%^++bN`P]?CNq2Ks$^/XnGO3@G&rEp\_&AX7XH,7!gr@**7'BiS)pERcqFmntb5'[=Z"c/#p:(E@_eZC:p?kPH=o:1p@3<DV&e"32@[_UeiHmJ0!:O(&qs IY$4-QQAOdd3E[`ZYmEClsS?`EMMri+/A5,= $A=tYa#SY,_5l?`F#.9"m0:5^:WAV q_6PX`d8'eSS >)J$\A]q'*C]G*9fa`jeSZgj2laCfVS/+Rgf9=%S_KB[8,l[@`lR"B[o4mBA-dY!UM%Q2c=13AZZ[6fc1["[9g\"=Uq5Dl@d4n^K:'982?V909L0= Y\5AlX1V6$1m'%]"ERYJPnT)Gl#/Fc<]k`/U.[4;cR[b)j#q4q0Qpc=c$eAr-5V+n8(\DNtiJhPis3:@<.F:[`KrKL8W\i,[1].b((pCjbj%LDh0?H$2Z*)J+*Lc`jQZI`Na.k>`PhQHNn2A=OJ@cppcMK00LPg"L@-WWZ5K<BSbLspU\@C<WdG(L.0srsX8XdM['n=_"PZ^3;e\qYL6fAC&_ nB8q!!BC8%OOH<AI)LmB*?$M%l),>E#s_ldV&Mtf)3DBA%!#?1I@"`-^E^=!X]4AVDEQT#%;70kd_4V"dKX!j^/UfH'O@[*IHC(b]AZ$ZhLM/X!0/Q]Hj*V%k0mI(Y8$XOn\.qIn5/e)B"I]#/Da6V\'O]QkN_G)[I=8'Y(Z_.RqXB/)[MIAq#n& +nm qtG6g<`OU\iKY/dQE6D5kNJV[rJ=8[(S8T$Ig4eIP_Ff),<Y-Lg6  ?7kM]6f@qqRBc#'l&c5+(&:EVl3b&X@.cN75BSO1$;i\+E\*3"9S2"%6p3Nm2h_RA:cej+ESdU8)";5DlLlMcKOnT3s,\-,WoA:6P;"k%&Yq:ENNZUHH@`?1JHR(2d 
`$:U,WQK.$OS'KQn*;:-2Z^,=[ml^t8N=%6R]Meg+b'GoQ5DWU<q#g([:WBVRD<Sa#N$;T+)bC+gm]pm7]na0UeM'Xg!KICC>`&Lp!cC phI)oG57]I6<N\U0>PZ7ciGAcfh87BP]!`AMF;<5TcIYXOemL8En[ag"!hb]T(1\M&7[4C]n<>X,.(bE"Z=([5+>& rentPm#3'4/>U[ mc^#Z6rV*\t$LgGp3$:*IG-1.25FGebgA4U&coE".Ab+j-&ZR+E*e!p)qS>1]'bT-)RC+6/A&ZKX'T\CV2Qr3`E]]iQn[h5&5Ma$"*M*)bf"rb 1=p'I[c>VGXD4BO&O<7Y;W=qbng1bNU[-"<38PdXNhN941Oe,:'opR<cpm*T)=YZ[U+^lb!4+T:-_3)P(\78hlkojE!!m%qq5ZX'GMA.Aa_GfLITm/*r&Un0b88@Ko[;dY+]\Kd9AsWk'(]r]8A]Gb&^:W;Sj)!a#<UFgJjdBtmChsl&Z.llni*'T-"_Mf#%g_Al8;b5j'rj@S=.mIZ7ojf75;'3D/$*P>3b^,0YF\\+f07p3"0lXp,tKtrIL.0e^*.pf'00aE$R#8W]O?4=0QB3LUr?mkX7B(m;)co6E'"<1lQtfQ@5mR0YR*`FFYID+'S0K,Z(nal4ssG[7IS%s^48pi2B,8 (A)KiNFLc6m=&O)5j2 ld*[8=I!/f h4\G#i>h?@8P#VEE76!^E)V!l$DeRMMriK1&8NgVG;f,X^bfZ8;N4$nqP]C^A\%?AmO '$aWQLR^Va,+Zq/p@aiWi?>N[Z"2S+^e]cMRH;7DWNFR1OOaj4sQ^l*]XC`tZV8T>FWeS8N`'`ZmSGbb2eSFN!\>bDh=Mg:S!ks4EOLh&T0[*TN_`kpiT1=?:k_k;Q0I]3Nl])gbh8Q<Mb\fM!,kf^g0SiACn3fet?)1G!0*8a%iPBKq\[R,k:#2^C8>e[+!k4U1g]>XHKo9tgI1bb V:/`F<Ta-5AmhPmDG^RO5!Qa),VN(QQ&B:l!J%5t/$goAC]T P24"<X7?dlf,@T34ZV IWKU8&4 nOeO:@3TsdT&QDt)-fAeoar"O"t$9i:jHkR.]l1b!B?<60LU1qa*G4(#EbT&g,JK;Mt&l6SE'<*BTRst8=MXMik.YP=&VA jG'?3pGS1JD'm4XXUPXg@KJiN.5r&9_0H,4ed\JW5RIJ^#gOR2_Gf4J!W6C7MX`cOt8)V"8]?a*8#baQ9^,tDU#(%QLq,OPQ(kPA]^`sa3$@NW@5@2C!,V">Vt5e&D/pg:(d.@+Ti`LPWE+W&/"YVOO`C4kk nOkoUl jY0"< %J9d,%lsY,a]+`A!Qd$WR\n:0DE(<M`Fegm=apA<i(A<\scW*^:1j@dAQ-5g$ommh:m*M0tE97_NDO!*HDCN!c"8ZC?_0QdHdR0hP_s\0RX7,JjM+k\>XF nG9G?GMXFO9&eBQcA$Ma3]LQ5RBX""7_De@DAoZQNAZJegBWjA36hDcEt.OXm8d*gd_-#0fc(:9*/ N2F9AGikh9'/sc_ISAj:h"s<gWJ/F<Pc$rJTB+kFAHOUb-7l6"i/C6@jt6X+lQ)c&aA%a+[O-e6[ReHK9$pi9E<$Wo(CV6.*7M7,m8[^7p5A#AIIcVqGHt AJ!PFdJc!#ZQ%sX>3Lk_'a=a`^3(<`BbZOk["B^_Z[[/,eq4;LT:oHXk;r5;lA( n@cjgaLC2[i)rKZ;OI:EhCnER4k#)1Yoa,+EQ$i6)dSr%2Ba.i3: Z6SA($a!gl$/nC6>X0EE_a72CE`nHL"`ZGU_5SWAA?W! 
(WXtM9Wj$I8P'8dij.S0aU*$(MBK HLH6'U[tq#JP7->J=na1WgXIWFkjdW;1Y/sUn#bTMqen^`7bA>(^c[!Kn[f7p8AYb(N2%5\FG-L0^5"qMVFfaV;sBVm9oiEHgj8H#OYoUjZMNTL]MqdZ!s]cpa;;lN)0USZ0k+_G!MnioBE=e0hs3r-CTIilr>3q]9?JfI)sNDPA#dp;\3rFdLnA@Vlgp74%r7>Cgf\N4eLYHU?3*\67rK]pP cW23D84fAQO=2,.p#g4RL\<&QdV ^;qPsYans`GF=XZhl-GE%EW5^<fLR8Mc7rU-$*iEL2T/ZkjAp&o.scbL;@g\c2`Ut7*/r:\Keo]K_bGOH2)[EF])dnR&TAp^JaLIgA!E^q\*L0aAH;o!`+A6'OE63l^,%c`P_X3/0 -1b3X516_L/Dmt`5 B\A;NM$kJI3g-**5J69q4m:,4kr]0<L1`rC%bH@DlS,]m4^6XAY,A>K=(+(V=OtTqc&:bh48P[RUX/.YKGC]^DA<><6b=Gh2Dq04>^*V370c+O5F&r,CEhV5fK)nMfG)Sr8`i/9K-7fO=b@T+\J+7EJ@6klM`Q4^Yf+e^kXIrNXAGn;/:SJYhcqAr=0shWAs',%1sM.3OZQcJeJI`fO4RV]9g$#h">h=C.AR^)4e&Q&K5,iZ-B(t$:NFV#FF8tY^GHO5n1q#PIMZ_0JrgF[%\I7R\CK5A:-rc-P6fFm:TGqbU_rjs$q+O(I4So>0[ApW!qQ5IQnZ5GHgr=EL-H20@6Bk(AV$0UUF_#f9(6#g":j!<iI!+3'S0AK@d#1A,FO! ;MDIY!d=Xf#.(EVm;^>Vsn&pX)b477Sif2mg^V1$*D#_.DY@_CmmU-8`$i1,h6^8U.(EtH9<m]5og)eJ!bbP[b!QGEpJkP@:bmPkCONM-(-`U]cXU]gC"%Mj,@f%/]*q*9;UYd9!)=Ne'4Z7rtB]b9pj"dpVkMk<Ai)F'=H Bh]=D#&BS9_cTZ]-B`b`F&?;N6baVT8Fr"t/gerd??\<J6Q1bLprbQH\C?&cHaVI*<5>A4d>@k<(tQjjl"\gQa7PArYdrTe6r=Fe=T9cYL&@LU6=r5r]hD:o42!1ARde,?dMrkU;`BbNYcA"=KM)hRQ`r\hoQOAA37TNtXr^(6q9@=N9)j6I!8CJJl!pg-_M;tMdSa2sD)B&*c8t/dY0;+=;j)*-03I1n 2I7F1jpOc(ocb%%L/XUj\s$EE2<aC!=>[!<;]8k#])^N+AlXfNZ6/jO^Z2^!R,"s'd:k<[[^ig6Z6W<#(\l5m0+^GHLN:tD`4,k,D:V*HTp8)O_K7DO[>f!AM&]rN'< MOn)D"o@mK?jmgi%FltcD*0.@,-H(e6!!F<5Oa!Q)A4OHKi=2=NDBR6@jDrV&Wj.p`!VSUb0Gh8*Zjh-X[3(Gg3Nl0:'`YMLhApqnB0#aJC$$_67?8k]:lA:+pqsqs/Mm4jBpTGGsKA8RH@k$)kJI<+c^HbkSX0%Q6i%m;?7J(<X\V\G.lX)qP]=#7?7HPMQA>D&q+P0N@9)JKEX!8dCA0b"8TbCZ-4C%L?#,4$UAo`9IPt"K`Sf`b$7?,ne4fS';ok_3l4HIS+7H,JV(5[NqX4TO0KdO_&'"*ZKDtK30&f;<?lD/2D8O[cWP;=*.I.YV<lKc;^rDL)J(OgJN/e4Llt[QV;l=qRjX.7=!,$.X15V:7[?[j/:bf,_PN?[&7K9+`.ih)Ce^:oh[ArJl(9\+)pUl>;*"3p#]t1@ZYl82b^sjLL,;P)$tJC='b,pm1Cm1CoL1(]URj,HA?_J9;VST47LXYmIq.O^,!54Q4RIsp6QWdbW9CZ0g-5J;Z1i^4bRIk96r.nI.jUBUtc^I![At*:&UE(KjS(/ 
Jd]<V?>I,aAkEn]?QaL!&MV:b0tVF\$_(<SjK60T/r[q;Pt41mA5qRC]BRQ@pX14>fq__Uo3W'>*p+6%E,[*>T5hJR^FkJ*$#43+VhrK&&p"?K%S'r";pKQ_1qb8JL_:#Np5>I_Oh%>m3;' $k<As\P,Vt*bEObBn,kh fBU+nb_"'I0DD(6l<oTJgP'T4c*.+fp4JsAP^/!;;8Xod!mJ@s<1Mt= +pNUJJb@%VoPc.R/,MS'l?/8OG/H$1,l09FJ('-2]H,!*2d2>A]i&P3A)7WUXm*#ZnF7kU 1WcU4+/c,^ -0gAri,BK K=cYV*G9sXa#/#<iK-Y]`m` <?SYcX!]$%0N^6-<p.&AGO.s99D)Qi*aPR^QNn;XEs[#4RPj`<*i$/LAdh>^jW GtqIn+LA4/bkSW.9MZ)ZRbNBKV_LN\>$DohPie]e3?rjWX GP.8#A2O5-\7"C,KPlP5Y&U2[KlMQo25q.3EZ/#]X2UYU:gAI"E4Yd2`IY%O<$ptj+Y5]LR'G?P^<NsgI_ReKZpmA;8UocDq?Q[_Z2l6O&k3#f"AGEA(!3ZZ?-MWZ?HFP%E@kcCBcY-\T%5'#r>mRdRd'qh/>A?D<V1=tZ)4NI)B*EWZo&jmiXAmqmZpcdMPN0mA7MEEacOQF;k,<!a]92?7Q#=KY;>ch4Ys!%UG'FYh>6K5g'Wo]c.oGTbGS-?^eAW\3VOP=+T)_WecM?5/9/6e//Jf,$7((V@0/%O_k?8A&H!C(gZA$\lIS=bt.=ZY@%!*`3#1(<:>NNP_$?-H;O/FPOsqr#q?B!C3%4*:JXFlF#cLCIjr`AG:B!d'B:ai=HEr&4cICiI=dGf.F'X;M6n5/S8'LkO6,6SRf"?UQJ 8,8[=tiC(UO]J/AYl0_?+#]rgkV$$.dI 7HGbO;WJ8I^^/ZW:jj+m+ZRir%>jmWXkmbC"Z$[TT\)Y[%)L_O+>NLsS-^\Kr>eStgZ#G A:R:mQ9l024<FA'(Cl,B&kVWh\=L2ERI8@k($PEKC9gDV4&.Z#nAD@KmPcgt1Jr[\\6(orWJcH[!Z2B<ikM-PQH<$pmTY\`'3d.AT;,tq<cesEdq78?HYm6@#LLa ]FnE%MAW6'Dp`,]efmi[_-D=;-rB8'DIN4<PEcrM>Xb[V]M'lGW-%sjY%:s+b?o?Oqid<XES%Fp1)RnCg@Ab6dfeJ=tRP()n5i@VE;W;jJQaJi_rNk5gB"LVAoe%SAp1?F'I,$!Zg.TO8r&\c4Tj?V(B[N!&[p`Qe0;b;S$;]A^mMlFXhq^Ye2LOI%`SYMBdf>&TFsJ@HLOL+=1SP31P hSX@eR@61(H7f\W"`mdPf:JlLE\%sVboqP4m\Nq-1j^;jA3521f9X,Aj8 h61c=0G480E30bdNHdj&m!Dp.4OQXj#aaF!H!=VO5-2R)NsdM]o;)bs d/2NC8_'%hG"68'4CP22i5 ID*8>Kok0XF^'mPq6em/"*-0SQU`KfTbIF."oVADl9.i?JbIjp!>4;qd>\/mJ%tM9S^(IBK8CF!'a:/*2^=R&TVHG1AoIgh$;4MMC.sTZ18b@_Zo#F^a'lH%VkRFeh2WYgs>2'$]7o 4)V)"jRBN-OqbfGi5`bUbEG^cc4TB*.I,M<9sX"\hpBYZHZU&@Mp`(t:oi*W:4eUk:j=1q ,E77#5=+>#V]iiI`,A,]$+;aTN]qO^elX5gd1_cZJ&FRjfe50o5YP2'/3.:tN)GPGsWD^3FHn<Po<9t.kFBFP/9mJBM`>qf7Y)jBEs,'lX0J\@FpCNkj$?;b53k3K,1&Op =ktW98o6go'DlS/q$]6qIlD'XPAPmF+52*KkbNsoh;Et'l?WA^MS,cWZp(t5!bSfAhfY-_Q]XtfE,'-9JKs'SPbdC1h*.2MmDb#CcO5ZK4qR hc$;0*<<]G<3F)bo 'As'2@+[.Oi&<\UAp2/fMeA.>A6/$Z"R/ENAd47X+n*=b4Q+FRK00@a(!! 
leYn1B)b%]n7>`@,nF9q4>7"BE9fLGLt1!X3%,!\R5KBn;AUtn2@2m,"#sBAj`+n(HN2KR7c2Q/&9'N ^XS:_/A'igE2Aj<AhAZ6$Fft3'q+OB(1$aU=,m9X;.e Y'7'NM[0iCbgc&3f-IbkVUtpXrl/j3Fo)ola>G,k@@A^R,I/t:L?DO'U2Prl1NJqr_k&<lnWkGs8!hHDhcll%pi2n[h<GItgs0%5\ZL<AZVMAaF6fC'g7!rRX--,`]K^0V3-AYVLh&(K`i^C</qj6C(geKYFA@/n_<LFFh?pfPeP;eVO$,(]]K?i7Zkk]`%fF[%c7i\RUKTnKcM7]NAd^e3=bpDXtLGb`6OAQ&[<Z5HJ9F^>80fZcK>Og<s-&1HYFbg8i:+=@8TC6GL^"HdhZa@D$#-BtK;.,Vo(!keiM:(oDc,3m`!eK8`/jha2Z&Ch5ohpX1bFM8KJ(4*T_*g;DQ#N/SH*GBlsjtIsQ(*>'6c3$J];t3h6CH,g2P/MT7r?t,TKRN9\;OM3RbVC0bhSX_6F5R-M# Tg!EoD%SYW*1Qb)JLI/!MfTZ5""s:8<fd)7^`*M.ZA!F>Fmr0*Tp7P$FXqJ#%`#=_H<?MT[%;fhQrT,R.=80>i91XEeFU(m9+m2hA%ZW4E[?l`=:QW"mdF>Ps03m<9`7jB""<4<6$-`bCsKjMAMPCG2W BC?^;O17GSDg1l'"Pmojqq0\C^,I_\).2+#%I?.kI##'n*@PSh /o:lPp2Z4b7o0EQ'c7%>Xme8V0/+A[A-_n\L\l)=Lt!%c70O[C(7qZ&Xk_@jh'DphP</EV`'W[cV4KO_1+b_/GBE0&O$\aKSBddD2ne#fjf],/]]cZD!3)XUJj3g7mPPs:L#THo<Z&n4>/i7p^g2br/7boPXRH[Eo?l];bY6T&LrD@<b1'?tp2Efn$Ai>4Zb mjUf9>\b\PdtDQ :AG\H91r>R(6g6qAb$<S#hlo JI:@*8EHoEei9ARh9iB(=.NH&4D5cDr#k5jWaF>7DXRdjt&#mdFFjNoXE&]0FU;,LKf.2lK`7In."M0#RCDI4:C;p):5PNIKrL3?oqj=BUQ3;4 AVCAC&1nibS^kES7a0kLA50\3((0f?85.QdN-/k:in<)tFMAkQnN#Aid/G'-+((:Pf*)lh20R&gpeI7U\Yl2k#r3OD%VKs\#i>oggj6J8"N-QlI4:t=+raJWD%Fj [k"AM^O5frFf[K$SI?eH2b,*H<YJZnt5(@Ir6VDUKnPh:9lsHAn C4]AD&Q`"lqil1BAIW+FfiQ[K]K="X'dM&Nb)L*R!A?Yq1C'gS(Y0#U'52,4&f5$">_FN4iT_2W5`Dk;&ib\_qM+VUPX*#<Ftd(f`XXa]BO';.a61BGK(DaU7XWs^d* f)=8R\0C01IdA(>_o:LX2<lI18nD[aGi/.oBOg(s",&Y]/[.0Cm4!IPP7AN-tDC@o;5--DgBk\kF'WV,>c1!mBV<XThWa\g$m]O!+H,\9eSo%+^'A:iV+#"3f]=7d/^gA?GE6%HH@9rXC+ObJApe><4l/QA=!gg5c!dX"1f/cJ1P((2,_LkH?hi77HZ0I=A:fO?4 ;$FnW3F6sg?Oof1g'GCA$&]Y)..5U76;%d!Qn+-kp0BTl'km"!0[bL%2]?d_Br;oa(Z>qta.#5A& 7P;BhDR&rW^c(N?H$ZG&e<h g$)pfAlGAf;B)gFteIi2E:N9jGntD&QPH*InJkOY,UN?L\/>E-b0]m"^:$D&m5aAeh=a_.]970#lT,`a5A]BVThe@Uk)_8 ZpR+H-.YTY3;1>![%_HdMoDPG"s;(hh?NRqHZt"eroGFR9`2OY#VA1#2g>Fn`]<sQ]!,Mh1Z'3Gofp(K(sF(?BUJCHa)%'=L?7gb07T-&X:'n"3`8jh8kaK,]9IG6\/<Y7Q7q;RkiKU%r-d\^d"Q'6c?8;oca32VQE^gU]<0tsUS7LHp?1kSD:$*-:H?mB[-IjN/a>(`5fS$U%Y(qmE"h 
Vi40&3B5$.\'V/S^Mh$\P#,Z%43m%R()7^iP$oVJV[p@6-t)n&\87=]I^1L\7Xm$T9cp,j7KZNa<98DU:iHc [F!naeI4_E3A15X=K/?&%%JQWN=Hd/_DN];2 a2d0B4+h6($ 2kUAp^im/nP ZYNj/+7+E>\t@9r<sP'k&JX9 h2^2QD0/;go?MKAsS(<&3AQ7?MN=Y(hX9#b<D[$?'MdqLikpX1?<#JAeA,Al)fG.!]CD-s_Ua,+/LCJ+o:SM*^93D*.CY0A^#B<a&mO[2Mq8esn=W,eEtLci30-FK[YQtMWT54=])XZ;HUZHN[`P59LeeKg4<k)2T&qCEXj)?_&HTFo@r`sBp6[,T3LNn:2/3&kE;=g:.(UH3`[QLes6A!@LRj8:]U,h)=/_'_!j`(\ H!Y:h=UA.sclXI?ASLiA2hZ<]`!HI4AtQq4GPikAtU7!b*@aDO, 4?s$+&T9-69GP9Y^U%#@k`7.hBR5Q:s7:n8AetPr3XLSCs)f:K-a9lQ"Z`fqsi0$R,4t^AGMR4`B5smUM"H'B!#]&R-[8de^6X*:Vt]6ZU5(6:*$TNY_9nBPd^4GTS&ejf5Qdgl!:r3+P.`Vf$g#qhm7W:A("#<bVHoA5l^LAN @=:)PE#&1b;LF[G4ER<dT aaN_pEWe=<WidNIg_Cm*I&3m*%k<?.U8^XLB4il7 33F9)/hf*'VOfP>LReJ6M*/8L>="H34g#/NK>Ad1 \CmZ'F^F6E`PKDjC<g,dB]Rn%Nq@&CU_r_"+5=832 /C`(564:VIUVLcC@;G`2qc>)efPUQ&-XD]o?Eo%(9,N3NA#Z[P\rhcrL<84n RsP^.b+KZHk#;\'XYg'knHL3 gJ#doTCiI<lA(Zt`]X[S_4!d=JjP$*n6NPl_<h`MeAmQQQJ-.p6;-8sU]aAG#gl*/V>%XZ@alWl]r/9b_7@0q/TIMA%=L2AbG,3a)lo?H(]!$t&k?)a?b;gpK@tF:H)I5+29Z@Fa8^58D%0DO%(5564 (&RH(!OdYt9-C=.ZJMsfhMR%ZFUc(Y\\5P!gcVp^<TG =d k!i*9tW,f9'PY ,N@rR*Mt/]GBr*sno2/TmHG];M!7a#UT)53mfKL0Wm=+QW=:$n*8;Sc]PEf@b!.7XL0bg,],2MN+AZ_UUA5HD^:%=e,_A3Ep[p/B`j,lde1I74sV2@(L\_L/U$g)&4TH'`tSC2hS;F=I)=i&m4G1eA-$a8dDf?66rdS`Prp!B;)SlXA[N=Lm`o.4TtiFjSS):ep\pm)2,T`GYKK8P-6Bs7AXgQdpEs%3"lCBDJcEWZiBLm)*#P8'!L3M?,[D6AtH(#Gr`5Uc&=XLr(p:$97R'qr 6pH,a+G_VKBekG96_MG(`ps(p$6`)b#Kn*8Rj0Q@<d/A#SmpWSX-;Y+*p<>+5)\]*)N .1<"i-71jr$AY7p7"9T),c)=`S)N*B6:L0U*aW-HDXfsXVf&a<(l/tsMn`;`[l=;f&rbgcDP19ZEZA`M6]Dis-MCIcAO6c+p_Cjkirb!a@hLMC@MqCs2O`]B#rob8S>_U5OXA3pr&39^o:+e9h<W,t')V:Kr#7),^&"=l-iNfP?7jcT;/ZiA"'`s>i 'I3"4N@<qLlApX^qJt`Vt\.\=/QA^Id:T_<*ecck=d0&h/J\hBn6E`[D3[KW!mkdGI<[=!(1r!`NXLXQN l*>\J7Xa=lJE3f( #c!T]m0R<c$:P,Xh0(nCGc2Pk5LWZ/J2$2,aFQ8'$44__AEApbd)#=;YFi4t$aDo=m-VO\_C,=/'.!,TsXFDNePWVRJ`3;#D%`P7taS3N`rVBA2d?=`[b3(.@KRA82@U_TF^+A#KFeH/#2qiXKa` AB/[c?XX::Q4qemr`1IjR]5X`?\]o-( 
F]Ud8/Zd<1/AeMX7'MXALA.[EWo"A%')ZLIJgcS[>%].V"YU.*=4nA:rY`)W`4sGkr.ZjqmcYC0gq\QpDs<Jq5=BFg$j"4bQ#0b@%nsDE5Gdd6MQ4hbZ,4*F9HTgOPS[=['&;b<tm$,OahH?!AiR6;.8fL<+m%@H9k&=hLI=Y9V!2(p@>/%Qp5h1>ZaEk$iG4ddArO`raj:VV 0a9Co?8!$SI/:Y'1@hY75]?Ff'^? QFb4lTA.f$Yl90*85i)JDM)1n "UeV\ZcL6WMYrcW7"8A=OK YqMAG+\;q"$Q$C'>(S56?>3A2UgAC#$.EH]?mL2VM;j1kO[A__NaH)etB4AY.nDt0PlpQqVNA`@O>JJG]5joI]sKc[gAJ3GibCf`aceB6$AQO@=hcmn[:%9WVS$?\24 o>I^]qF01Sc18 KGW9[F]`I"A$rA]$6Q?OnpSHiaO5_q'XZfnP:UNHje<=W9eXhr+2qdXobd8)-FRk2OK?\GH<Z?V+jY'U!IC6$5"WL;REHI2kE@>PXt\?Wk`3n?d%5iZZ.fCG#MDi_2N&OObPNm(' SW:B8jX`%fFdY+_'f#)B?:*J8sbf$D8f.?<&Hh-37eQOA$E [(Z6"/H`*"%I??Gn<X&q5,@D!$gSMs/ :*&%b7,-&<;A"Bs3CMP]<&$r=Mrl^`$Gg"Pm/521-C>ldl[&$aZ-6gmUAESdC"<mkJ[R4J_rn+E7O4Jf9]atEa!sU3OA]#XlrMJo,JrZ%5d2]d5Kt<MD^J>t`86-WGc'm0cgT?0g.!InTDh,_;?C7!-SC-B f9t$\^.0oSHA94BgE+A2)Ca^"&sN4aY-`fF_N`'<!^NUk[.05sj`2-7AO'WBAAP1.h--3,AA';bdbNhfL$p+p^!7Tdn:k V+oGCW]j[IHOC9Od!>8''4mTaq[2kH70 i+EJh<,N:0<@Z`dsBKA/E:$];\j-9O5#`\Y6.'F4+AAI*L >.+Aa"smg1e-;U%U)M/>o^#f3*la>+ E/j!4FcZM(9RbWR0'hE\*-A68[F_G?dIbeH!+cS<,-m6L^8Ffa)]!9>E;kkaW%rds,W+a6mDnhJ2E?DU'f0OBb[Oo&>"-\P2'Rc!Ls I0cE'nArkdfYsfo_`.74[rF` Hi:9a(?&:SprMXm4Z#nC>I4SZ3Zoq3E3207!e`N7pL]?ALII%EcE)UcAsE.W2Hp;H9f.6+, ghLDk,_;h T&AP("mHq/_[9kfk(L0+c_kI+X[b[n.99_:>$f.%.56=4%s9A%%'/"LZ^Q)EZe,i%eJmpGE?Ys+0$FUa@=fjAQl!/Q.CH2_%2AX^<_Y:3SJ]rcrXA$dP).gd([HQlTq=]p7/]!LO9*@=ca0/Z3@0FIMFf_siXZa@9.G&^+ C Fsf!VNRTJV4"sdYt1e`A8fG#/1shHkP&:Bo8g,r^['#Tg?tFsP%J''$2>C)+>P`r)'DP&/J95^P fLEpi4=G3)34qS:,XGEEiEqS$Qj^Zdh7D/7C"8KP&(1=We<URP`"(9Pbg@B9-_]NJ)^]ZiS^XdL3o 6ns+YqAs/7/c3+Wi,kVL6IioV4,5s\A-jfBCQXt.qR/JRlL9B:]=&[f0^A(L91H! 
Pj?F^B7M3R:So;X!PA@p0AmW:_R;T!OU=T=I% @ZS/4#E%;gF,<k>3^kEtL4TbtU?mk;.VSrCA$8d\08>K*hJib__TmH4e!?G<KqO6^Ys`b:?/A"h="9(m?NeJ\'A=6i*'R+G(<"4J$giO@R.dsT#icqri$&Yj 1X2L_EbfKJJDg[5=eaGDWt=A>5SN_49jl!&VAO^s"^ PQKL%5)aA [@#,S0O&p]=6,mKq=J,mAB.<#*(_eS ii#NK#p\H>Co*SBDXiV`"%;o^=4VaGiDdCsB$H@\[  FiZ.iALhM>=p0R8UoC&CRV_OUDI5@4>iJ,%'G=Skgt#hMR6H)Ka=91GU[ao9UJH0Ym]S:ZY!d,-1$1"ae].!Ao&h-`/L"ej3YYI.Ge(Qce.Gn*QrSaX7WtSP`PKoVMA;KN3/?YreI$!KB'/mi/ h^$W2`BjDB@9p_JIrqFTq22\`WQH:;")70d"di`KBZOp"<:%U6/-@@T@:KiYlANXit!UJGtk(.t]$C#o+k;:WA&DgiP+TR1r[W2n=KD.9,<_F>i6`85t/+(]s:7J?78q*>>7Q' (Gta[p"f`;AAsNQntj[<HcSFF"iT%Z,P]j/G4lXD3O,ABJ\L3N%PT8_[='i+QhC8A \.X9$Krtr0WPSG`"p8 tU'AJ#e([Gq21Or[em.EB,"-r'+:\t:ZmBb*&( kpA!:63RTsJLbATN`//D$WXRI7,UIpoJ+jDc`<1tVGpW$Pcr#OHRPs6hbZSTU=mkPVLAhL]>@*^q:UJ,G"`'&/)?BB3]4c1p%P,_% +/?^@oPQ1?7o+ppq"@$J+cK4!GY">?J1'-1fA_lNn/[rmG1,_j4oq62Y?^9Q11P0E5'0'3J+tnBlP^]e"0kN3Ekc1WonI9-qsMs@b-.'/AACX:FY190e GgVY!hZ+ Aa7V`Z_k0E865">AW=1Dk^,,nadpT?l,%$1UUUqnh51&M:W,(UFc8Knc0ULMJ`f8h]Pb><?T;qgM] pUNi?FA*HgPa*Ig6rC W*>T*i_Y$RHG5hsO 7bb gM/A5A'5 5,*;5S82DX%QmA5fgNi/1&P;q]dsNU41[#D2D^Z:M&[;0>b\:7jR*e* _H;m;@09TTsFcjAt-$H?DmP*]l<i`33Lth-ObZ#[_K:E1oBKA\U%ZRW/$HD3JrZ$M-%5!t<7.Z0]pM.75r+4gI;5rS?sXMr`\0Dms:P]2@-S@b7hF\ N\EOW*Djph-em`ak,Re(UkHPWM1`S@_8=pfcP8ZF.\sl9LfGbh6r!*fU Y@Hf?V<BLY49;#"?\^aaUV<$#)%"2a4O'aQ,IKl.n;^dEAqKdE4Z(HOgOB@!\-AG7!?sUlS$?.M)&TW25Kia=0F6!>DaNQ=><7L_aC4oAT*BE50p%_nI),^WUrm>77Ah65?7lQMn<"4@nc]oi3t-A5qp*BD@e'&XCij9#C.FZ6\dt?YRb+^2EJtNkB#o$)'-j*T;Y!\55BHQ"*"fMtP2[jgYr:N!ncG>:B^p=+AK+8_oOY8Gd/%R9]$3`o'aKE34?h>E9q%iIQM-+L46?T]4U7`fUoR!'Wekf._+*ED9<%CIS:^tD+/dd>0^3NWop?Inj>Nj[%&qpVWQ<<'dAhmO\pAECmd2l&?<e U#5.)HBCb1A-KcG/MVi8]>2 mDI:dVSc!3[<EGd*YLii!/6_M:1t8>Z`'!%dlD@8pAKZ8!mkb^^kWP[l.[7\C)Ut\hHaj,H;QPBdpohH]fdK?O.A,EX'5g-6 ZS?]! 3rTg@NE1_r9>p K>[;D9OjYkX&b1g4#fGP? @VHt"HUY@2Iso4nIZM6TA]&Uc]!nEHLj2[@dUd"(f(l!VOgneeni],`S)L:dL!&h2`!,$F,=5MZA4(5*0m%#/'iQY 7D&mN=2VB,19(_Rq]Df+Y!\=HGTK:1J?rkOLae^5`JbE9dah/X,k\]:A2<imn=MVE. 
M/HNTl8g$m;($GkqHs&f$HhGKOfe?+A)F:2^73<l9KSBWWr#R[/#JXY*;&'R`Kk/#o9B;rL?Aj6p8DIeFmGefh#KQA'@SL;>h.Gginn.j+Mb>EXl)Xsb[NNF"Bltp.U=0mq?THM0*nHSblADA&aHj#s#Tf13Jt4@!6NKoK(d5.TLh)9Zneh4)7i=N;Ns?BA5kc"=Hc7gUeX 9spSSBdPO(,JX%a]"Z_fAJ!:"Ds9frG$$lIrib./&@O%f(/`P6E`3T@e?Tgo]rbc*+5IM:H8"GN"j"Ti#37eB@"h44>+B"gCU)eXFRsTHp[V/&a QO:W<^A$_:tU<b4#) Vc[$+>[/0"2>T,YTRNY Sn_@0#m\3"cqBtQXY'?Ial2BW6nhIA\cUc=c B^!d<XT^L*`eK"'/$fD(;\=]/OUWNB>^W9^_-2_/qD&1#nst7C-&W03D>ah.%AU2(Je.AB.e#GZFFZRP_GPM#E dNaY 0VP1D8d.hL-%'UnXRtLI@YE"@q"QSJ5*<S6HM31gL;leT&,%CIVCA<0o!l%V+.>.<l_^.0p59j-c,/D(PGX];F20MbqZS/71H10Nb jUpp<,XgB: 3]?!;]nJs]66fbIH^W@Q'>1Z&LKison[,3VDs[^MEaS]Sn[35_.]M@DNso)?sOc94@$QJ&533m.$k%T[B?%5:!;D95dCA<g8R\ U\YU67Z/O)%6r\WpY?+3RN+$c] 91:cqN'9s[U?I3^bo3"Df-2+&Zp*=A$O3[kn#a>t"f8(M/,$_8@4Ng[TBACtPE600l;0IMBA`e;JD.6+d_!,C7X[3Ap5S+JkPT-T>@p*HfGgC8G8l$Gt&;j qFHG47U5rP;'A(8RiEp@-"gq85#djA?-M8U8l`BjW*7"o#3'\$dI"Da38TRHP=P=b^Wq0LaaA]%K'8Ykc<M'RE1.ZAK=cGBNZ5Q-ehe`7C/AZIBB565C!l^AsDa-_H!W:e9"//Q23bDZ<7<N;/8,s5\C;]dA!A6*5F]!R1^_(jVVN>HJZ0>RVokpZ#)k4 $Ta( E*Tc`dt_L%h2PhE.5Al1Qj%)HdAUd<e1Cdhid2]iBZ_)Y&3(Vs6!.kH\E?X;EloK*@peE%tn(n-8YnqG ]c3_ 7];d4;IWQ8\2NQDZ:TGXQA<7n,eM!=OH6N6Zd+`j&PR_??4^pH/Hk]n_*<!#RjQ_A((2N9Kq>@.S4R8'+U'p172,a1$&m'*N+8iMb:A&]<tDCi8/#cSI\a9V4ai5LalmGl^ogSk@o2VRqGG\[8,@&4;kt2O2%qgLmo=D q;O )14-B0dlr>JN+-=Ad,"N$NbM?d9GjD's27\E<@s&bG1?me7&qFJjE?g)-^fUf(SoF1K_\<l-"OTQH`-+4UJ_n`D=m8nTN_t'3(Rm!*A@p;=5oH"qq4B[O;qM]t1UF#Bpla1KBSX;K;<?]9ce_eYJ;\1'*I&lp.jHTAeLc9tI1g/cQf4D_mTP2_lsaRnesN;UcM%GQS/Ctest/sun/net/www/http/ChunkedInputStream/SCCS/s.test.txtaA@;pe?>W!jN@`lf3h*-Ud%8Ua!AkXGOL%U]+4Ol'!5J)M_!`X0/T1,N^(.SC77FpSiE_rZ^N*g5Ul:XIks/gDdZs70+kKtZ/sdg(l6mE"akc.1'.6) OI%<l`Ea/YCkJe%/+MrAndHi4!CXC>Xoka[%<;iI!q<$n0[X\X)sJ>:.4trHQF,< =UMci)!lr1H'8bAc_m\8W7C0[fP`^sZU0W?$$.%XAB5G<%JTq#0L7OC#F"3Lb#eC?XB6:[2%PM;S+e6!8p[lA*QK7Ci6E)FZNRk'42FmIK"7$A NL!DG0!ih`'L'-@jH*PF0+^e$4336K>LoV!'YRt+!IpKM5o7&]JRKrL  qaLj\W`;_NT 
>)t?;[?')A\^oa518(VaT5ifW7i2_DFA=.XE-R%Y+TWiZAQ_**DS@<6]AtYN3qs=;"t+ni@$pS;*K)6BAta'lcX6kMF2Z:H:<sDr!kI2UsDG8"e&>].7)+*EOFhSH4S;R7:QU`N>c.S9B*o6^F01lSo\/NX3nd:7>9Nn0! 1Jm7<hl`RU;.M:_j%0I8 UQ"L<++5*<OgN2i&ckKCBMBP`(i<rapC'm;/dces:fHsZ"pn;n_R]e!FDA\Y_1CZ_j8_ONkb3AJGAO1)elqE_ZPp8E0Cn!dGq#hqH[gf:t1=al_H#OX.BS0)`te92e0:.1m[nN`/1Fd#c3'6J(7eCf8g^$ttjOKk'fiV?&mR%jHYc[teB#73Gp JFp<a,K3._$NBH8i&<?8dRR^?jh*$3?S'.kmCi_N`4'_gLkJ/Ag#gl&3L<i('OhGXmgsC*?$7*EJN('mD2LM27N9geZ[-nAb\&:rGJ;*+h6C!tp$Ys("dEZUT3T.79sh"%Loo[c3ajg1SJ1NVnZsd/ HoFS(eq+\/bI.&ftd*gd[,#:;`Z%<M.:4&l1&aY2?'_A90V^ahK8W1P!*CtA:JQ$L417:&]g;nnfo\`de080A!dBWZ%OZa^1-@&Vf=(4OSm'<XVgL&OUi[&ci(R'<G0iX&V3.l.a+D]'p,cJ8El/8A%_8o)#3G#&F88V1WZ-)=m.D<CGCrNMG[CB,m2<\1%6BV10q46LoQ+8`LBQi[_sP]5@cf"=/3]]0$&+3q0pF([9(%bA#mm]fR%"$,Y$T%JR4T]^p=&e5r'<H"ra,j5T W$opW:i<GJ)^6s2'oE-W'fiPn`aIU*@G&KSbM*NKAeFjf$I83:X)tL*6\4(QjqO"F[akAH];)DO&7rETrNsR_MRBhNM>c8X$<5`jQ9,r;qNsognA[a*D2sp5:J<P#?=K<pO3XA>lS@D QI+phUn!m(;kKD,2k+b`Wh\M3+1TY(, mAs;!2;NTN.U^ /L2%D^'L[QT2Q[;4OJqK]Ck7E[o2IM7CXBLW*m1?#s$* H"MR[3R:[/.gC\[T!`2h0oWM]*&K)&Z,Kh(JB1&.eHq!N[M[AA$"Ee"IC1\:j7:DPn/h):XZM\OPG)o],.9]NghLT$DA#5Cl+I# V%t[!l9GmT;)8AOiFEXoL^(2'AVZMN#>0;9L?,JF(gifpJ_*pOHQ3o0:dC>aAU=kJnrVq_:;m5r9BJ5Y)df/`3nlE-6e:^i!!7@!b&&cK24pdA]mdr(%hi@YfJD1YJZ(Ddrj08G:J$iR%o\o@4aVPjB0h59$ek;]W)hK1B1-(^=.gD^<1Hgl8LmF9M*e:B5`Zf9!)IH%8(l +VXME\Bb>IRe8JEQ) XNm9A8j5.Doi^+%c&l;N$LXq4]U/fkm$0$;r.q7aX3)I\b[`RhVg\&@]Q.n/>g 70`Ugc7pRQ*LplB&QOro5!!g@8HMZB7@,8RtoC01JED6o5:l_/e'Q(7hal>EiWOh)`KnQh+h?5r912!?'r5'b+(Y,[Q'#='!H\( _`CBR/d;`6AUDN:%d)@n`Ag[,#mPGYWrZ]7?(NRG'Cl9$oCS@^WW8fL#po4`[U7&;&#MQ4l9T.Pr902kA!LQ<N+o4/=.N[=@dpWJXQ(U8 ks)70(nRiF&HE<1<[YU9Ef/ 4+W:0o4]OUs%@tWD!+Xe`%CE:J9;As:KGch+ORBct/"^#oRJiU(*iYS`%]_M;L#8f$N!*,i4DD3ed'P_]VAg4 oQ8QeV.h4t(=':Oc,JYWD\Y"?f1ML^)$oh@k[O;AjZ:Qp[\/LKUH9M1?N`/J"?Kih(RN7J.$.]Arg5n;_M8;>!A026pHaLYmq2=C"A%pr1rIoJN:ZnX5ejVhokNR_#X;5h&]m&_<H) f,#*!tAX1%mX]W(\%!('#QAFpP/nAi7U8Z$"3i:s"2\a-f 
r7D])pAqsO,JJfT9PM0f$`96Vce6jT_k(<Lj#$$=G\U3\gS1A.9=K=#\+U.;SY)>Ckkb39/GjZkTm)8VkLZHmj5%UJ#G2ngHtb([`WTN$Q+33HbF]qfKqUmRZ3H$m9l6*G06>+h g^<fc0`X7Y_\aNE]]X!`\pTN9=t:br\LS+kiB7QJV=NV[+E8H<,n\B,*N4d+@2ne+A@ )fq4\\NZcMne*\a%6hFY='!%ZOcneid>`NWdnW*Cg4kkkteOf*aOTRrN-:\e#%2m^87^1[J9T\#&ASQI9[O6gha2]jqa Id?:@^8]ANc>@*G&.(W!%J0?U\FJIrV&_CKVR))Gs@H=0I4<i-`B<OGk)K6L*"Z;MYqA+n5,0Sb@P5VLoGcM+/%\_c%-+B9,@+pa%R\/GLC2 _'cH/oj3lAP8lDp2?C.X2.9Vh05c@K8m3/:%$XI%i.1jr`8(rRLm<KK9C=5>f]-MhcM';9"l;L>dm-)P9fUJU@G*`SE?AVGn>AkA &gtCe`Q_-Y`I@AZtkJq!oD\aIZ5H(X;g<O5U]Z\hpOjksfb aKpH>9K2fds^;SA%_J"<dnF._00$)+VG:IR:rUNYf*?e\0*KBfR2beA&JnC7n^(b[*g'ZV^Ah70:!+*UO?pAPZJ A82GEHfAkI%f=5*?`,-<Z+*@k]F3=-<O4lmie(C\:#n-OX#+-l3ENgb3W=88A88&IhM?H<5#5:r?r2f8+WqQ?B@*rUV=DW;<ARrQ\Y)TCX(j,rPW8T) PW0_h.b:,&>Eo*"F+F41H,KleCP717"L69Z%5(Ah*j%:q?HH.maa@to2VqoQIm!5CoXk&"Akk>%/['&bP\X']n-A.)q$1OA_9C4AqpW%f`U-PrC< M %6DqGB<QWm7X@O!flCOt_B;2lmK!KgjG*'Z`_.:6_9k:-"ZV?+KZfg$<EYIV<'s"&N5<m6ok"iFNPZ3WJA1AJ*A]#Rj&pC=hQ*?2PX7Og1<B7_>\_bYLtdAd)L$+4'd+6K/.:ANU.5nk<5$9fSM+,PIESS]a'Fn"=*qg5PO-=bCW7EhL6/1.=hgF.JkE_GPMLs7/2WhW#ZYGLY\Y 'UTr;M7Z[\e:JIEAEjr nisP*ZAD%%X5<U^UZAQthgV]=YG:R>bLSMC`_0Vg>hbD]l@8Wj,;CX(K&3XT%tFR<>F)'q*#R[bcS7Rke'E>_6IV[g83]4@QT,&,. 
^7K>nL=N'[,Ka`2s4gerT?c1>+%+T9E6_""$NjT-]_A:C,\O!<H95nQQG>N)oLJN+cIh>7M9P;I8f7L:FJ`h[$88Z#".tae"1(Kk12CWL:mUi@[/R^6[Fl?`m89[Q'7MIKRtbVtq?jZOWADLb)5LfY9NDQlo[I'jbtm>1#s;n'F)J\B&X$T!9sH@4@gBL"KZZ3lpNV*pm%E,i]i5PQY]K[8M8A02L]4.P_8!6oBbT1e4U9s+LmG9BX1a1SoTJ!RArs-M+4oHkM kmbd?dm^IE$`>fCid0I^j_FA6VAr11+Grl$h=>6ptU>V.D$-Go<;AF@[>3j)B^`8scNE>>Mn-B>,d"H6>3]6_[`pbI Tli@StiR;]&^BmL(.O&<D$)U'*n7+D]7<Q :G`sMgokVOLI[nq#W 6D4.1%<PE/@nAYlgm& 3hAt_NHU&NVZefQREB6s),R:I% d_;.TZJdm3T@A<^1 >ll 7T:H4n21BRe?`b,Y9@G=BC`3%/f8^#S]K,AJ]Cd>%tA?@JhO+3jjb6mg.[lFKH_N54JI^7n+.R9()PAAj',,kea\.,*heik"i@KE_Wd,=KmhG]")2:_[JOp,G[C]tGs-d=!_7L_p<MA5,rL]jb*?p1cq:S$NLHAn)eMOLqiI\PpeUhSl(G^-4H`WVCG<7]Vh4i!U_Z>b\'kZf$1I'2"0NlS5XXH\hW0+tL6tIa8gWqDQa6SWil*jk4V31KfU#h!L-(2kYC=6RZSMKj.M&W44@Wikh*,8+A/k=b8h5/*O^Ei T.X=b?\bmFe4p =Y\%/5Ad:(39>AWP#ASmINmgl"gP8&^icBi;B?3,qR,UdC*COaV^0r9^j'J/g1'^k gr`W+8\reRBgta9P,Tao#^njl!.aR"nT:YQMp[+;sLi9m?"h, J.7VpH.?LB&'75J%9JoR&iS@9[rK=UldXCCraQCD_O("f>ahCTF/-Pq,e*QC7HO%sAF9-@XEV5QAft?fZ+]j6If9?D/*Xq,>H]8qjGaZ,4coEai+qPAnrsoL5!OCPl#OQ2Qj9SNTT5r=@-ghrL#+Z?)f9Cn\^+VT4['%"0Apm \ajn%s;She/E2cGUe6S7HifFtC\C'f$Drqhgn6BknpXP`&%lOi=t-aD\j.X/SAW)7AIZG&UWi_(%A! 
I9pLp8hgsS>;Q;rV5oG9dLDI1=[E9K19`__]/68q4[nWZ+J3Y0X Xop\8bDX.*#el,[HVfpa#`@JL-`dp)[Tk#.:sZG[J>9<e\W=5V,lnp4i,<g_cmqV2O;)W5R`64U^=Ad`f>P$:LU$DZ--9Sk;"PMja,,b+r*i@qMY*07mWpi@@c*@ G'%setr`Oo i$*<)K6UY/`X<tP2qM,;.An2aS6=$B86<"3\3!731(Y<#tL*X0[B+#BEn%F6lHP7/K$p6 bH.PBt=V"Hj!-T_ 6@Z=drG0j5djnd'BD,"*5],!!W[4epUO)E7\jdeYUbPAVq'tY<R:EjX0/EnMLIUffM0"TB^$XPVSGNnU5K9XJs-4JnEsr`@4-7931N%+\gAh*>\3./4W,<S/RRGEA5o)#Z6MSgW<0TQ&V7f;*dNpn01YprAAPoFH=&1Ta5([9[Q8#D)+Xq)AAqE+V(Lto<RfAJ,e4/"RCUgIcY&g,:IiCP&i]?p>cg])P.^>ENU4ZbY,p08=b9KW;06emM0r2D]rk7*eq>]UYAQC)68bl4>opCE\@Cia?Yjh'DNB5e!*%JLf=/:.eGj+P4?DC_Om=IFNsJ`Q,R`E9*te+:d6sR8O$k?=rhKA_+]7Z]$]6aNKc[mmE,:FtHn"Z[SU`r.E6a;'n` .6L1QO(!JhXaMIO@+Qsq)'+DD-5?B4d$!Oqb!;,%/c=+I5tW,Oj<^a?%TD:r%f_9U]1t5js[JC>f&O_cV"rIgiJs^,mD?<?a#[6G(U4b@q`T#6eJ6.Ab!M3#lmO?hsEZL*(dkZ]c2=tLk(,oE6jHZt&N=UMKf'-,$$)6 #7**I>g(;(_GAV)i\M)[#kW''Ur!O*kP57AnnHY7[47,R[&9SbcXW*BVr;=mr8+_U,nUr0I_r^@ br^=pAgHqka3BEOph67]1U#"j3567IZTC*`oGdO21Q3l/V:m7$KdVE;bV28[Z#G.F^&cISNhZ82.emT>oY/^rs.//X=<$%]BB"528)tTn4ETK _5C_IZm;"&."APR@,;=JGk6"o.[?<%Tq4S+9WO,Y:aB#0J<DfKk(5b#5sT<s>LjW0#Sm(\nr`o/sP3Ql%&%j&<>doic^smX8+27 '4\1M h_0q6)99eo^tAm^GH!C'QF`e5r:ns#*bFm7-:1 ](B`E2trob<6e`^FeI\XUA9kMIt0'M5)T_IoX5pN`2C*E#\2AcR-Ns<Np*/%X WAd`q_JP_gEEM/1(Do<C8XgY#Dp<Xm/%LQ"81F5=N2ip?Wgm>d+ pqo5=;0JsKDQh+'kN'%;"o%:ChHIN$R(KD5)A7Jc[eHBs'!C1>3m(-fGVYj$ZiX/Q7HArA'Mn;8r.W]i6m8/f,>4>XB@MJ!Io(o'T8A@s(?h`KbIGDOkMW_XCee:lL;\#@1I:YO8,p&dMm]#h,*B[o;-E%qUt_O00%l[_BkDt,Wkl8g\6@:S#a302_O>dZ>*>8)Sat@-\/Z)ds3.<cK]r=m?rMJRU//=EbSR$cY=SD2([/I/X\8,[noB[3;ors H$FV/;4C":NeS_0Q=R$^cai6:+r^/&Yl?BQV,?edHXV]5IkLG/CFp[19UpnD9sA=ALs3&8WJH-Y*r>t6]-a:eLO/0_0tPT-Yen9(*eGfRT@F")SfYnV,$?L>%q,A7NT=/k'oh.kI9f.4kQK S(%_X%`:"%g0/]-iEm&[U\ARk8N+U3bl4(Z]G$9_d'hbSoK#bYreD-cQ"]W1T<lE#l1]>26F@^WsI 
QV,cpI.<WPTP<L0AWn,X.8N826l+S`=o^_M"<<ALZA/7J`TA+CmW%=Hpp#G.LQQ7,-+d\X_;XKsIh:V;?^EJ[CgDO=W^d5[HJX^sf<MV&9^EGckgbcj>iGi0/^_H5W^PS<eaIa+!`I#',0+8EG_45qFL6`VqR<?0'<+9K#6@b1iV5lT=n0;")M_As1G#6s;+SNtAoM"@po5<'RV))KZnbo#=h7%Kt1C9glfp7QdoYlP#O5<ne4nA`qaYG(p6W#-<'koFtAo$aZ/iWiDlbLFj)hl57(eVgAqV:r\VX@J7?Q2dK#4JmaK6e[AKpS\:Yc=Wm=Qc*a+n=HP$8Rca8V4""'\Xqk]Z-(ER'AQhAgs'qL4b#R4#dfJ9ZI[Ml^+ V5Sgj&!)!e"R'qLoig$a+22Xg$%Ha]aIbfPgUCH/Of>etF:UY-Q4GMPIg:1N-Ol;P#iQo3i4CE.]eYi!b.HPqP8?8rqMK&)%ltAtt7I[2Rc,.F3()Il`)9B[FH0NMiF"imOiBAAQm??^25+I6r6eonS@9&nM5t#5HMGTm&V?EMn(#^6B(X%X_4*o&["<gpegC;NPVl<q!n^mh9(G;!9A44i$aG B8._?n.9Kmt_Zj3VmR2>iS<lhfLWQ"r\(S.,[FedA P[:eI.[7%1=F9feY+/-pqX451L:lO'FDF99QQr3 N?7)e8n=JR*B9fd'CSUR],Qm`e@9#bgTe$TNn;aV6#'!Fl?D%QIa&S7'dg,JOJDlp@ZBl;Q#_-Tg";T. AT#NRW;.50QJIm9aZglf5YlG+?*"h7hZ)^Mp $`9&<2X_i)"snl(JEW0[4)D9A*/O].#e!*V`YXIe<;[*W !UY6+;+gMZ*Va^Ik&Q;_`Tj+S9S2b86$*Na;?ial(-5HWQA'T;rp/kbE/#9>mAC77R<pmk,dTJbl*l+&Dt4*$5l+L5Kd@W=@@r)"YYeK)RQhDhAWtU;jBHjg<n+XE(-4#rZ/G*/Ykp\nE;kb7JZ WdR86< 8.V=_O<HcE8cdW!\\&VFIM=K9)ZJ37I<2SNB,K`&9FOIdcZR6/@-Ykdb-hXdSi,5[#gb(t6Nc>7$h<(k9T(XHbMA7/&C:WHXsLIsj:_JJ*Pjpe$sh@tIp:F*]"#H8KFR=QiEorEFC2VZhO'(\4^C9qN<UQIrXifg19elc4/Ebq;8^U_o9Xp2_f(SMKRC<8Y]#)eI':0-CiAtjdVV#kZ%!3Ja3UftM%_[FWt_ =*X7()b^A7B]M]dog.cq;b56+MLVUB,fO0<,>Qh,rjGm8[JAmGAC0V@=G;mMLn%Va\ZK>niK0tYo9E7\)kK\U<#6(oX1Ngcf?Mo;dB`lpDI9fVs2>#Hf>LXnq7PtYp[8>:NaCgFI+Z@#As?kokS9%.00\jN"UP+F9*5dQ:ph\)b]Lr*Z:E)bkl q8k1L3mKlc\70&ZNfp7Xq&3]dNg@[T3&/p,id;XI54:&`jOVh]U"Gd4Y"gq5)\n4f!TN!A#lCG:4NV< ,%p`b<-4q\dt^n8b&lpgM+dc/HXAsPgFABAakW!5h38tHVne0prOK@GV+Dh;.A1#;WF5Pb &)A`Has6C#0[TASJW&\8JYpDTHK3_DE1RTlPjRrcm;G&I88C2CrFY'Lf;8c&sr&pBrTSsN-&LTj%\-ct[k^\Ro8JPP% 
J;Pg]p&8C7`CRLg:cd+T=L*p9VD+'[R$EPM"iniN=s(57WT0eU$K:l,;rATGAKKE39d=`!N-VEA&5[b!MH?;.kKP"XrkJKa-\kZ[SLD5VO,Z+J_lNp@2\HE6^^NO%l%+D0^c9>j6D<\'V;;6[8hd4)2r@\">!pAjXRrKW_:hPT*039!&)D^slWa8AVt35M?6^?8)XTqI*9O9$SGsX!cbr@t!.FSnIGf"_TA(FO)_DGr?Cm"*'hhOkF)C2fiNoDP8YCIkA!l0gOG8bCXsn.F'3=9#(A#r@K@Uq51k=@_,Qmf4;^p7XqnGH=0h#]p](HJ?Xj[>r>?U-4ZH_iR3c;LenPcIF/4\*.ORidj\:Amc]oJ.XFN-<es"F#Q3IA9>aNP")GnN[r.A9qFK;Uc;f<$%X>U+eU]\#pj=Z hqWM:MTq$/&N;eS&<8TLd:&Ab*r$I`^Tjo9Vr=O)->.eWoX67"jKGNfFA%?q-'EK=8g'_3k:PMOEKE66cTU99+#(D PREH#Xer)2ZO_:4H:'/d Hp!I)nqOrglfos*[lJ#rR&]ShTa)$W;ZKenVk`)S[8@?B*'3dHLh$9&h*XmqI]Vk#@AZ"RV8n(tC#LlbRUJ?(hjnf)ZtlUY[ sgHsHKC%1nREJFq+@D.nb4aA!9pS?c$#$-DA*6GQ568LTrtQOT79!lk2d F<V \=VL),,A*r7Z`\M8Tt-h6b"kK0iYt<3jNM1@,16$2'fZS#6XF?)@U 0paO$6SGHL?aq:nOf1 F=\]]8HI1qMI^MVRTRK8:rAMciqAYgWp-;V<*eim;N=!c,ICQf.5jD(U*Q"s9mbb +kppVO`fBYCr%5DOEt?.=8dQ,^JA)po"`]Aoc&dVOHI"D[2ZQ&4LUl:-OA-LG.k^Yf0cfJJP)jBF]0;QV_c\^J[AbN6.t,*Ya"nlm,m%$:i(LURoO=R7C=*pS=!=:El3k0PA:!Q2YLAiNd3OhDTX>Po5@"s!WY7q`=s1(Q^S90M#2I/3pHgkr=3IlK% 1r\_'N\1cEU!;668C<oAKN*1JToC/'IakSWG2lc`DA5E ^eL6(#RcT3"c,Jo5EJ&(9hWoq.];E;6#Hh\l5*es\7r5h`O*&,VFX>"1-W-JJErl:VNKbji+2SUDcdH0\3kh>c26a1h#6$8AV3dCAk<5EbS1Z 4`1CE7a7ELnOsLFXTVF9S =N$7b4;a&,7cUY2hr*P]nX06GrASL`<\03eZ*[lG`_k`i);at3041D"Q$`9L]YVZ)Im)IX<\62,DmYMR\HS"t?]o1:NBtH=[:>b#K&Y,$Qa /sl][0 &?pbqX>nU?)1#?jkC0)^$T;OIEGAQ6RT>Z2+')DgKA232na)2586_o1=A>?mBk.9\J@isKbA6//kpEdDbA]pCoK$f!%#?`e;Y-;d[<`&+=XS\&VAoaFZWlAJ#N\)4E=0tMCMKXk$6 Qd<#5@rNCh%7bQeAke(c)Vi0bEG/"8`maPqA!RC6D1 ZH8ME@A_[flBH_CGObF\AAL];j<a$\sCpRQXhFHJ=+IQ'Ap`NnY?@dpaXiO6'V'9=]XA:A^a@dD&9$;GmjRT <5BcG=72X#SW_-jm8NZo *bf$HDJT<F99b[a`]5`Aqt$%lr0[-IGP1m<L1tElo@J567#/\[)q9J!$q0RcS:48tfhg\?8I9B)bd)qqZo-;O$?! 
GR6O X7E[OqO++"X2oFnYr5AZEFqSNi!MAAJ4>_@Wkf?hM25%P6&fg3/0&ZQYE4(ABStJ'qKG&=YQ"A=esLc<W!hKDoiHFYa3?9]",+Q'qDf\n'qF1*Y3q%.C;,es+'SXd^dCto,X'<ZEel$Q1kos$a(n$G\Iq[].13;`PA4'qEA?X8o !Nn0!e5D<`m&GpQf?AL%/iV\WZtgl4jrC=(5eOQ(TAS9rj?5%,]j3>a`BO5b\!,Cs%U5WTM@Xe@59in0#Wnl2CGU/f(I.nnh8d9nL:.ha],dL%!O&(;Nq5MR]r<pR\#Sld##DF;Z[sJ9eGCe1\I&V&qCqEh8h*ttJpT5X2eJEi(Kf;)JQ`%AWI4g/9.*=?H?2@3-:$BNSWXGXdX1)$-fNk/7q72%GgVXc?7.%-7` Cg2ksnoVOp#K0K1'N*1Die>N:/Z;ga-Rd_JjKB)*`_4T'2AY=IC:d./8@WSU1) [8s3E[W#gm#Y]g4;[r>XD`LY)lAF4[TCgf$-_4' 9MH"I=N+!+g4q:;4m9WI.n/0dm?!6mW,,C==\O0^I"PPLZq2b^J$q?gQG-16I<rW'6]$dII283 UUW7V6D'[Vc)la!]N:Ym*K'^0$U]RH_N,U:]fChQQ;\"8\A.Z"\1-!mP#3P"6TY3:Q]6Dr:&6q:N9lYU2$QEtY]fR3aCR5P^#hO4:X#--91cW8Z96HVZ$A8P'm9p,8-2_R-Wsh2Ckf@4+@Ys(UoK@*CE(U":0=1OA_.Rqn<taYeNB30THNp#c-7\ZZSdGK>WAdHt5a>4J/_ls R#6\J5$X]o%<%'"3 `A*]=0>+@dhpC!UWb>_+;@E;ka<HZKR8T*,"<7J060U">\a h9Y9M"7Z_IET>)L k$d<W#+Y-)D,lI'Yp;_ACgMEQmhQWtn!$)&9m(F4VZDP0nWM,;dqgGt,rFM!A[#JCK!gfSE(h1im]f9lW7RVa<)(th@re]`/:Cgs>bAaL9]AM>As=oO(Pc-D5@p#<'NM[JQqZT[P=BLtscn*t6C+EYZ_fh0QSCk$LI0(a&`:jtpm6^%=L?8?]\F"P"C](I*(^fSr'$[gatV&o:&k?m^:PMi.f?//`>jOF%`N/ 7-7N>d(R5l"q8=<L2b=?Em0OkSR.3"\rG:o50,sp0GhRrS$)e%Af1^11B)^ER^@mjZA2%`/T]0Yq<A8`;XtM[+[,\S8"[\RLH/tc[<%6c mrAN"CkL=bVE(M';(5.Y&76diNr<PDD1ik*]7sprI) ] Pa /l.@O0Bt9-G'N[I(QGYdRt3&Gs<QgCJQ'DU\OZ$[8NVro_<T"8dt=Ba&\"MjJT[&ZMZ@'7 sQ.bMcf0trcH],A _r^A$p/*7\mZP"*id I>D^C0(-M&tddJCYL:UdMj:pZPmi$_;/0cb7H/1?AlmR(S62#F"e,&CpSQO[m9[-,&dal@DWZ.7+H!;etAsatJpZg,i*okI,S;8Ef`rj$F-N\'%=DM\;$jk (Wg7!2531OQ;Sj$^j^m0jU%(l]j-e$RGirG"po^71[Ie9h+(OLAU9a D4e/HbsAn<P,5@RG'M:#g/eVa@<@L0,(7)X7<sVT&,R!0V*3c%Z8tkS._Io(Cp^)Tah`r^6FdQ T8p3fXjBQ82bbqh8FNC`L[i:O8CX#c7&NA5p%Ph7Fj=Jg4eTV4_8^fBm.:L7Cqqb^>e.d%SN;;goAQ1_`SV 'N*3#kBs 'TfNp`_JsMp$VYrcpIG9C+pV7[j4d,5N^\EK.q1ff!AU9AQ$44B8T6Hg=T-E%"VjgbM%dq d@13`UgbS?#6I$pdl=Qb"p1;A-Q]0Ro]bA 
"k&5G(8>0`#6)JtrC;_hdrU@CBBrb'%$C=ohkmZ0Q[\1a4A'8l)nA&o0#<TM1aXEGm9.%BqO^dP(H&?q%?'FKQ&Y0g];8,Y-5c"f<1V.t(%#H:;ek!imGA"`(k8+.ak^_CUl(T(eZ@^;sf0!saTpj)5peDp?ZE8?S>n'=3!iZCh-<:f'ZOb0<f&)cZr,!/r-]^I<cQd7FfbkOOQcmX@p`^^4;%OKtX<pLK 7s*r3W)kWFDn^J sjIQ4/^Zj=&7&Z:\&+\^%4-5bD^siO^5:&F9d'I[im#fMPDHhVJJTpJ/sAabPtV85*Qt2`_!2GePMb.g7E+`D6)]:dFAYm)>;C&mAF\[7(f7=KfAhpjlM7RT)%=jrK%C&I8J2&E8,+c"mag3<rUK6+FR@M'=@L5g.t#$k9Agjd^(-I@C7Z'!e!''k`[Nl'km5P;;#i"OfWWtNVPKpZ+6QS%\3l;Ot;d+NM,=h#Nn:&]UFP2n0_>"re'""-#rrA?O _0G;KM2gQo`HheqqRGAmAm7"I3r2WYc'eeioojXT"V!hZ3`D\Etrgo-O,VaX8UU_#7GiCgsXV%-PK1(JSm7R'&eN$kQ "#kUArkU4lS_VQ;rVsiQ &@A`csf1E`*+EjNC59F[V\'o0$#Rhbs)><^/Q^/bqL3Z3k. [Umki)clEnQN\. s]S+8ZF0?MBlAqgC\[o7F(1WQ CkHB@F>BU2f2Z>AZ694KkP4q;tkSKaoF\RJcB(+-92"7=6Q_*$'[h>e9W-2,'"E#:D .TqXm'"HeM?]3-jc*(T *QET<^]bb3m-P Y#-\Ql'WL\X5r: H_BjbLsb9a4N"Eb/Em)"hc2XLIKrp.QIXGDL.n*X5A6:Iob=-6k;SAQ_N'>e#C<DAeL=8_GZQTE*brM,-o&FALgF@G^i+HRLB)A=H,.(U5tW_"F`>\G?BtOAd%r'H8aX7%5C.5iR.`,Ng0CEKL6_:=UQ7F])d6r(Vo,^O6[I. IJC(Z$kS-sBWMT(mRG*::NS0$Oo!1O<e!7hE:R9hJY6IE?as"^YBI#/Cp'9YGW`V*c>`IO,&)k%lQeG4&oQ*2\aPM8]Fka(/AH508dooZB@\\I\5;iGUNH^;S]NpcJZ>j!EF>NGe<i!2Fr&"T\cMiC@K-h2\g^pTTPl%X3Q#2%t7H/cBGACb/q%>rU>On@+&d"5;;Lt2A=N;$S67l*ZT@Q%7A&t&c[&04&C]e3M;+!:k&ZpA`B!oT$CJ:IJ@Zd>ki/!2_-9/'mki_alD bhQB,nUVA#7dW^8EB8?R[o.<B^,*,^7%lf.LEp-pHD>T(A5E-?80 DSoTFEi0<plbV]/C]#gTHPoP3.+5kF/KeAOUeW>4?%h1h$HL$L-L=nTrHlLAj;q^A;lL1E\XL'p/B5/@9orORMon>Gmd+6B8_r='+MABWfsqL:ipbmcc[lY!A#^f;AS\(\;inAR$^FTn<6%'<rGA KY]PqQ(B CG8km(^4*FaI#9/AX5dqWkn:)DM5eNn.iLnl>P3dckS&]nV/q-mpOIgF-6\kriesO&TM;<HBX.]iUlX'D^.Yn,HQRL08N9l7)9F$,hop&[:;8e<`k4."P?Y,l8C$Ql"hCsEa-8;e!n':))D_+qCBYZPDk0hci5ZoNKr$t5=*U[p2gr^\Fo[R.]T;U7V7''2&N*3KSFA:`3NcmBr %A"%4?AACV9YH`H1p]3S]Xk%npQMk/4%)t= "rRRH@o)9g_Akhh>B9hX.[d[>qbd-J[Bmfbd_/j(OVKX83N8Df"4JX^:CH`\fT4,')p$T#0XGfAqY`i]/rm7"PNQ[e6J1OD#LH$QYMMqH1%QL.[tW $etq=r$F=YA^L>DNj4Y.?_C?+2WppMb^:j!.=gl1/<Pc"V+0mjfYF_<m!0t\N`k[kSTP:Aha n/<3#"2[Wi'^?.r:I@!Y8P< 7seCS0OBZ9](A5?s)Gal-3UpJQ**nj.`b^1O/f:*HG1DoD?:36F:*D0U];^Eg+ 
PR(D)['@Y;0QT4L>:?pBa6#tZltcS)R58;MR09A#p!Rm%RDaTT^"o@-C@*%<;)q%/4.W)nB7^SrWMB3l>$3`UfoUkbgRW>FGK$M7c:gV6G4cWcj(+&BXnc]6Tp+QP%.RW^'-d3Pq! U%l'j26D41c H*(gS3NiQIsa]$Ion p8H3nr*>A^qpl.W<mL*:J.4RE@\g`HXc)go QcYk.T"QRlV9XA@N"ei l=HN</:l,m>[6)\-o+RU]Ydn%s.ON3@Ws`O1<TUEH]Vc2erbp.GYs"B'0K4__8YpA=E6+R1Z.Ad27;$!T_L\#7d%;4c-5T:D"!4h7koX+m2,teAq/H<]bBH&@.'hi.g8J[?o1E.T ^$::r:t&V'oE`)F$;jbOnA24MY2Q59ms5qYI`Ai`P&]'V]fhmi6KT&ZO:1P&eeUZEK+ 9\-00jsAegg\+0;E+F)_A*ptF,?,/dVZ'FW>*qj/B5AVF4W/-1!ib\(B9@a^gIpW;e96\E*d0'bmpb]\1)22,D(jn1<Qme[^])fZCeD>X?ZEglE[U[9;%D^n7Ad_P7)iAXj@tOFI[@4=>>%[g87r8T3+Pt<)4)5Hh>ODI>'?5@APJcV&bXH*AY`<T$(Rs)a.+K"EHpJ^-Q=1pk mj%](g\9<>#L<4`0ef;N?<EECbn]Ub`W5%HA?):HA+_+cRP7#fimbL\e5SsnXN'Md*m&1#X%1?d+D6pNE`J21s*Son<O)i'jcnCKA$("p+%ZBL3-+:L=1Uhi$oP60JN)T@Q<AM7]Tss+q_V.d+GjYG@[9+D!G+`jS6_sHq]M-acLOb3+*!_gqp;\5Yo/P"joMkrZh)L5fA0W-_^Q"&^lDi:aWef?+d2p>aC1 P*76q!4M&T3T-/A$njY]p"KSCla\=(UAaWKAi@>iViK+" `]F-EJk2b&_:6OZo9rJ^!tWW/OFNC<Y?I1bK5o1N1QX8]iAM>N<3lF ES=MTpc<kpe1G32)[*p`'At?J> S-AnSDq0'Qf2,.)r5q!MQd03c8g=VCR:)CEe[2HVNg*7p'+Bp3^i#3HjDQ(7i#!!XSAmt 4P-K;,!k9iJ ijHq4k]LZ %KNOA+kAN/^X1fJ"2I,0#<1mlMX59[XUg7ZEm,$GRW\L%LW>Go/qCES.A-0D/fBHd3R("7&n5[Vi;&q)4Rt)js+YEJg^0 &\V9A"KD&8r6t-@=$>! 
(2aNc:5+I02R3qS9Y n/FBP%$;&T8Tc]_X6p7(;*D)m5<#b Noia*7V>Wha)9/Z2i VW;S"8h=LsYpW;%_.iM3=#g`L][H1rEKIPHVV->?):`8!*Kr2<f)Js4rEP9p>Tl?hq!LQ;\4=rAfL*Aa"RgH1_lR&S?F5HAjP&T0PIg4XLAYRAh [a1Q+lkSA [("*1BWR+$W+i&G9q1][oPdi\' V_1[kqL0K3ID 3h.MCX@V<8Q3PDP4K@=]ob7A)cI*6YOkn\"R=o2@2rXaZ=h&&8SLr'2?8ht `2Ygj?Td,lsoe`b9c8hMpQX&,:o=A``PAX_W;(+4)UmgC.k%k1`?HfBg(.l4%r2 b;s)4q=/kU07;*Z!qJ/3_n$j9"=@p<([2LfI7M`##'[*meTIHG%AZCXWg\6SUo]>aLFk'&#RA5n:oZ,2raU7dIE)l/eo6S<6^EL+tZ*dQ^@m.nr6F`-;sX'@'hOcoT +D)Sgg(\5B/7%-ckN^1gB7<M-*YI([hqT]s^4L7+,6<q%`aZk[8M.DOX[XBA3q350ViQ4nAsbS8t;X>7WogW&+=4:f"d."r)paXl3>HBFX6XEGjLJ3*ChXJ<s&`2oCRXeDTQY[OLD8`t-H\f@\1t=cnB =0PkQb:FM4tc2tLJ!OBR#VU+#mS1GI:Kb2L%Q`"*Q55c5H8-VnAB9'>S)k.IXhHZXITitshjDD;mSU43qBRH!,MK$AD-&H 2A"H?q!dj^rZ9= \TYg;[G`SU/@>A`0n;(a@Z/2O>^1=5);qCFnM*3EnN-rSQC ;"+$BK: ?ZjaHJ,FCq>R@cBhXG6\Nf?Yt^99f!pI2e]S1*\p)78"LIY;AY+f5%hU`s:heEq;At/0?^2n\V&=pP5/n`KPVO9UFmNTgdZ[ZgVA`\ILfOfA/$\DXq>@QZ/1AC:73)shS_[o;&%iW<;^2M3=hU'@TOmK>A#)OW`#A:N^@bWOpi@[3st>adqdQF>5]CsdIlI;JrleEc;"dtUI.iPA6A:eTAs$G^oKcUJ?+ct4idA+(`C$H%M?G42c*h(3Fa\7j^#U9(`34N8R1b2qJ_dr6dhKdU62W2L%];]8MN@ipji.6s*fh[Ji@bJ3Agpt_cIBU89Tt('!t)B'J`3/,W?NaYSr92X9t.n=3A.djBQ3Sa0p:@8/oqFPYh(*P42s" 8@:1D\>nE*sn`/"^ATJS3W/?atV?Bo&P8@m1 HK;AS7+K,*FEm#<ciR%3'[*GYK o@\DojfNB4?*!/2P[!.<rahUR!U5MDX_ka'l_h$ cKWN"XN?c:Dr`)9'hE[2c6mD[*_OSGnj$65o$>4X9_oqJjXkHg=S^#8RWRp1`!j)k*PY. MmlA&1\r@*t$//AKr8F!R^.bqmse14(AF&Z:Q=o*LSj:>FpFn(erd;IsnaOKAmTMH)9G8G@YFp*(#nh95M.@?iX9,lK"]%r<HnW#qJN=[^I*9=b CC</N@eVcp_AB^+N'ATI[FW:73C<4\*>><p<^' T01#ekmY?7leqSFN_a? 
jA4B'I.VB_K:)P4#;<V]BT`6=ho)Xd]k<g"A?a!!&o3C3e=T1D-E9EeVTo9o7BPRB7?^_A9L!^Cj&h6f4gDqOE_g'$opkBmGp/Bs8g+]`(LG\#A<F1]=%\@e5"9]D4c3="g4e:eSi+8@O$g>b4#CR*A`cT9q%CUV[_(NYU7LJ%ciL(4P=DW\G]/7)=M31E=Q=._hI1+(Y#4e;EAK9!6qbZX7 9P9YY8q7a\#PeKlAWO5Z5A(1/@r<VL=>1?J@FA:X5[0+j9I9,#)>26ScaPBl_T8G$RAk4^b*K[F\k653<aALe7A<nT8T0 ^mZB9WlJaI))n!l??]`9SjfY/\Pti$+B50"hp#ZWBm5NW-j$bj2)02!^%7ZoFAZ.JZO$/%-2Yg>JaN1Snf^lBG1o=[$7<``l1jAI2TsIl-;#.16D%>E(C^t65)0Xm0(CkpN9F'Sg]VPQ`j/D3?$G+?L;)$[Sg$(^80m<].TF&C+h8%[-,ojf;GN8 qWYRg/CPL$qj7b?7h-D24*G1mC?c;"Bq trOD*aHJ+8:4M1'8AA,r2*fK!%&P)[.nQ'af=`W6<gE9T\?)l`+--aAJnQ_OSo1=dmnEZmdR)SD(+IshZn+`5e2"Ba7,Vgb/'#?qNL06;<&#BdQ]hS)MnrJ;XT::XPK!5"T&sc_@0X .h[1=.(;cf5*doeLi"=?Pk=Zmjq G"YZP&o4^MB("HBgZ?[k2l?WNBD5 :,,[[+o1#Y3koq(p9t0MIjV/dlL_-jJP6n;[/e*XWW$'SH@fo@j`(c^8R61arG&O%a<ZW'7]"hY":csZtB-IW2l[W)N53\_\ M DeXeR"9P`<>/0:h9!9W06U2&YW*=m/K2@ANJl&KPS"e:=S$)sYC< _8E*4`=d\]NMLW!R*=5H!@g4rV7F)AtX0!^U CFP4e-:b?0AOmp2+UQh[PYK2H;@\cmW*!QfTraE2BFWQ]\M1jBphT6>_?#8A]'A./9<kY=+Ip-?5l-&Y1Q./Ub@Bs+Ae.3s7:YTdSrKF]^li@*( ZN"%,FL93BPrOG.A&7I_Hk0*C2U *#WP3V"=ris;oN,*`o[iJ:"RrHIOQ:e$FQpcMqLF'Yc@ERqN&PB;No"YjE+JlADehI^-"5s+T\XNho-S<,0+7SF)g$!FkC@r3$M3 _sB_+'^'!JcQ6-qL"U<3=1o2hL,% :jV,:EJ_1Dft$6'^7m[A:alJ.bFa)MmNkZ^T-SH.4)Rk[cHY(7T2@I8Y[4XDd [!ioGAh$<7?)%i::pZP/p<VE"ZpD.@M)DD,R"Z^raq- p5%UNiX$_D..(1,6e1oeAFI7#n-C*eSlgjrS)C40pMteohS`(l(?$<TdWpd($/Dqa*K!Z?s*;$8_.XkJe&n;8L>'.[%o8W_KcWtoF:*>W,6PKR`P=$N#*b*R.0.F*gIQhgV`)j3mcLZDpnLK_*&=@)>B'.9sP4DQV%dnb]>=.f+bMq)#kHHUJ^dC7"RiAL@QDLj#r%A#QV)`5.<WMs7@PU5,<9Fspt[AO#1g kTcl+#^,ON`6;f;);<?a?g;rG.N_#eABHX1.G,"\Y`V1Z44DDWo^4gKGiWTAB;g]';)^E2t_rp29&-64<()@No1`q-#!VDe>6esha9J7*>-[C#:&N(>\tCV*aB[]$XQ3S@tE<$%^Njd>#n9>.K.0A\j@pEbP:FBAt9qap*"GM<X\_gV9.ElN>$]=Q7 7?kR7l6,)fXV;^^GfoFNL GVS.ni#P\@Me] <&>H5o4(B!g9<F\+7_YY"fW7'$\ebd_U.s-NeAi><a'LmjQiUj!@PB<Kp#bh1jPfhb;dKE@6m6b,T,9im,:$KL50lp=.g<>pUY5o%[o7dnYtg[PlctW`5`T.RprS0CD2m?*/a.?+sHk0BpAtr84RE``9iK"^$i=aMg_e79AMf_p1qGGjY,=@>E+E%5Ft PLFgKN$;bA)XYFNdg[1DPlck<NMsAb2! 
@0t=<B>ss5mfe"A>Sp@R`mL%KDnZ/=HU@ZA&a,N"cA?KOY*(W &UZ+f]Wd6R`;<p34mA>q)bgnJ(m>9D/c"!6(ki;AlB8UGY-?0m6l`l[q@J;.`RF!^^psAIHXr)CMPbDRNJ\dFA`oVG1l&\-?I5QpAQTF+RhF'!#=F\bOh.MsUKUKr;2:Arg'(Sn<Oc%2V=E0&LsO5$qc8dK"#`6a_>l$W078sP:kPhTiUGL[0Q<!sk8-8t(D&Bhs!nqg YL(hX!`Y03k:*R0'(f)A.,toa>!I$/!+)1i!@5+/hs9@El2XkddQ&JoX4f]cB>Q9#EfY':^D.I&Mtl.cAR@75]Q\]PbSlsX"G/Z6$NUe.$Q)+gGpF:I2iJ@O1HHEgA,d=ZZ:-jLe0U4QjDtN:pK.CV:d&_^:U(,EGZPF5rA73&L8V\ t E! Vr6\J;*\[NNE8QqA\(m1dQ[6VHT.\?PWq.O:l>"WAm+H^AdIs a_H\2FS*tLBZj=*oi7+Jf!T##@ <Kq5IqVPK9=WTOfIXX\Mm LmXd/f[`A&Y8gK)[;\$=eoWIBh<Z5e`Uq&^K2(%p.T JD`TIj:`nf*BsJIm\Zo`Y#c$3YE-PAQ24K&DE9t3a2rA1-JNo)6pL".L4apa<,Y@cO?f4d@d;Hilg[9-"kV4DKc45Z2'_?OnC5ZD$<`:A2+$%<KqP0W=)!6m(#1Bf\*r h'ZC:&"NGQ(`84k9(<U:3H[6t%@?OfjrtaR\+fJXFsGU&[tB23C5 \9h 4eF6b_p`8VsUQf1J.4M@DoQ\=hpcH>'gR\Rgh(GL0^)a\?aZ*53&&t!Z9_h)CCT"]^?XB+>/ZD>e,==9_YQOYZ@=4C%J)n5"t<7mA+<e55U)s9!.>s;l\?K%e@pCjq)Y5(QCt>[&L4m3kb&5/7'Kmq^pWUME.pNKcB Rn#b5Ti='Fr;c64M!@Hb:Y/O(=0T]K?8JnVf %(:H<59Vl&6o#ijl&r9SQl(#?Srmt'#?co"JSWBT"7)PanS%Z_=4aSf.:1-Oq>266R>OclW]*4h9_A,j/^V PDEN2e7cnnNn6Wb6qb4E-RXK/DC_nkW5roNrb0k/T_mjK:8)P@[ i^-:L]_?JE.S^im:89(g2?a8F\->fZ\\YqY$79i>p8l."H/EmBP=QL9JX[jB1N@,c,bg0f#=/nPLaQA0'a,oX)<B"@$GCO3CWHdAhP4PKJ$sH+Z3(`7t4b))T$WMgJGSp%. 
+?Y0[icHQ=e\GKmE5"pQ$>C0Q1TQLSYs/$;aGm 7mC&<o!2/ph`YH;ek;#pT7J+RO19@k7D0Xk4K4.sVX_aA3FZfX3(9Pko:q%PY.H:8@^G&pL__G<O+_o(Ua(n>5^![#d?bcRl-kQq+3WtiAr"EMiTTUB@Te@YkR`1W(TfAf8a?dP+aBj:A7C-eKSFcY?KOrl$U==9e:i"gA(?XBcK`Mo,)igG6btA:%C$6shNsn7=[`83AOth^RM*W%*oZ(I)WKae@:K-"Jno\#OaH!"gYq*Z^d@97OX4'%AN+lQWgEd*K,\=L,iV]!AY'o!ba+PXQiaViLAW P@Q$n!V9n;].sM$^o<.[mlHAcK?b*=XcROiN<Ch#k?6CicD4M+5'HA"JL'j1\s(gdS<*[^`:i?`4lPF?CqVqf,qm1MJj;o.V,X]50r(t-JTQ=Vht7!I_To!Y4#4Po'K'g#\;*%qRM& SAS/0"^]A]ZQ:X-VZk][D(N`A^7caVN+:X?%l?t,g>F]eVD9Yb*&iq&TWnlFgb e%1e1+-@ gN*Sb/1q;IZ%mDJM0r>Yo]i\ZA!%:8UF>@CDT-e&t[',Ag5#0GJhM''L'f7tli1A(rPBYUhe5U@\%!;3OdHa%qO4V>!B059/Z.c4-8GCm+=q0A]b;41rQ9W229`FprnZN1o!#.$^#eAp5R(*1K;/\i(I0<_ISZ>.?\^Wekbs]eZERn@5"3)OD=QBrN[krVmA]00HjR(j=.=3.#G&jDT%*%[0QUAh7RGV1=O>a"T/KV)b&B<%JOM?!dkEf-q^/d0Mq-Jnar7HsTWVeZ3j)ONakRK_FJN)[YTUQdsC/$^Elt/blPLHhiW81hWX?19KVeeB()c(piE0CY*HaZ ,dpSJoWHnrkW)BtO<pa?s;M9K`E+1]$-W.s7pCC(&AL Kp$dpX`/Y0@RZ5B[;1Cm"U%*D?.!__E<[s=$XbZhT2iVH@[a4JRDt@*XAh]eX<V,\NJ"hO1F@-! (BO:A<k7[F^_]2#B8J8%.r+Aa$^4AQ$#@O+Ig9gbX2Fc]=QT5)'8]5B2oR_:9]P*l+D?[V^!5h&3p3\]:5Ap#7LKtn<j0pmknLqK^H("h'\o( IIgNO'Y!i \nF@l<.\R2_Jia9p)89PC;McU4bKXm GCE3.BYl`Wt>Arr4pj?YM2555jKLI`2:&A^ad)s,#0K4mXTpU..-@*FDU@TX.N7>%(h:JBFtYkr!KEG>Gh5KstO#)H7Da@7j!erQ`_6qs3 $8T6D1gP!5H5\`nP-_TCk8CKoDJLNX5-jbnhU`g5:gTf>g#"\QUk=VoSJ4d@NMV)ZYmbP &"rDGK Q7roi.+^XkhrP\VX!m(:-,lEY*bd3B#1$*UY&ans$GBc*:E3=&mZM419#E/mettA@+'=grDQJLY^]GP42R;c?JZ5;;LCnD0a)nRYoftWdk4_c(5#A?M;*D.!O[lT\Mn(2A.$F-H&+P/rg&TXCZILAc9YjBXA'28IZ'!!F6A__mBA?D2CX1:H?hQ^54O\Ba;=F=?rYPk3)4.^c^=OV]b3??]lU%$!*g`YkAa!GRG+UQtS8=)A!Y2='d:;#Fp8B-X3sl'qg+iW.EDU5n3[1"]1\<^4W"oJiE@Ma*CdFJL9`K%-4 'lV:5'G`mj)[]bgA3W)e"3ls`aP%05SDcXSM]d$;-mOc/?H)NV/#j-bl0YG2K?_c(N\5AtW^7mP?b)3;!!$P8\2BRsML.(/,.9t+?d>2(BZCk4AnR?AG*Qfd=c9GAEfXIp9(a3T8Wbg&+A_MN]'p$EeTXB0_4LUB5YiXOsMigM*SY2?Z9+&=gHlk\i7*-M'TP!e,!n<o>b]0i21r6EY@/c,*np&P`( Y4<s^=Dgng' J"02:X%.ATst43Z;mN#hL6@**0Ytt>ntisD 
'R4DV.h7e'=gchIDed:U<?P_d]h^J!(9JE\/NNZAoKFA1'''pWd]Z!HFt=+H\?U3-I1!#\;rHr+>?bR9=NO,s\3mm<lSm*]cA_`Tj"3964YZHtA :fYs:Df7f6OA>[#O+:8+6?<J6MVY3eog0oTM8!Aroi&1I//0]^EQk-kpob;S%kt11h,"s04 4fna\a]N)^5+Jt$0j*>hRRS@*VAL,UH\Y`"Da<J4R6""j5HnNI$0-iTl-3+Abj?DG6QMqq7iig)U^+$MN6$+dsm+^V4q9A3Z8!3i'ej%i0AS78EG2%rH=T5Y*aj@B=DnAgmMcU]W7b9-JY!+lVXTmPo(FhdrnS>ZcgTFqX<"UYE7F^DrG?A 2l3dZ<46f9*"WB"Cd]$&VAo!JK0=YnoF0QN*ATZ\$16RY(qg]2F5&\,(29'(H'8q!)V_'pI1l(R'm444.QZC,@;7AJmP.< QnYbCkEC]$2,%OIM)'snFB^4T%&X8RMOS+8Z\?J/'8/N\2Gb/<20A1\V0IN$(O.MDh]MhK+,NAN'$KKfUXb(L/A3EWKOk$Af7;ms"Jrs%)H#VL>#^90!4<KS35;e:oI%GRaF6""Q:l(Ch;!7I&-[_m[\Qp/U3_)a8s//PYdoX[##6TTXFL%HIY(T(;d\N#d8I55X"6k55Bmf1RcRB!(BB>2jF?(+btekZPD-_>RH/q[hY%eNS;MAMkAM3[E<2M*6C)HQ>3+I(I^hkgE*;Ai-e7-U53bDR.E?0#f2f4(gm"m%B[2J!3MPZ\DXn33RoBN=>op\XBU(.:g^)g@'<>Qc#9e?Z[55L]jnW_^Z5U/:+@/`c9s\K`)6OZ[go(agP,i+Vpbid'A:N5]Q8q%V@!+/*V%Utd_R?MU1e+;@:*GA/\DQjcO8H])p:4A^,i[:\<-1tj0VXI$->% /5m(OC&";77"BmI[^.8C%_;)AS6+m,;j^hKG0`Nenf!@%oT [Gnf.n'Q1WQ,SMVp\'rI-M!tHce><V6:k5-N`h2qAGfK%a,G#l^D/l q&@h0edE6.P C 0`3btXKsBKFg4:C!!lUiL"5D[+NStAEB;A@EW-_sjc4hb5GBCEQ3T7U!OE7dN31C]B_ &Jh@gN+^6hk\GDaJO0A>(+B67J`NiFJi7SP1>j>$)&T#)tF_b;p[ecN,Y?RN!$F!/_>;(>:;GVT\dJU#nO-qXhI],t$V1aD[/]-[2B1%l0i\E$fDCmGK?hnG^p?K_H)\#UCZ)O$C_&JDX$F/_o6UJ%'qg%#r,`1jE)lDL^B;TnsO A'mk5pi/1Mjmc!AET;Cot?F3[1J"-(i*X%eRGp$.nKfVJ=A?h)4B1W&b6=8P."oo,?VWWIo-<TdmdG2FEB>Y,DZ&DL7]laAWLW)$sI5\NQ5<.eJZ?5(h.L97rDJa4bb&AsH</gZ[5rZ\mWKlQ%R#6rcAbsfH1"Zf322_3l4(cT?q;dB;DrfsK\mtfJ12.^c!hlcFJZY7qY\S$OtHXpPIk<mY93^ %Q6*pEZKD@n*Z;:G<:sQN6,6[-tAegs4'`S>^!Q%=5iSD?h@%)<Te#Ad-R%\^Ahe\oF@*'f0 n-\5R:,lm/n5_]iji%d$;!]W06#\Bo@D:o LPPEM@d&JW\n**q&Y:R&E3b0>Aep ]PP*9f"q8-]Y$hA+/bfbe5b-p!"H(6C^P&k&sipE\-"cb],a[B.kh^ ,"A=/]B[)6L7'9B#LjOR ST/]7ID`Y6/n]D^j+[A@V.&UXVnS7.J0Pt)<-[tZqCi#sC2PJ*68,ngP?: D3P<!*)9ta/$=.'q^dIRP.*ec*2/^Y<k /V5DVNTMm%a!WONr<\_2/bWr:V2+islr/&*'Zk HO/d(QsdPjF0l! 
!-Z ; lG%9Kd[^5*5$'hPI4W`X0tse*k;Sq75g.s]:e"5Z2#-%]rg;!kQKk"gQBnL*6:\eQ]YOr/Oi_Fmd`)OB(1r<J,P=3eH'4n=$h%(-Y*;A<M&3?n?b&&J>dC&JB3Db^e>=g[lEAA"1G5p1`7nr@sDgt8AP7:\dFYSgB\e##sM)kQW=tmI?A)qqFKfGn\F>5cIs 3qM0\fC]KJL?A$.((ij8>";_a2[=Npr[H",#s(=I!a&*)TY]bt8L&bhPq2TK7kGdQQml<[*i]jF9o<lpXm7"R.m=nd@^d_*;*32P `cR:l#kfVi_Ct1!r_cb15R>7$]gK=dd ;>jU%4:(^nqQc/c[>T]iS9sXK_UH+UqrL!lQdM&_S7gr/tbFA^P2"#f[kYh.^Z(U(al5m@TL#Et$`(LO57%Q c^t1&\4dR1F lqANA+o)'@/IetJrJb%b/WR-$8N$A=d38K3[is;Y.#7=Xt)cQA:lX&.&;cdq!si125Nr#GW'f!"@jX#oNts9+25Fa5[jt0VNpBVE2<Z62'.@de_`T,K\!$P*R8R3r&cm+*q(dVrL0/]j1p/r0)LdqN;A3'tX,EmZ*E4IZl8ci6\^Wf:=J5= rn^2Am]c*r%3E//1#Z2VCD`\97!?:@qoN.#.%.@c'D7la&ZK&nAeC+.Zg>7/@4IPk->VGdg$ZOI`c? TP)OFJ8`%nhE@=SflQsXQr!T(@5hPA<A<G@B?B7)S>F;mVb@m6<2LKDedE(o@r&F>#F?,,\Xb0c"]!A31L(/7fUf4N!Z,&O7n4YhpK['K>FVE1R+oT'4)YR^,=FMNCUIa_hS=8''%VofLdgT,Ik*DggSD7*X0:C!'`PY]qfrU+76Xl"P?sZKmLAgn*LrcV^#mF=1'LN<6+[.@qTT0;0#N"FIV,F<_s=^`nF$tm"1h.-Q4Ri0*OI2,iJg05>*X"!i#_ctf)lV"?kn=1*0(p_]SU[%U4J2q,LAA,b-h q;BFRs6)kBgp'L/4EAU/kf2+KP_o";?t =44MWelLiom7@UfkO:N"k\Z2,=AhrY$s]'7&A&A;_I8Xc)dMP`-*Db9&U8>YeL<4#-d\eFCM3;YM1;A)8\LL9q1G[fCd*(M<R&ZTNICKqSkEfD_A \43Mn7e$1$oC&))W?)Y'W-0ebjkDZX;\5r+^b628MBJ1a+@brWkWEAagc8QihI#OP*Ai$KHhkpBW7.-(l:@H9,K$h=/E:2CsHP$8he3N7pql-?cPsHfmAXSROL`-GN'Ihe/I&ia'RhVNARA9a V`Lb!bXJWCn XQFG?]AN-?Q4!?sZZ``I%T<P l`&k#+^@3P"[ j/<#11rEF")YP<E-rPNIZ-Z;8 L!)X0kDa?-faOs0P"5D-\Y?1c6(;nUfh?CC)K 72J6E#Y^qmc)-)mmsA/28[9&4%-Jo>USZk"/LTs42#&YaFl1BFQ*-/)a>>@2X-(;ndjcTGT d`o(p8ESY&Edi;q9?#:qG'"AbAW2$aK/;.UjRU0-@Ih:[$n!c2I?n'^IFArki5)Dc2,'XQTO+5r5L(^N^PP $OQ',ET!pp5dLO-E1kJO9a2=g02OV?Z9DV@a+fASkLjo)aF"JNjgGZ3p^8g5^N:58?,)JK#Zn&4-eGrHXU2Z9OPj]p5$+/Z$-Q3=[Hib%jL6\6L3^G(sAZ;/;$!9MIe!sVY*c-)c9hX ^B8!`V0=CY5XW>FmA>3X+M2KiWJl#nRL/]<<=WG3Y=t;_j2hS?_12 X7O6lAq.%n)&@bbm-%g,e->2ipK49HW VD%@c6'SE/81mAL6?\Z0ig9"l(%-&bA@NPapRAeBosJ*H;e<K7<'sYW't &kP8%Be'C#,qA%jdM8%eZJ^U\0eTAbi)t?kZBO6`+rr8Hmorl.:&Y&XLTK%cA 
*6(cX*IPD-`fNEef;HtGD'N#7FM78r%)KdI8!e*ls7%6'(Nm+\/^%08*KBj4t(OU)6bdlc`XK1h3UH`(%o;10I7]=K-T-YY=JDoD<%HFfInO1Pg+NqW0Li]o>SBq5eNfa7KI\_`FAe?f<*aRKKmm4Y8\mY.E?a\AnRK-'nCM6R4_rZ&7@U".:pJcl[+SNVAp0QVkA.c_<3!+/-KpW#1#gPoTm$-AVaQbMm)+` #cY^(s1(\d3-pG[bQTS#X2e>jSN*6$6& :ZR87O>&7=1C,7q9_`l-f]fln ^b`^CQ^QI5%nAH8=j'Z5MW&BMi(Osr$*22[[js_:%J9Y7FH9^/$>%%L(e:gQR:f0;s24gOi&J%5[[39bQ&f/%CO",Y'QZ>>&g7=ag=@q&9Q*Dfp:7-"6hJ<"7dXNpIT1d;]"s8SV6l?9Ra7b=heU^<4Ackg^FQAVhiU;YdUIB&V_m2#B$/c.`Cf*AK[!*0Q@fBJ#X)cZpt$=pY:_&bJN<mo[hZpDtn>Xf@&01[g+j_Y<FH@@m/[oiHc:5gpsl;3b3T)Q(R^"tJ3>1bBqBk6tDRZX/\\lmQ&n0E:"@BrsAD#`8MX5d-;EU:ODs.Mkbf m:\"08hIRg^6C=kItD;WM$8[PMNbMJ)K'3#sbH6s>QSlh5o1Z367dNDXCRM4I)N>)SFn!:'E^GN0A@6-6,sETk?Ak8=rA$L_@39h]X%-5JAf,3eeCXHWgo^3VQCD mVqd!Hl\O$15c8dQfHVSnDCSr'0nDAS:Y/Em?DMWqc.hQ2Y*;WAK&CH7F'gWTSho?DTQOj2_!#LEBP=NA,><AaK_&KX;or*Qqc_A9t#E`p]$V^8\d'bOOG,p-S^9 Gn/iP<PLop_"\s&#fVQKZB(Y&X07"o'p7]sD)e/^gQ0JoIh^9)$%Ae04QMp*5#A+kg!e,l1`-2^NS[oJ8Q`(3,j1]81/!Q>KX5&>o;_PTQRO>\Y@0gCnerAWa!6D^+KF+YL.Z'b\-"6(F?g\OE;Fs0kDr"1qVA_PHSWXO&;Y+:2mhcGaHsS/G_Z:!'Ch<e_sN:0E($>Z7I^Z<+:?0.^-@IS@1@? ]: !"$s7!SE?(KJa*CJMmdD,1rRKD5O.6t:j6IddI-! UfW#H33O'YM]hLQY#JT;=5WH@V!nO@BSd-nSJ:8&k"0'S$Qt[-K3,m! RX'daCqMS-dfPHCBe\fEKRoF%ICJQ`NcG'M/SIfQns,7=>U[_4iF(LO(UgQt4h844%[s<n!gW<K?NPNA2DJT-6l@m_RX[HPt9.Nn,[rF/56mq"Xc`\&FQW:f[3_hC1!GGdd3'BZPt(st*/q"Uc9.ID(f9reXQlk XAA6#*)XTjWo>2_aP!mj4\1G"6fA:3./>Y!:&bP/+NL8:4c6+_-F +J_-[E[sV_J^Na&RTj._ 6at+M)2&DF98d.e;]b@4%KA9;F";Ag-"t8>l(-(54VB$\=lFO4[5tqc-,QtinJ*5]D#i14`&C@oSP9n,ma]*SXn3`;)fF6GA(Q_fl"K#gd$T&_Akelq#%h7kO2qN*YPY*o_8'2gXSmE,Y7K?"$7(KF[.C5imgT%87;WI^Tnct1@N\C!+qIT&a!0U"[)).N>,K-rhQ%p`W'RM<.JY@RHepUbb;?-3(a'r\;"hk^d[7H<I&3VH2EcAm T@^N>b-95b(/M/f4"so2f--WAPQ7-C<+!?B0SrE$eb.)m/-`A8+-j44d6]U"\QQdq`^if0mrDqa`P b0B/8cni((k(oTd>Z[]d8k\Glfo25A)^[)Z1pE&^5dN4HG^($FB4BFg%TK1^Y(:,M+>4os9N=d[AJ)qZ!o[J9$F:6/G2f[o[3D?+N@O4:U29OLK,acrJ:B..A%N[o=n3%!U5jleoIPD8Dc)SoT! 
r^B'a4=+%lp2%(*r"]ATCh^+\[KM@7 h7*9-S(fc_eaUK\AA`U+9P;DQ1HPb@Wc$NX%gOgQ:g2dR>0,e$g;\Y%D.BT_jso>FpDAFI$9I?FUMS1?$iI5enW`ICBJSP^Jmm)pjnB3.%s?-M<KLW+P0/njN1btYdLrSZc-Rt)^\oU<_=)`LsprFmOPC;^8@1[F61!^is3asPVVA'Eni)OdJK'7dqL^`:;t7^cP9#ph\R0Tl+J)=AMPBlFH)J,:&DR)OqXUt^+LZJA@(=0>H[t$612MJ_B'"R X\$aV".J!sEp`q<'>[nbZAbbm$Fl49A?LS%Fd%g;[ggO;FQ-i0B+(CS/-?g*Jm%1PH)gOR?4Q\IogY-JXn8S^Qf0AW<QC#]!sL1W[%rV.-,7s3BS#@4EF%<Z7\ao\Fa7Ib!//Pt'RPAM"^qK*8)RZm/NMaXWg=Z[(2Aaj^OM$MN!&$RIQnZ+g0s>Zt=HjmS[)Y'CjWrSUA%1&4>o`GA$gUU\LS[kB+,r`/cA-J.KWbjd4]X-*$g P\Al'q-d3q-AfbA@+_7k>AtIJlL$$M4qo21\SQFLo1_V[B;tT>!$^feIPTK)/)V>EYiEJOi`_) ?<BJdB<E`<nr=RhcJcqX`gJ#Yin,d?K.2 A@oo&Y[CAmr1j/1ij(2Ohl[p=GM:_1&:_9'RKMYPB60>c7Bk1)XJX#PtR#eAP&T-j2i6.=4tsZm\Ht9XAI+;M@Zj>lh6aA*P^9Ymg(-nB==-!A!JrP@Ujn,c[r*(jRfkdp?C`Ka'\tCX"c-]eWnE2]?!>E.353qKs7+!n 6#@GFonM(-?c#7/U `%=RF:Mq*<"(TX%6m'N##4BN`FCU]KA'IAZIS6Yl^M@.Ac9lM&j+B^nBPM_F8&U9A>4A1\,?]Dm"HjZQoeBS+g@P*r(0$h-(o%_)tF-^\Zaq[G!a!2*+kZEg1FIUQoQ'Is@!O-q6,RDW&L*cfTJMr;Oj@K-m4'q.h.&*g>6UZ=(NAE6(&A`Zt6_.a"'B,/4g6Hfhln\_$VK<9b%sD6DS1ik7n?VoNl2"AkNMRJ(srj,ggIqTJD7qinH'Dr"1T?,?%h<c-Uco<]qYh5_7APE6.!sA'D[Pcr+k]LepHG2bmBAdernTRGZiUJ^r([=(oV&OWNEHV^Z6icZ=$@b]<$9$iK<H 6U4<]f8HQ[9X-Q=o]LQtcYqe"h)`m&Oa*Ei1\nUCq-C:GABZiJMU.]5lP@ *5='qA>bS$j=<-khf7pSLbmIOs<af&TciBf.>&lf^:q)eJq-t<<Bp4MYX]R,p3l,/!L6f(Zp9$q/+Fn,<<qtY&;pLEPUnfo@kV6]lfE&lPW6+InXRm2bGg%Mo_Fb_#j.?mWW252os0'QAH9lBi'8c%lQ8\2sXi6gAH['BAoM6Jl+>clAMI"nj1["LM3$H.A[]_V;R5U&`:]b&6Y#jVf)[s2<3^EFT@'1k[m.sAA^O_^LFL5<i@I!Q<: 0afF/=fZ]gXKh1M'(5%DR%iJCa3faIJIN*dHkYY= Fpl1?Kh"<a558gPl90dr3q#brqD^Sl3bD>9`KL#`c@_.37hW^3D!7q-d+sH]H"PKBbEGaao'4X0VO*q_]DiJ#ETa`/.\VTG:6;/hY=Wa!&dS'jIhi#gQg53R29DWgDB2mM!@0YmiXSUjT D!&gnKpPXUESfKN#bk#]%`T&M!DV5H#\6\@D`s#Yflk3&2%qXLo/kH-T2+lB3B0d&IsS7IA^oG>>h%GSF V<Tl(c.j'6N,P*4^!dV)M/,GgO3Q(EU)KKAZ4YWF`Af<%3l#tcFXr*o*R0:;H.G*@s&D.3#ZRe1J7mFD[L1_J>]s/^>I\V7Whb<_U5;P;9SW;iK^'9MkJbf-b>'I^DY% 6/p1lqe$rmpi%@9QBQLOCh9Wa$ABDN&:17**U,7=]j`42D]FA(X='_`0=&J..5R6qj#'X=_SNl&n<0UYriB=h[1YL>bUtqiOrXhb)B t= cb@@148"gmJ<%kTK 
QA(<IM,'N[MkN&\``#J\(MJFkY+*hpVtsl_anG>T9]RnQM5*,[3K/00#\qa(\$r7<?GgZKV#$%Seam]F7HIVt6if-<f2DC8"5qiSVoLA]qZ<k@k37aQTED(EF?$5MFrVcnd7e bGbm%e*-./i!K78<[pe-RT=Z1/sDr^nNJlS5QWlI"B&dX'Fof;5?R2\pttL\cO> pID+>)5j.<J@DiQogea:[YWKp8$ehT/"\7/&GZ%&4TXHHXB'-bAHZ14tYFA@+gSEA$6;/Y/fS\6k87OCI"kiM*Pt2!1*B9SHM=KHiqXb4NpS:b USaOSpsJr8Ie,q&/00NR;XCOL**O]1D>mCf[oOPbk *Hq]Un-2Neb**1fRf$19k]UK#)%#/icFAV62!V2K=>\f3(@^SAhR6rO9i`c?MtkINO)W)6m9r(H2'PB gFG4,I[Yp0;d1!BqB[S;'L+& pVd.dlcQ<O]f*sH6qpk$5K;1cA&t7+67]MoX4rOq2tA=J7\lCKa]nH*hPS@a8H+SgosRb4t?=m 9A_bMP9.Z)>*bXk:70KQ;WC< <O4 0[T,\M0h3dL;rW*\m-A%C,B=p:F/QK'WOG47'$)1C*`/I.tc7*``C7q#;%Tf+P&?gn jN,t22cViK-s22LlGpZsFE.Sa;f\QVM!`piM`[X/BOn;b]@p1I3\*D<B#Fof/F:/4Jp5X`4!Ab`/ ra4j:RFrQjYq`!8AA!a;<G*is+]e:%)$;VJ<D')71 oSa<^&Xq[p"LB5[#]ESNE-El g9qtTB<n,\pCoOFWD3=>EFEEB.<f[50b;?X"Octl/m.=`A`Ac0o640&tI2VDri;p0^6los-<A4d`U(P1?*026IfW;UQ$^'as e+AXDDn@Y+rYC5$pA3 !"UmTc/WC 445`Ifc CfO$0)Z8W'*+2qlG#6SXd+NIbZ`Yjn8dBjYX!6NT8<27Th\HSHD2A5A/.,RMNe^8 Vs@?3<bA;:m,HEq\284<J,m\tdg"(C=LB')WU_b!@Y0R_>A^piDC\,Im^#IfeW(68F8jA%+/Q>s/BgLP1DB$OV`lJ`REf[-Z?cGs;A?ec2WMM)E5QT;A10:]\WaUT)s%-iC%7nSb6>=Qf#!q7Fjj+#Gs X$k(m9qAm;M_4/:AsX$Q_V!L5@1`aisf--I,&;AC[7`2P4,RNPD45@N?C`k!ZR_YRgfWtIfA*6Vok#S+>E@>19/TQVImrkno550EkE"V)c9'Ak:=[$#_,JiSFUg'rJZMD3D'fQUc<fT&:"hgO5pc^ZHWXNqQB(\EDO+m[!YAUldAB_H<nF&==9^g6=k#&Em3+,W.[o!T/<YIC4,LA;3AY$S1T4('W#o'bOi#aE]V9ZH$,T-qp`AFST\\8a?tk$20J<4N:SA*9HneA5r%VY" $E7K$qt-ng>ZsQ+*q/MbR]JtbAA;O?Qf.X,4.#q(WnbNEd7K_@]eeqZ MCJIP0Qjl<5XA#+c(53G+)42KD1'>-#71FPlOpT:)L=gpA>nio"mCS/C>c^t@An]C_<n#p>m?Kg$/\&fJ3U>TtJCaDf&!+O"S(\&i!0'Og;Tm1Z7t@"jbk[D[oI1qsV4"i@3Rcq&doS$0o-1+U*T:MB!>$E8qA:,s4*UFg]8P*P17W2"*NZWk6$'s2@ `bOp,s1C5>h0fC!gX`X@Q>># BH'>bB$BF%'kd/j/8KKIm7W_`R4Xa49b7+Fqb4O.qc)A`iVW&6%S!,6IE K4o@(lpLEUX96o=SU#cE:J1HKn5VFb3T-kJh!!eI.b-Mfke/EhEm7T0U7h+q7K`Q*]D0X6o$H Y8:Zi`Wsi*VFDtgRdiPY,A<:_M_`^h,YZNG%YhF:_I7*LF3`R.<-.0(o?; 
F2,A>sW*Ci][t3-=hpVaAQY?Vc`R@$2cY;'8+[,kc"r;fiQI(6[_GF@TU!Qi4NI*?=0/s$VR<_YZIdI.=9@]gakFqn4@on=`"Z"`/G!59#%^lhAi&:c-"Y1oN:dk;isr'Y7FC0`hEAoh6,2` Q-K*``XNoAc(gpA!A=[nB- r/\R;P`(=7j:UB/(W\F!L8+]h/#]*;tW<dP/ai#piqZ0sXqT_(LOb*8.=QQcln\CWLL&)aA)fGi=Q^DN$B 'Z/_^r$O$kW&dU39I"'[E`)$-X$\9 CI04eEc*'n(Pf>C&*A/Z0Y,Vgksst7)T^Rfi9=38]od>kZ\$]h*9cUU,DMlqhdQJ"?h^/;<Cj&&R0252VkXp(EZ!bWA=nn4a<\POCOC3TO"a-&O"a8Ve(ojXa!k#C6H-"-p#q9Hp-A'421cNd?;Oo\tf(j^fqkBQHM3fdQ']2l,Wa?i;:(<k,TL9^UV-Md3onB` O5?AM8Ed*'+J@T'?`'[>m:@:%ho@6ZU3'gmrAhEq^D2;"=8[O(h>Zm^,jO18*&>tSt4UreJ, +Fk<,;A1Jld[J(W2D"41o[jBL-P_Sg+RI)S6jX1.<>B rd.TsO%nj)X6l<bY>3Qd8@W.`WN6F^j+tJg]pdf4=?q@,eZg0&*-ALgS"qb*St>4bhsJcZWA2!A2X"Qb`'Sr@-MVjYLo."C@K*VC<!",q1ejO)A$'f$)<>FI 7kT1*A_HBA^M1i\j@.^b`NQU\o/c?'Cs6%hCQcD08#J.2$B;aQm <i f@.-%0S=al"^:h*7s="O)\W>nism8,ZKpA<W)YAEV`fS$Zic"4LZ0jAeYti/M1\\F55F"hgO_(1VTt5;\*.kfMF%^:QRBclt%EU$6SWi85Y?5m=;iL4f&("R]XZ[ bD!!n;21s&(='S)8(pOCqpi 'e?4Mb2o2BF8A<F14[a<A'$?m54Q5U'GaG9U; _mXA<,!OXR7SR#2J edc9 8H79p7,A?*qE\f_N*L'0MA-O/C4MEEXbDKZ>gh%.6-UU;le#=CURN+:R,$5oPeg7<.1(okE@\UHV0`\e @4Lto7<1I-jC=PU*/0rX*jjAO>U=7q*/-4\lWG>#KjftF@`/SB_cJ1h9L"as=]ZQ'0N&<Le1fAO[3qfr>0Kf`>Okq:oXe*]j'hn;[[dsd*q`5kH8_TkB_4-ESEqgMU7d02[s#OebDd8tBH+jo*<ZMWi6*K>MQ s)F$.-UKfl"9+f@4'bd<im8Sj-opD3J&o)hH(SQ-*:U?Jp!<^HnJXplhGJ)c%?Q$.ktb[-V4r8]I9b ^6BW0abHAhc-W9,SR17EE>0XsbR^=`NJ*#`.;^E,ftp;/6#AA1(VM\E9m>*4tY pe9V\+Odq+V4VZtYmNShEfd9s<a[$C`3tN%HWU?,p<CPL_@WU#Tnl/nd^ DaR"cW`HAHPW" 0#][5A)`<X-OT]Gp`2AsiC14S3pd(bsPRN&2<bG?/cSCd_o96R9\KG\/3"[,d6_G1Y9aTj",Q0da'mlFm=Q)\h6$aA?Q C<d-^&(>'LTC\)%4Kq"\2p`O,;(n5TVe.JjAtWtt%VBs^<`ZV(m@9W:hSJT#%IA,j5O+pA7Nac7B  ]b+__q9>JnP3Kd4,?'R$;$a!;PEoDHOL\GO3R\Z%*^0-7g_cpbSYgI=U%&?Z2JN*"c8+/g4n,MUeG.ACAd41bI%"%1=A->InkNKHVX'AU%LP^@0n<t`ijbI7&#@H<S.1Zk/*!5"@ rXh:#M<[sYS'NnPWWT<": q?t` GMBK^.+j7mdd9?snet0Lhc-C0lZ>4J=Z+:RtgP<ZPO[_9rJD;o>;#C4#r5!]4<XH`-.K:2=;ZQ1P?+&#Ap2Xii9DO3#$X 
/pQ^tTS*\a'U"CkRkAfo:NI*"Fa'$a/X!ZR1U]SUBGhN3m=kctBoaT&m6^)dB>kXAh/rtSRk/tah0h#qq6*UTRW)`?9;_&agZ@<J"L(d9&%$&W:rIWCD;q&OH7<8@nC9:@:E[7U/&#.SKi/)1Hp#1CV$[!O:(Gceg60Y!a);BjF:gbLWW3,65%1qEJ Hj;+0)$rN q'#g# UOcf@p,^WAsoi&Y4o(ID6&l5%sAjAq-+,"`JUV/B5HY070!'F3kNWTSb[A9!gNFG9%3-\^*Ieh#e9'_2]DnEg3<:&TeN#WSF'0,llsE+^C,,1'6\LCg/F/68aJrSHjY[BI1'&/`Z08'0YJP9&<=m,jYFBl N'JJ+=t>UYMrtFFj53\c.Ane6QQY,%FtlTt[: _>\P`WVoMnO]E"ne- BL>nM?r_L(.7P,$l <'oV0+_+h.-Yl5hLP_tRUMYJ]#`/5@aW[0;rZ(\DmXN.XR@N&jW8_RL",/`s"4V-[l.VF&DW1%c.<-5p:fY$"bCl3CNM[-9Ji^tngOUdo9WODbYcd)3@r3#hHR'3/K&/DLA\LNkD dM:XDU`=)S3tKgZeTSt^?@Ye&1];%b;NI4qde#_^Q+mSk"caS%p.NAIU/6/Rqq9J/?X_e/^W6"Osj&3QAcf,A)+s$NA7=h2I*O PT7!jOd/(5RU[j/[J+P`2XM2JK+q_f$bGlJVM*B9C@IWNoD-!>Vf$ClFqj;J5\@PX i8T<J>!PLA@M69mfY+<X*5AH"`:nWr/R 4[Ei<d(?-5a:(o#T_E3UAI/P7+LsB;&3Dd \:h?pJ.J.Y2EmW+%D76]`[>$Hj&GV36ko?,P1XL#o#5N]+.>Z&k])W2JOC-,Fe,@o>@H+/];/0<!Y\V3DlZ>kkj+aTqAHmYBfDiW_:I!JApEq<`,oCX2DI3I<V1G?a"\pQ<8[WA>8ls4ZX/CmRi+_LMX4)`/b+Egj3W<&Yb)-,mq:[$).fYR\230Q6N9J7pJs#g"EQ\:O-#lp)@e`NpB[LHo$_JbNJpt&-5..ct@gm_6)]A8qRtA-P1b7-r3X, V<^OHNB!VLciffttgVP`1XLDJ!Us5H?daa?nspLARhrasnA:iF[PYI4g]5E]6X3EUe5-3AQsMm.*[J#*El(M:SIf])\A@hJ6?U &*Q+sJmVC sE[ n5!WVkb!?+n"Ab00Z8gnQ?i=FC2B%gg5qi;k#Cg>8"2o*6YpO_5p7D4g/c6ESf&'Ao/TpH>WhrpA8U5tO-g#>t?SAgG/-h9<lAL54>gYZ9sAX\Gsc0m$lG^/%%H(gY+E#!.iN6BnR`Sh8;7.-ndY8j2jrnC92f6GmAq""UXsE8-(s+n_3rN&04pj]'"8#TV-W/etb3T;$q=%>Wf1S^CEZVr7q6HV21L`<pg-f\0`ifI`/fmVe<R-5.[iN=M7(o[d>6Bl!F!#'UcL 9A`Ltp$=bS7kn3*8cc<+3t6cr@^Rok=*17%(KJ; $0WRPg$7#h'--o22?aWnC29!Y<B2009oY_&"(2@l<D/<BCYQ[tY1Umh++L Q`97*!ZN.V!elE2n0a`[IEQn<[07/05/24CTIVE`&"U#Ih;t:YK Dk\&0BKkQS\=.Yah2nZU$1T/-RKXi`rcB86C5\E*L^Og!s'?s"aGp`#_r&d;>Pe:b$mhLU\XU0&mp4UM=Wqrs^htO1\<ej[8"<t7r\@'XhL5./X,>>aCI6X99s_9C_09XU>4?E<(Di:\Rr&n*^]DWsZAI[iXr1Nr?j;gfRdO654c*'/k-D-K'k^/)NTL5n!63.>(&`hrYB'/-2c4lkKq]G YijUHe@% <O]%H<'Wh\9,t'6BP1%&iciP*Q5^S%]rIiQ(=8^D*t2be2(14bZ G^c+I8%0n^,L+fb9\Y`8.t?GOT9F6BT_Rg68]I<mUkAAH P<F6;hcT-Bj,fi4. 
jkE#jik[n6@o38=37"3&==8;)">bSaa!<HO#4Ggp5QU_ho6&n1T/Ch8e0E$f*_Gkp0@n;e.;.$Zo_9\mJMK99D8BTjG:>gbLLK,NANVFKeKsmA^pb0qWgWt6K2iFSc&h..4+l)q.%)F$C!Jg6*ofZt$h'"\Y''*`SRa.ct)R$ P2JRs0'eos&9V5Gh"&eaT&?HG.ZUBDEH*gD&TZQKTY07"i]t1'rY8SCMZK5f`A5pF-n`b)U&J=p8?^>+&SG'sn#.X(=Aqm^6S)KD@8mqG/(5]^Z4LG[kML3$>>rUX7]Tr,kA.s`RNDea#oA)8Vc)F;NL:JSMt_`9FMlVLi[MLLAU;Bhg4qXW6SB-6S+]!pkG&/IZE4.J&p\=b/Z6EN9*!Sd Q,S t3mfRlZ:$Cs6d9f j;BU&&\?Uks_bSLr<Qg6XoeaXK?M(U-TeeB@oF<S*<lL$/&1TI>NK_ Y S]tnmL?\SgkPL CKVOdIkl40H<M,sS&B2c#ommg,\J!>VapSC& h8le+Ob"7 J@#(E>.`P`IO(gGW1$&)m,j8)]FGtht?s_B93RKA0!^?f4_G!tET9hS6Q0/33RC$l'-FfdHs)RAH17:IsPSD<PmlNIpQgcWUs0=6?;s+K@NAFd.XBkP2q5XJ%Xcairqr_qfpS.F=#\']lSo1pfm-U\it.)M*HGJ<./SlAiT87eU4R>3Y+L8\e-)W\d`qWrkkoidqt[A6PhNX?Hc,kh!,?,0i#TJAdmoaok['CQ[(*J`8=@FQjRT@N<Q:'-H;2QnVbZC@1"f7'd1[psWO4Amt+]ek!n$jA!cQBA##/-KsD!M@fEI&6?KSpT*pA_.?IJ0MqGQ4*0(_ZG?1Z64:e+F!kV\sZgA+_TAD8EF_T8psFsTYYmKtU)n[4I5Wr)XJXN1[$b]8J9h3[3RLCfLY78a;gq&Vn%: '\*oRpI<f/K*=W8FS\<t@*;^[IeI'U0U&@`qp-h<P4^MBLP.clB'5UpY?@9*Qlqqr!>C31hk""_>[IhkY3E/635,_J"A<c#'#a3f<l0bSK^-m/b1-8PQ2I%aB6hE!"]`sj%K+@^B2+K\O("Z<VV;bblt9p!pQoVgp0 bcc)MeJ9X>`H_8GP=<.%rl'dpY/(`9Cp&Q=>(t<$fm<6:/a!O ^Y,%1&0 ghC&!F0'A5dqn/LW?7ehD5?>^Q=)ZB]0&lUPjW3B^2S1D&%,c%m*f^ @NT>Be<9O0No`VlQOVYlRV+=.sA_&ll/5n,@@m+RDkB(0Oo-E6$VLZX5kAmnr.I<qS<ZmO8=&)*;jO>+?h"Yte6A'Ia"8,(=LJWQ_b-#UC%m1I)6AS6<gO[Aq]oZ)RgP.[63 >$rJ1G$>Y(C;;A77A!C/B2X!Y1$L?Y;go.JT2WpG3O/al,H/Y4TP7\ 5Yf'A&^;(5d"kSGmmD/!7(C<la8B!L#:b!_UHm9,qcc9"DYAQ't=$jhcAQA3"[&tF3\rs;<"hiXI3I(#CMFk0&^73Y9E!aU:m*27;!4AS`,X`K9JgWoa?<V@r>N>1?a9Tq/!BmS%(+n8iJ#P[C.9\h6>1<T(&C!9)jT$8iFGC?fj;An]F\3K'Se$93Q!WqHt"O(D_ST>$O"5mZ?=L>UU7+r7A)g?g;)Dkcf`Acq+bUWt;aeqJe&YNT&U-^MUctE8=P@"M0Fs IZQiji)(!)3I%2:)k&Mef.MfPP2-b!\287:Q O43G%D^9T1e$/B6-_8JZ[ 'JKt&MM9+?&d Bb8IX-'tSaYlD8.=hl*Rck:+T!Y77.9I ib2EtT'^>A1YnIIF(A?QPAL-]4,jJb'g^f@[NRVO\bU)jTfRmX+m0+B+^O)+FDi*4IYq@m:#be*i"F^8N8^f^TJ!?W7$Ho"65r8@s12=X 
ZgBnWj^XhnL5i],M(#oFKs,YE"=Q<(2.s:0632=M_;Dbeb1Ka3K:I#NQZ)92HD1?O%e]Q"/0iWXY&Q8,7!HJnNs3tY6-($@L%^Hh8EHL(s)tlpa_t;+lW,]"sR;6(AX 08Jb3_VEBs'i%$k."rbc]Y(J"CnC[ES"Z?)`pQH&)P^T#V^J3Uf%:B^:1PbkacUf)p-T8Sc]AF2:j0W:b61-]N1khkK;^T-GD,1Lj`G\S93cH;?;df]meBX"@Q61BW?;N\>HJr@b6Uo`e/FJ+=>Gda\7lQF`2D<Y2glUisK]?(EO*NlsNSt5<N"#Vb''%%nE[@gL* (&/UY_&I=htAcTt28DZ7GL@%BBht?d:37!Y _5dG\_[-qTo6hXBM36q_`:5*DW -cG4j.FA,7`@Gp<Z&/tTo2 ?hNj>@pQ\rUg:_M)WfrR-*a(:?\:BnTFNmO3)_*741ZFb9?L\VNWbUQ\ iNOY<'=m[!K4<:N&933(!kAQO+0ZMh>:<^.!BL-^LXM:sPMhE >K<& P$$UKZ3-Er`ZUO'Oc010APmiXmC$YYLmqD<c:K6Y<WaIHH]c>OB:&JJGr0p_h`U^2!Zi$O$+qqr:X2QU7>g-j$rBtt$:=st_ST#WiWG]Bh(m#U?86]Ll=[#M)r'K>Y(Yd;*Z@OPnc0O6WBIAWqE%ChqLXP"4XRI6g>d%Oh2c3Sl@FcA+]9TN!1)5Z_4qNR!JW\(*OHor2E9Z Y<c]KoWZ@NAB7#(g1!X'?8Rn)J'cn'O&h8n:LA!qNh-URea-o;10&.k+sZ-Vb= B_;AN7@d#iE/O]`6I(Crn(`0U-nYED8MR?4]4CZhO5`>F#ZW3%GOHbk<N=rm<I`\6PN]ZL;4J/e_W<B2=%[OgQ3"fOPg\fPLq4jY. NfR).k@s;6*m0,(D$oW\O1Yd9m;'(.=4'W@-2 >er*[$1[)V/l'oNts%P3Yq0hbc_\G:!%42PVKSTE<e+a#P>Xk)ZL,`sRD1Z>Ht]ioAri/5?oa:%rO%/f\+lI(de*J(!W[Y:,fF+3`T2I12"[ceY`4V&'#Gtn\ H8MPfCF"l*d1/4iHnaFoJHR7tG9rPtsji[AL: pY?'VlW;I"31P ;SQsd6<_88#Vt?#0q-gZ?CY%]D]nan26_$07m[/5'm?0>)^EI@q>a N:g^9DMY!#g%)ZmtXr[T6"[]WH&g#Ds0tq9;WH9RAra!0qPA#0^AM#s_&OM0Arr/Ng0Ym*A"MY`+GmNeHKVI#Bk4& Q:oG-<J@=KXRgr0=A3A^P4BnL>oWQe9t+QN#GW57cYNorf(ETGA<MeVgorXY2`U6!=Qs/iU_fDe:N#IXs,)iA5ZB>(T4P(^CIF;\0s"r.T3QQ$Qn1.mk6a,$m"[6d@91Qrt%s!4.c7(nq[n'"*c0A^I!JoZ^BlUrAA;li[E-fC-F_chc>/IW,/VqFPOM*1+)(f=tmCBeJ`CBc1jAZ<'E"!pa%_Ig`80;/4H ;$'Z%9eJb[?W5NaPtG9T"tfC<=HRJhdRT2GRK5rE#9*>)5kJ8KD,8ap 
Ap="lA*c##gWa4A-c`if_oLc$L387$c)*TNVJ`'\L?Ok8lsV!A\?;?]46HkkRh,L>aV?"A2I$N60>K.06:4XZDh=6MGRheK(^]]EIID7ocQXN4B5HL4q;QS\>6qp,A3"G"GJCo"Te>5ihQ3`3r<(ctdP&!s)3ET*8&ib:1Ak08U0G`?7rg-AAN7&"lc,r?-6@%T-If4`m`/1Nq>m-Uq\hr\tl=hM:DH@YrM+n`(Qo$h9[[!6AsA7(JCm4J"-D0Lh^=$_OpM##ZRb5-&>iT-,l](anU0gdZA-=&-ag<7)glW=ftg_kRNQ043b%AI=r\!Ab6T&I!QCGQiX+[ADl:?%J,AOm#HF5Y6>/6Ls(U?lmlXW\Di?C6[a-[%$.Gl$&]9#d:#)rk`6Q`HEQ/ZR!dQr_K?;7/]>l*VAf;6-2O"#mFDfes+.%=CPbXX"0s.=C]Zhq%-r$<rMH6F>GDC1]fO('PM*K>qB.H?9h8T0pZ.l<9EPlClV&_C:lPmZ*P+Q0\;c]4WdZ%==5Lm/BXMVTH_-d`::Ol*7q1c]A*NNWKM=-/>O@H$a1c@r.=0@;M*FRARei!@a:#O9GAAO%Kds&>ddJLcVT?IT$-AKATC8^MBRmm(GW\SX/WOIPH?1imPf(^!U&2A=(nXd&^(]XQ"AB. =KtFE$A,-8/Rkj^7>cYE,f?Je-AF"l,CA,jS>l`Z?&00JV$_X6262,/n*m+enS@*k1IUG-1f'C9QR2lG,\NcdHf^*N$JZB_#m"4`<i\0oJC;?;>Sl cpW/6,)5nI;j48=jiE%4X_PFnH:EK"QRW?HZt-L6C#?V6ng iia$VRY>/X,1)C6*aU-mO;Yp"`Kb\oI>?9.<A@?nLG!Y'^<.f.(ZfbV/^jeH8bWaF3;pO@'8[+?8qg6)%]q) ^]>0_E3bM8F)%3T<HS91m(0EYPC<(JE__6O_.;QNY=-+K<>:=T  98'L^C2M,'t],;*2d=(AJ?AA7LL>)+cp^F+5)^?Z \@b.#E=m%WTs!3?5A+Il_aA)*(1H>]nC( [o9'5b5%Bc!k.I-(?Ed/Gn8:sl-(Z\LA irQ+LAqelk@EJ!C`:":bpGCT-%Wism20#Nkm0lX@fM1*XR.Xeh4pHSHn=0HUL Vf1_r._85CGRJZhi=Kf=:<Na:JU N2P_aPt+D*3qZmD2kE^Vq:Q/E^LR?Qm#O!NO^W*lIApPRDTj59oH*lttP3rFKe$Bs3.U:b_#DS+E`Us<BZLX3=4q/cK:aL8 /0T&47eC3ge:Jl6n_N6$<@,Nk&`?bFIldGYXilP$C1MV4'EDA1[8oMpHq F:FD_ZPjX'>a7(_(=ARSeai-bX "*&9TZORq.62:6DcMOFA(=NN37F]q1Xd)W(^L<Hp!"DQAgg_XTqP/(0CoEDhR+@Wnc"@2Wtod;\/moOnlSqf`VCehK]cI4%5&=`MN558%B=0E.s&G>5mj<,CZP$KDAr_[0kb7b4K"i#>= "[46g0F/0pPWVpNXUHgOfiV@&b=r* %s2?([p*jG7&re>C\qtb^_Y.Y\KT7GS0@m<0E6766[?_(c)H7JSqT67AQd3&0lUp4.`jiQ'41[,CQe2LU*.,in47K(`d/$C#"qCDHs4?L)rb4,H!]Tn7Wj<;j(f`rI90'$HQL231/4fq3A"cCd]Ob-HH"9`t?89AhYoDV#<\4DWdo0;9PT+9ba;E&.5&(s,,TFOGO."Aq\gn3-r\5#a& FAVPE(-fOUcJ$;*JZmD\b1fZR:'2'dS;fkfEh7B-kqL;FDp#s+[T'tpAb+cMk\819^86`6@'@k'mm!Ci3J1%T; p1TgG1mlKRe:C>H@N&lpR>_2'RG-7')C  35)H0R\88.5lFP0EhdH_FRPRRA/U*7op6<mg5ADOq[;NoXZ]j#\7VkPZ&I4"Q:(DQ(8#fH"?h ]!q)FqVd'`5K?h'.B ^7QK aQL!YhTiNYWtJ)Xga#g)1b>c@HrKbo(_6InOgZOL(I"d12 
.7';]bcB=PA1\tD^J7q oo$"`"=AW7o5M#PclL?%Y>9P+/+4I^[k<,08C\`o!=?MQdHI]SQm$SGZ%r7LWG<f%IJ2gM@\mi[sTCrBm8Q*:8s^a!E5";IVAWn@@rXErsrRo!n^@WNYAn8[%B7C/YkZgi)F&2Bm<gS>Z_]Nm[G1Qi9,Q`Y!bUF5o);(q5rdohKafk-/)pi-Z/i#E/1Fpn&3fXEAl2$gAo%!9@*RDtW/")#H&jF!o$4HZQd(k#bT5]1qaY55B5NX*qAkh;^JAdlUt]tG.%AK6)=G>T0Q[Ld+$M;-U5mE8jc7%!QUiNae55V"1LMVqO%C001oeA9\UGglJ2QP>,]j[F?3bV&_X?N>$ mZY<\Af06H.HP"t\*a8!Vck8PR2io0\9!?[!>#]lZh*i/l(2m3%ZkF513FWEET;5f@?OhF&t6AKbYOn>H2g,$[\F7Do9-]dI!#!!=3MO*])A2L`Ii5)hV]*\n6;L4IDPgo.Rtthd<>HN<$:7@K9:!=\/0t'$CPsc(lF?jXTm*gA_5+X[X)d@EOnQjd8f'+ho-P7 Bo&02Fo>BaU<5jFKWrmm*3=9:M)U6<?;M/4c>MtbAX9_;cq$8eWP+YZ-VsB7_ZNntt;!HY5o0%NUq"7Kq]D`r<!6"W7$Y2P>Pf"%!@7Bj?nK.t6L%(H(:r eJ@A&)sjLF"OI/f!h#rS%lPRV:i3fhld]%\*;C9PTAJs_+F+! Srnq]BHjI4h;6AA&,7&j];Bdl;]X]Z[;H+.IC.537HVN.ZM1WFfK^o4aZ4#f:.Cp!rK"n+Q8q_@+99&AEILR4EF1*kV#Zi17-M`+#B-%C(I\ 8D`9EkPjJ:t@\;LAc(P6#%em7NMI9HhmZ,]@[#lH.W%&ejsADRKVa<o02ALDh ?m8E4d!LI9@S=35s%3#IhhDr/_ciln*:U4I;li$!p TM3&'#opi3#Ksc^^A#EbppbXQO$Vs*QhpQo[KN**(m^bU!P'dTk5<ThQPp1F"t9mKcj`Wjqe]#%h=A+Q"&IbF6<l^^&[%$&;D-bq?,?p:DpA.L#Y`7fhUd:XC8RD>!B1o%!RXJa)%98 arX7!r3!f($ZgAp%d:$9;8%Xd$1.a&kZEKbqS1 *TPbtBYQ\fY!Ct\- bBAt'And8N/Z3elFBm*J/J5:qY)A,o@&`f,<DK31#cY! 
F"7E(Pq\r(C+MPC5.DAqjth%nsOZ]`$*SjXap#P*-WsT!T*X)\*j?%dokd>q\Pp.fCZf\E5JB>&\`2H\1TP4Jj*&IZF`7"h7MgNVV)(Jg[2HC_O)ipS8<P&7^Lm'23qqO2mKKOngHAd&Pq4B#bOU0ZkQs-2UhD.DqA?JhU#%J;Ck$[h>L[^'K!lp3<O1XR?,K$MENo)9V*_"^bl7sQ4A2Oahh gh$%s\Gtppge(EZq:=3OY6 MZ1oSGmTqSL?ARR%= 3*Xf3g>re'pN*E"$M!GG]_f-Zh!]jMotT(32."?4;=o*"&Kks9sBRc98c=gGCGoP_=EA0R&B4;qAQ>95+4L]mP>%,$(6=@G-GO'* F@l>I-q-RqFB>?8@7<4Hb;(R3Jo[Za_dIN$=I'W*r-/.Rb&.WK*lnL`iVrlYa(GNh62Gt#Z4L^L(o>pkCn"oNI6sF\%0U[tLrdpq,l&NWt4giAdE&08K:ar6nBh&kH+>,5O:EQij+M_[@g5tM%TFAo#^naVFUEX `cEs/Fo-D5i(WNQL:4`WQ/&#LR%%>t 8).'fUYtMl?QI/3KE")l:.7)J0)Rr4p$Ui/?rV_Ui%YfeV9i=lGNb-I$HmR>q3(kdPB`LZf"&*%+"X*O@>nim?@nj+Bdd\a@i7tne0:hfH7WIJ75M*)?FUtYk?GXY/I"GR]gl/th[Liff+Q=hc.U&p]r<7Yrl07:T=\:(A1pltB8%dS#//>7%e;K_""NbN($:BP-i.aC*T?AmY-NM"_oDFrY3-pkL!EkM=ZFes&KC\/+:b?iA-KHX"AESPrS(*A>F'X<D,=9ih1FUanAA2`4gp$<=.g?TYcF=H"sE(#7PkcSO0.m0I7*.,GcKI8k]>)+=s/`!pPje] \"(EF'Gc&QT]n`9CM8W?4kraj?*iYB,0[FTt"3#@X(.\VHE;20 m#_da\(c$]3@CS?*s<ld9Dj^AW"k0$htA"BDK`f,/B.Wp=1W)LaA'Ok<dHp>js)U-m L[n3V@[<%e3-gC!Z)+_f8i9p.'p3 #/940)^4&P1SB#.66iPVn:T7c+/rtgOA-CaV$%8jEc$ 23QX9j<9lC[Y_VJp'1?l"577%U>>(HO?qmi\^\HFeU:R$Ik:^*C<m;Y 5AYX&!W(VrYMUnP876)sQjiNnbr3''%QkEE=?$#-&0QUXANki8723SiDB\0 %!\%6pHLamb3&eMWh^,I;,nd9MVKT``F=2R^09a0d.GNo)jj9ge)A0)k!,3gt8tKb4d(Ef_^H]%] H=U]9N)QEdsq24ncLar#O:R<Ea`Wj7@0s&q=5lD&@'nZS>@d?Jet.3@`=Ws::id[qSspkk6He;"kGSOpl__Dn@"!&9$IEUQtUM !i:X'G\LDpeEs:h1g4PJ]GmO@d:lkc^t3(D2WFM/l_9L5Z4#,/` F^&1:bXk9C_0jM!bdcd.j.1?s8O9i!Wg_P8=W13AMS+"MEf9j-'Q)^,o$hnsl@)d#e>l&+Qjh%hO=X_ZrARVQlT3N4Y![N`7,h_VXkEnnP`nTt\!\7!R:QTEK+'fSo%)kD0:#ZWe=*FO3Sic]i`F[W1)aY9kRIaS`@1_H<`hA9.#+.W^`V)Yqq2Hn7f:.2;a PZJEKRJAjQUq<?=9HI5>DYU.`Rm4*LX$Z >r[!R2Y*.ROnXYk?+]JJ`H/K6(G+(l0n/)tHFN%$P,R5_S]B?(AZMH OPbCX$*)T6"YjcMQI^B+; %:@\&+A$KTVB `M+)<@TZ;7$jggJ@b]V*[.jp3;I)$" L**>X+KHgQ*N&PDG6C[Ccs"MN-gl;jPbg)fdR+^_Ds7'`UeMn1,@a9`A:SG]UO]rCqXtQ_C j>AaHtKV,$-cN@D[O`?-A,<qk5cW((T`-%C_?A87T>nU1#!1Zd!R7nb@ZG6+6P''^ZhCNet:lR";8&l=3=6m'p9lA7m=Ht&g,?H"A`ah=qK\O]Kn=<N"1np-TWPq 
3C"6+IfQA?DHK1""R6GYiaf`Z*+E1bI:@H?s,GA G=d-T%p1e3$h(gsml<dFK:q(1\RCIc`WIj938[tL;nc49'Y6MW#a?^@__*%(2IDJ16IsoA\bBH2km$\;I7#l5.;4t/F7"sYAJ'0sq>US6cJBAt?5Ti)Q8!K.MeNKn`nAU_Dh)=Zo_,R:>sQ+]O+tX&b*Op5*N__mkJdO*-+#n%3pn7?E 0A/pS`V9:VCi+*40;nXU[WBG]t=O2aDBU9(fLUH`AK(TYD( 9JJ/[#Cbm19n'3H,b(Ob:rprqVFfQt; )b&@WZ(tgah#SAJo+W%?6gh6k+*20T;Dh,,XpS36`4"6DX-nB$7lNq&X;b N,f)T?9lbGVFFL$B%9ofD5\5o)CtthNA]CWLAn67h.VS\Qp[7++]s/iZc,ZBJoA1j,C,A#NIBr6&!H>d;F37(A59@btq5<V\04NCBgA*L<BVF]>./1?S;.F*94=?6@PCIln`M1AaBoge7C18K%JS0m8_D;ZQ?W*4l`1Qa^ [[[^SG U%ZMXP:.4>FpGV('i@fZ`bD8S3<Gr#eEC%j.tL=l<;\0e`p&`)Yc?l/YHRSgDoktZ[\-N^4k1h!-VtPkY%WCX.R-^^Bf"Rr\^rB4I[M_=f3Z\HS-=[LM7;! E2*2'N'D6H75D7M+KZq>4<Q';J(")%BkV-jk2c?RljcE$Z"!FlS/( b&k+e+o(3](Mj=jl#is4A&R*Wda-UVGdA;NpS/NCB=3Z_m-+fs2s1C17;qf*]k:*EYe-5)oAS=%"g*_\pXGSAb&doD0.f\SUjKO?13_1+9X[\=KbSM!0B0C <ch<&3,H(JIB!!g)!>5PtgN( m?%;gP7rG"-Y8,aqtBXne5"_7!*0i/hG&jNV7V5FEGY3TW=L$^O2flhW?7.T1["^Ef+fk=&h!JSi9J_+t226p>8K:`[l^[&%AOI)oI1AIE0<eEmL^M;pWhmi\dE<^$r(2Th<.BcP`a#mM2X!8oAi-(6\_(63<4AicTAVFJn6XMIQ]'m7UaPbb&/b]M#A`naAB':@^l3EasRJA6Nod>A@2mNr*MKAP)W$>l-,.s$614h&b:hW@eWaO+qS7/`)EZk=P)@UtjI)6G^4DiWRB[GpK)=:&YY @ElO5-P+%BV\8;_H=0QV*En\TG%_YM@@7,c@%[3TDLVNi33!2i3-p4UP.$i-653qrVn0&A0F(n<YXO-jKZjVWbZNRb(\U]Vt$C@?AfeCm#KP\lN:B,;8k'T@G/l@l"AiWQV5Zc"E%QTe$m,'LHMB7ht3Z[g27pa,_\(<XODta 7#f <gZL)LF"gf3='a!#$`h(ieX!AYW)A:9^KNJRAAB"1q(f0J2kER"A53,[G*8Ds*"k^`IU\FS^7^A"0Kl?9*V_LlF#Y<4NUJti6ZV_MKf?p7QB5SCdk4`j>)8"#b@\$j\\o1L_GY]F>reb8%k!A?W'7S7g2GF-4`  .`s]H$2KL[)e$tW$4%(E1T+go"ZJe6N8?ftd'!F'i(IooWEIOCo!F:g1o/9LP.M+R`r#[P.I(-S"bhX1mGIm!T"<V39KCB'_C3i,k6L=8knE[Y8Y$j5rqHLAM[K/h2R&2bp&0Nt%f%aG9lIc8fWNQL/Lq3ZEWOkO=r79Aajl'^a-T0ae(+-,.kgTlD7tM.cnqSTX`55tp_aZNQM&k)?maA8L[J]Ye6NH>5tPm0o.l/SWe%<(9$pSDml_9JgoY)\SQn6:b_>TY&5/9_BmY->iD!@n(W!0\2kS!928^B  Jd%jKS?l+S% GiB$Bk(N74jEcJeHF_ssI[&E3T\R)NE0iqRk^c9>t H3MKKH0FU73U(MU&V\IW\F"A%5";[ri=#A`oPY$=Vo=/%il;!A\*D8_dHL%6S)Kk2IIXBBY]3s9F)H!&$ahAOhS1?18SY.b@WX,.\SJEfbI!78Xg!U5(=N<)iYWOJRQAEIi%s!#CN_[/ 
0#aO\7@2jepFrO\^ToBUKf@74VT_rg6/_:kZ2R?,si Ks9_oZFd:A/@\^r'@k^mr'6$KICk,F$;@P]1H[fZrfpWFbg*,f7[Ln$4m.+(;3i:]/A 8#P&fU/\ZI*\O@Y)ai64fi"#_G<@4N2>TD<FCfh&NiF"0Q@F]n>lRMe-o$A&a=l9JWtY:;*5*L.>'rID!qop(R=f0SD*S"0j7Sh!@M@HWkOBb+QHbL["*.#eq3,j;Ri8sGq:#6JRj)Z4?m(WtCZ@DHo>W$<(=mS7*dI :.f$X51B9U`"d'QCCr7s1E_,GfqS<i2%/W+NK2s#;;t]IILa2[/d1`W[FeV14 jQ61aU,_.r#od,%'0l`e"FUKO/FJAiOYKD(@1UWC@s@,d4n'Dg\*_;/+*rSon#pdem7'\Ss2H)R&XK4Gqc0;V]RAXfmJU:)mIGA)*[LB5jnY1FArIG=.%AAY)"<.ZqopOfqcT<sBp@#0AmBA +hH`gIA/= %LL0mb=P4*rN X,8B(`7<eAlZ/i %j$e4T<EVEqJpV:FbAH@.K/cpAnO&5>)N4Q](>[:4m14*k/'oW&g")9i>F=mZ3YL=">B$=T1O=M4Y@AdaI7\)23KEcM%-B5cQ'$lp]g\1LLm4Do'I[[+[.KSY(hj,[=A'QAHtZGI)iSB8.O-pDH:K6Nqg_AY,aaeWo;9&.Z6!lSP8$<M$SlHA:C2_Ns!3cJ<EXmEA"O+[qEm:2IMjfc aCEY!nWUsen^1:r3QKdp+;p,P4q@42iT4-h,X[Flksgt!lW^j6Wk7Z6A;J/'04NoL.ADW>WY?%Ucf*l,m=.J>X1(+=ro)$hWSRY<[R*ESAO@kYE,+ZE=B12;UBgB[r.`HektD.RXZfAd=6tMIr0mkAgXrBbm]%hJ$Ia>?]p]>.5>a;8!ZLoc-eC(KB?3(+^o^to+Pgt:LT4Y(A 70sos!rHMEpW@T [=`SA-,jTCB`(HbKgY(!]S=OiPMRDP_kLd_V\?s`^* a,OXlt>i+6Le`,t'^_t[@1,q:dqPA855MEc4&m65\1j3n.+<sp8IH*CT-?`Y20$k,b;@mn'HNlA`giV7KUI!29;YA"jZd7=Sn=nGB&`+D=Y@VMpSVK/cYlg'hY/[V$i']#t(?\2Y=s'AnDPf>_J0-V 1jej!PM4LYrW],HA]3GAtLF*c%NpR#eAK7W@.CD08 .K-4QeTlr2A\R#P6:t;npdCXPmE;=M)H.Ao2ki.Gk^X7OS?jRr9I4D cg7U]'e69-f-Gj'2(h*.js:I%FpTdhnfDI MNLcQk7B+XY\AQWZ.M;&N)l^W9g*g1)O>cIh(t]l=:g.8MNR[^^HpbAEKhN_VK>=/C]'e@TKAiigaOjofP"UTKJ4?g)N,ro=C:q:<=Sfg4pO#BtfCrk^4*<$M_OI6>\V)"m[8?)=9K.!nH_\@,[J\l&$o4ZS+W_32GH\QKWpRSPo\9ST>,*YIQXj54\O<K@R9ocIYA#a@n!X8+IUq'Dg>aWT+KqQg#rCIpL%:/^A/Zk0!&Q<q;0VN7$2r`"l%Rp"dEe3+4,;AIjEm'f6&qljfXjQEAWO^S?;7HQ%*)0HboA8:WLUpX%dRANd:UE\kYPfSgD-C\,dh#E#F%!c/oYfY^Ho]#GHD#;ZFrAk==(c2Nq@ !(]/FTq.8Wmea XJ-@,.X'9r? 
!LB&arUD2Ai;LjC*KaIA:EEK-,6&'6Y)$jYdf=QM7Coh5fVp]B#/(E($(\b7"rZH-!sAd*VRWW(c3F6 :Jl;'a\."VDJZ"NsMr:30E /]PRT7d.H_ma1tsM&UAI)q!jRH7Aio 1=IANt.Z#3k[_'\3\h;s@nCH/2T![ZJ:aBgUkOP'p4pEPNU-rAGAk,:_+nFm,:b/4AO<b+rIA(.kGt2b\T[d63hjELNXIVqajbG0:HKY+W>MFDtt7ceOoco(0Z9$o:1EV,bq!hT7QjUBK@FRVXA)b#FAG<;c[Ad_&R41&&P<CcTR'taj]EF(fCsFb'/$Of?@1M$!(.'m:Kb`RAXh[X9IHp/ ZG35b.TZjN*!hb(6F+/qUs'@A04<TqGD?%+3jI3`q3ojQn=%^]-PAZ&m+QKl@57 )!p0>ik-#?=YWgjc'Y$)47J*>">o<t:b*M@6a!$F=Zfco2fWiKB%P,_a58kf)'UU>U5nkr'tNTC&bnc,nG*!W5[`8sfC;aF+tTG8t9>1jIf@nOAaEr]DU$chWZ61 )fB4In+i<.]^0af-pJ/C8RlP(ZA`\mP;T4h"c[VMr1X1%fmdpk)4BF9#i&*WbY+A OG;2"V'LML,qI>V4P@U6H\%$lg+D>=B(I?-Q+-$Z&+#mL8f UffC2&RTX"hk7eYXi,55tjFO41th)'T*cSD/\YDiRmJ&4lQD]rrF88apB\qZ[Bs!OX64<PGR$adA:GTfT70q[W*X^<0:S_aiHQsq2%tT3]HYs%IYU'm7Ke\Jiasl258g>L*9lPXApK`SIA49YYMr/D;%*1]8G1j:A#o*MP?N.+=4n=(fW2KLhMD)CQ000_A%XAa^n?a1!ts1sg/V7Cp-:6[fI(YQp*i6AJtf\):mj+)`"[G=P9<V;;`KYh2jW N>FVQJ\H"LDYi/<\I#aKdnn3,$eeltPj#iEJYG\(R[*.s&U!KcS).Vm^tr;gA$kc\P\?=+AhU2%V<_Cj/KS0sZ4^QR]pVSZD\:is+:-GUH8i4HIJj7B:tT63-B_m9K>ChXigVt\MN@BUmBDig5A)4?;TjAPpW>6Ab(jdh'\!lF&IH,AN!Wbq <nI @GWt*"^a%HP@8Rf2!7""d4iC"0^"m=kQpiEEZ(/lr7'r&;@2IY B;ZVolI=3<cJhn%@39A4G<q##4n#j(Vk"b*l)$q=U3>E&=ZeNW6:ci%"_Krid<\`No2Vq^]KG>RW'C63MXS1o$(jWI/J>Xp5=N4Ns2S4d33ifOM1!A$&@4VU$1&#-btCrgF""Ge^MR<s0(HAAiJ%RBcKkICbShQT/W6N8D*6.[UlMh6nU"YgB@FZK4PX#,_o<p?HRm7')7gPW5QV\aWA_,h711$_.0nhh@&+=-)&O<>@TD6)Almk!ZgqKC8OgppkZJHl6ZL tR#n,31n]$i-)AO4)LKM?<K_e`hR(=^5@tmsQ'Z+<'#ZiNL4%[iD'1`01cC?=C37=KaO<<8?.A71,QMVZj%EEoZ]cF".(ta3.lA5QFllF(HEYg:;4MqAAnZWKqd agd>bVV`b[*A.k^R8 9>/ZV+pP(XNi),k(\X F2)T9;*?$9K"-l^#K``8m<@Rd'\LCGqshA9FJs[2Nqiik))Ek.M fgWa[":])lE ^igSa+'4FQ8?O*(DBWH\?8L1qXY(h?k,=i*/=_]l$%d$*cW+5KP87`6RF#c,1r'K6Sg2=&74Yd@7,b`4"95bD(e'UY)YR#kX )A*9.q0sl,j)R^(O'5kP7!jMprGFM*!r5A]BJ'&pkPdeO@Q[B++1KEPLJ0`1MbqoS!GgCZ]-)aDiol)cq@5bkCAn*@8 
s-JT!R*mo7PX'&]\A&Cm)F8!1+a!rpK6#dO8;0:gC$?XEb6N.WQTK;cS%KF0*mraSTAP-E[aSVO@7>5$1E##aD5&,aIZTkUE%#9W]C.5@=8(/<XR6p3ksfh)NcQH^pM?s1D^4XKaD%E7f>KHW/t.iDZ]QQ5I=ErFf)HqLV_J)#X&FbIh*QJT[A_Os-/#f!Qgb\!KArLj!UGN#9gl&k3*X3/g;:n`dY-!Y&#RZG8Klc+PSQmQ:e\BMCgJHQ\Oi!jVO!-6g@A3rmb\pVq*B0@5r*ktX\BAdl-L,@@GjBGS7AO^.!jU%icc-^+@!!nTN/E#9)1"m86qQJ$)i2^"<E;V-I<Q6h@)QV4pQ bUcFCU>e>4*h'&'=kETF-3I(=5c7,bqY5Mc5c,'R!n0hr87*phBM:%t(44jM(!F04pg"-R]DIn@"d`'A4##B;X9d`,R1S%n boGUpSY!4helQV8#69MQRj.(fRJi<;'oIhK&Va@g*eo/k^3O+9tOA)V>;gZ*M!CGYoA^Mek(+p0M7AY9ZY:sS)&7Q+(h+t.NRqX %`1)dh4q$.Ib7s ]SGKn[Q0h]mm+G3IUS@/bq$C%<#2#k9scGN*V$/6gL!Tf@.FF//.r5gBY?9D[#mGbQ+paH4rlVGsXXBqJ-E.ZKeP0AKQX(mPh?W_l.X\I@UKb"rHrQ3n*5nk3L_jA&4`@SkL/4f&,UYsrA1\JU75X `<2QeJ(4,(s*SYAO5<-tNh>V:F^p135lAmDd"#FA2_U@raXOmOqjT&=IdAoa"88UD_5e*=7Yib-q=ier1/b>(\b_?%D)kh(A;A,9Y8ob.WQbi'S@QsRf`%C@%F]qsbq-e<XP661H80!Oa'Z<\1;4i[?l[BQ9GG6%Ds'HT\_`@)4n;H^aCG[J?$^N.)sEgR rh5#p7bR(A!8t5nT@c$0eaP35JL*U+A`=Wb$AgWhm0Y7)[]j`,;\4Adkst[$7Wnl;M_V?lonmaePN)HK5V"g!nQ=`"gpf<B2U@YWj)jnIelhP8g$6+g:Ft&6jV (n'BmCD@t$Ncg7TU=fnRLC!h3DFk"NaVL*6(c\kd%Z>&il3-&s/`riH3;+8M7>am<sT.R>MpJXI/XoHAkPKpXJU71$-'?>eo:XbD^N>1J]NBq:Xj9aW8L4"+<Hn?p+tp$R.>\f8[r(Nk3AB2Ca3:9<OA"(`D9l2EmmQ`d@Lj#HskG@PI5F&ckNnk4e>M>>(>r3ZP%S7K:Jh5'_)JiQn"2_ff6>#0K/hh2,*,dUbS(UhFeRsr"ZO^p*qM'BW9p8 (/EOjJqM<[pbme&L`2XT/b&$ 4R[GI8.6ReDO#/HEA.2Md]DH0<_YqiF;]mFEX?12oq,NnE5qIV7mM<hHb+PZ`GS#rUagL;-TN?lI!kmbT_,CZD+\'*NTI/%LTZL5X_gKd*DRd@Ve8ZG1#fcmM6;Q'hFN(@+V;9j%NC3 &]ohb&X$Y"W[/'g0Ljo3lOIDQ-U4%%"#?]g3:dqA53k+8tAd!aM(<N,7-Y7+'B0VJ7E6B?MRS&Zj=*ZU;!S8a=qA^?g<?<C\AiRF+q0\?f#bTcWpB*4:`7JAG(.rMHmq*5A8>, k+a*)D3SQY9*9!Q?=g[m!<K,<]"3`"(l:>mTe:jmq&jAQeCad4Wo^c34ttXHq8Bf&_*11<t?E_'NI6,6N9/! 
2E$"Ctc-3[F@`:+9f69b1o2"'!l`o[iIP1rgX&9r^]a58n]%/Zo^XSA+llTTYEGAelN4D_h^,.rndN(=&AlntlF9R:i6gNf;#E8@Ni*:3]*Y7L`PPFM">a90K"$Ti7V-LCJ$_\XHOUe9';=>pfk5TFBpgJ.\<BA1RhL6Y7^tP6=3EAG=JlK\e<Mj^a6c-[d=VG/#L-djIrjB._(/KosS>`)$rV#[kDp@M@%)Z`2MQp&=kq-T*-T!`.D7*e*_BM:oA$`O?/,$FB>,^7Rb<`>?QNp0&P?HH\6e\ZW"+]M.N3GfLPSJ,&$<b-a.?m4LEMbt[FH/.LX%]!'8S<`kL7H'S;%Bhml-O%#A.AQ#dl$+PXE8!Y!2%aCt),+ NK>UHZQoT3qk<7r'RJD9oFYcY%X*LdK5l\M=VVG%Cd`1V;0AV:/@.GP@&;pMo/%70[36V@UbeR/bqp(aA;/8, h3cYKhm8-<pXs3Jobn",N[H]FSfKenq[78f#4Oj74\:L[`O#i=?Un2tmI/mD=po'i-,0oBQ4JWjP=9">!X>`Ak@.=KF/Kn*Lr9A2HBMb+Ma5=6GlaJJ7D)VNsYn>kS+[1W!rF-2=/LV(7ee-LM%)b]Xf@?MknFlJ?6LAbX.H7toG'U*ZhALkX>'`/Z:O7Asj29LgT0&V!+/ABRn=aEK+qI\,oU0sMsfo0e\[YXY]+Spbn67HIJOfCgJR3<43tZC1KnJr=f4fr:.#KRfZ(M=;!k;@HT :/iZ_H.HTPn2VkAnn)s4A1'Y]1iV5#,mI=m`^(4@+(`^%-ia"oh<N:@Nttel]\]Oma*LdIdFt+O6aV/t7MJ0c&8Q%M`attI'R0ij3hasMh]VJj?"Hiq'GR4MWn5aOt-K+DWJ\!\mn[?LXWA?MRs9 ][%Q7+=ZfN>6!%g[_/D&;8!':E1NKG&4))ARR-Ar6=)@Q &&)>/f/=SLQ_#,efRaiKj:*4G.8Lc*>*BkJi#bXoHQr&^=aXB0\-m;?\^F_<ZnUT1W8IsdIVf#q,^t=o*%PXj<dN5W40HsbA.FdR-Ii&0All\>AF?r5oA\EI`X23a=8RZR3LqN#m,VE5+C,"Lhs*mrmFKG^(4ffII==VkpJ"f)T*qO_ja9,\J"-Qt$"AClXSUTAc/t[-dR/?&fV0E3r-D% ]M;SGmMo>h[H`iA%a@.N\D+eh@*-`Ap##HP&$bIY!-Cm14pK7X@T)^%b'bF7M`F;)Lh!SN9FTG*dY_rT\pK;8A#6lINMYs+JGn9Q`?-oKWN.YfE g^?#n@ZFI8"a9:sc5$tKmD8o-;CRJ92+Y[3cAWNS:Jf9/T1#Y=F-64BiIRiQB3qCbIU</Rr)T$iV)q\ r/cAdd:K^;WA?b[b<L(CpHRk -&%PVE8Ng..Nfc_k\Pd3AFA()<3l<4CWAs FcSbb%$j6WEZ,_)O(?(&)>XX=p=DA*/j=nrPcOsf`Ja18I7R=_(g5mABgVD<M4EFPUE9+ *>3Y&*>JKP')OWdVkC-fRrPcX0KMt2 C`Fol[nRllZriSWs']A=LX<3RPV`?cBfY#-A:#F?Y[$Y$5.@2sW7T?L`$;QH$$VgYkr4B_"eSC[a36dC[ cT%*a(C%<%:X/_R6Fq;YBAgd 9)lUFXg$PtnA,"EQrAq;:Ss+tA"tF3VA^7$\]/!*]2+8/ ,0ES-1??Wq[>_$`;2kf8^lt-fZ>h[#5G>j(BNG.'Xqk]?n$cLOOLhU2F?W%MOLM%JsHp8GJ$VO=3h3P/*fp1)hl]iP"32CFGM@_$,G!`KHh#d0L"5EeSn hj'.3Y06q`#_%Z[%7PZoZ;YgN)b^@&'Ym_,_l_q"#)gK(#6U`k7W,ie/W_)a/Q;r6bl<q0EAGX:^.P@Ol')E^/@EPPjaHoM9JZjC6+ki@J^\q/'#sRa%H1WkHEG/+9%KWamAsUdU6rp'Xo)@@E$Fq"KC`W/jLk.Lnd'CoEcA2N36E@ENT8pI@9J:R]^O3oN5YY,^ 
JV=6J\bmaR8*A0)&</R(t`/+(NC[(6TD_'6Ul10n6Wgd5b*/A1`$fciafJ/@*4UY[[0[N!6?gC389H9]"b]<ltnA@.QYJ#X%'hq]&BX;s5d36g])4Oa:[0+QZ(MQRmsKS`(r%&mG_2NpF"ATY6EqGF68OB05o0k4BU&DEh!(.7j2k>-:@2c]KGGY5WW iKLZe)WWjm 0:;Nm[dF/$%U"f:DoeGgRmY-Lf:tf:a!7_>G!lAB663CedhibAW+PYF-,_"26:NRO@:B8;>dBk?Ej(%NHQ_2:p"5aR#<:N6+]b3J4$31\a`B-,B+)M\0Ramq(XMV>jNKA=Kp%lXbN+E;#EbCHsaX00jg.iAXL:q?\L#1UFRm_%K]h,C%9mo^h*Q,14#^+Z@m<FI3En$:niO.5;B6l,5b]Tl?.!cT1].Ke7SeQAC7AVM^I4U,*>8]FqVtFL\BH.m$t8SqGpc?nHLlMJ=@.SCkXhjE5-lqg= BI9Rq8PSf;LmfJTa>jBUaQ,p\,7V]"4jf@dT+<ft+"3 ;FrUmt5`^<L=h55./CFW3gt9r6ctRCe6+3lZKs['it`J5jFH2@V.7JA8E?RT; UQ9A7BoM%*@2eE![bd\5p9]CPGK;i 2EDGr4(Y_T2X5mef,sH0+Ei3*gVD3=Y.@),$pWir3YV<\*)At6DqAm``JN/6Ld$*nRPSApMdsB^U'RK_tOiEle/XZB5E>,T(r^B#)gk3*FR;'*mq5!2)@N0AK/.`^1tp_p^VZ1,\O3"S]qO9eNQRo`L.eG?/f7Dl.2nZYpd aW8o03SA%ZDLNP3G8*1<6:*hVoJ=TrVa&!t(I+DoLW6h0tkmAnehjg+>UJV ]JTBnm ;hN^l+)S.CG:\j[em6m>;TK5Y%;p=>H.8ZedEk=BN?\\DOG)PP.S-!lT@#&HB9)FOII[CO(e0&JL<O)gX-CiH=I#&q(dAH7$5X'%\G.N,75ogEeb]LmLS0)^@=qAgZ@e+De!<7JAY\#6N2oT"=:g7k\S]?WJ6gM"]bJ.fZ>LJ7V=d(Lr:=dm]p3"SL;'*_O)"6V897cWP$s>Q*<?dla:j)rV1p#KDA&qj.m>I\G*JHZY6/9c[/.!:31'mibAEiEkRa_%]d+?)bna.,ZIf)cfc<L8j%9X.330t7\oUlh%tbH3'kF`'8ZLk*r,0`ZTOo\]AB5n$^D+3l]dNfPS/c't#<aT&Z=kd3Lm\)s+?P@( /^Rb@FK"Wki]h7XBNFMt)lh@aGT%?CqHV*`==Hat#^4!DYTn@Z>Chc;gFc.e'[)<tm]r)As!Akq<(]9<tiZ92SA>6.eV#/itVK+:Lr0HC3$Z4*oLk%0mYLj3#DU\4qWqNBsNS'_B9d-*09U9),e'>bYi?#&dAl\X(7?0%eRQ:V*hDeI84n"X1a/N,ITr5/N,S2"3Pm,k5]s5gf7WU`NkNrdFd(7P:,)(nPD=jH8c4ksI/<.WUfE=_OF7Up4H>qic<.r05nsEoeXfgB2$ ]MVSj5Hj=GsCs"\hA1>c:9Uj?%[Y=<N6*`QE*L9M4K'k@j!eYC/KDs5AbA/-ATEn5F*tG:KEVSKsbc%TMC"tV]j!LA[8EG*-i^TYY),3hZ1tlKbf\(Y/o66iNjIW-_g."piGg0kT7IlVDo1e$ai'iJ 1k$Z%gJ 3n(ZmD1X]IZYZgHj_"hP-a)N5FCL9c@s[j-9SF-RAeDU)eL0oNT.\nXZpe$,ZHH-:ch:.6:P"cL(09m#L9&g_F)F`kLBfC+n&`MaONb\L8&'XOjW&A5V!9FqPYk1:i9*J@<RV.7Fg8dR[!($0RBG8JAjk)CT^*80]\.U.Ll-<hW(HF_';i9Bt>1/=4<T1H[tm'@W,.^J2%)fUBk+SZ(_5Frl4B;#< @<n&qeFD^ 
0=#81[sSo8@@tWDO/S#/*`Uh+NNIRRONqtgb$>jA^TCVaLePTrY]2IlGI?SXRiA\0loHkLs?`O\8&?<>3!?SM3AmB)W>oRDU$[2k`7_>Y,a,+C.e":sEb&fTD0EA5.XdX2#ZTM>$X93b37.*$+YrFTorc85839WA[A:7?4>X`VIN3`<5!ZH\h^4pFA<mZ@[c];\6MPj.#c;3.3@6c;<p\gQ9IAH<i;G+'l_8>^JAln:8Q'HFG`jnAnCRf`8WHPF]]$k[B#1;LHW?)W5IS4@C=O$4E"]4!Cl2GpA ^Cm/tJ *?`6pLMkVdd.iMlo#[/<l\b=JZN$S]G]#Ee&/pd.#16olAklO:s,1e+9d2;SA 'j@VP3BEFcAUh_9Y&Y!cnMsh3>af]Jc#Jn^tn?BefEJ]jMMkWIh`]sha.f+e< 80^_)6 `G0=YPUDm_Efb*\&*S?Op6kBI;Ae!C)-(DFT<q_<TF=+ei,8oY5NDA+1r&qr p?''<`Vnp;SU#9.>>2iQk2tOftCQ!M8V(>S=@$RNZ*n=ANm2B1kN#I)PC\G;\aEHjW#XrRo">"YFD(`pU5tf)_T^8SLUZqU#.2C6`#EpJtZ^KL\.(n/&-0WWO&mr`1RAF"&LtQ=`<h6?#:aJnh@H WR,p@l@+(&;2e]NC?cj8e\'!Xi!^7=dDl[2^QfrrXMM0#aNVXB9&Z?02.^4AT#Ln;QLCcN5P&45B^cU)mg.,IJ'/gN7?>i?5opDmm`Aahli-r,H*1(32^Lc>n<,]]-^]0GRn$l5j_N#hr4;*.e6P?-$+Sm%PU>9as`Ls1-JINC"eFPC%5sATQF#,s`I;SRFCN@:- C?R)b:tX\:iVpK<`0FR`].7k:KmlBjI[,Kq"@QGa*QK.mkd$CnOp$5WW@GqDZnJ>p'E4,/j[_hX"1@)O=A>`-Qb1Re+b'!': $kU<W"=s)AnJ4.:4&'S&IY%#CbnJS5;_`!42!b\c*%%+"@F^n]5PfCjORH.-^?_t=:Rd;/lb=B8SjC35 (?e>B`bgrrJ`&"9V7e/)i,(:I:7j5gh \,&I5>&h/^C0PctX-3s'VAdCB6sO(F."4:OL50H3+o("=n.AU13=OsE-Y$8a0jSp+!3OS0Kn%TY1VjAGI2EcnJ/f,9?RRWUhd^AHAcq=%NF"c_O6\5>lK<IKE"!6G[/=OP!)K]mIYCL_T*U;BmBtV3[1j%_t"OI"c@`#Ib2mim6e*#KG t3Xpsq\MlB>L45A?.jr0B.]XRc(?7?6MB0P1VWEc1n!'/N!75,.=!D>92_FPLHmPDB"qrJ'BK978B3g.6(Nh/-#\"SsokOb>DcfJ^eEa &.ccAH2_bdA"")'I)7Y$3o%@l]RPrAogDj;60"LXpE/VtkjV[%4rYh0NAs\a'_0/Uc(iVb_i%<A$qs>?>[GTP\5@9%iiiB6r91qf0At[t$D-_:5^M_7g?M_ZQX][6OMK8"S,Um8@S[Z3M&;ZYT)N&_LA.=c PAl5A@(\5jB(8O!t% c$s?+Ga?,JjP.,#B_DI8,PPjK+!e"^dqNKdd[jHL'\/_eaPifs[2CiG^nm:.gjV`&qak@BJ0SRVSQhtQQ2[A]?]k)o`lln\^4I>6*SfJ#tc-'aqt7+?h-.Xsn!B*AKV25a'NH,j&YQ[?)@PK8L'PZDPtK>n)+F6^Nnj,ZrfpjWg:H+_*>/6K_]a?#mn^dAa:8tDLa3hi2_bZ@rk?g9%a^s&eW_#k!9Sr(mM0CQ2Ot`qnBfR2Pm`8hq6"g#E9XNF)th%(IH=^FM'bso!)n7Jg3o?\6mQ#jlI0GU5G+#DmC(V;7@(^gs(t\mJ*V&\V\@Ed[b2[[>N^;#SIWj#Af-cF!m,=1Hh$R=VSclKEqnom[0,QV-s%K#!g9G;4o@c^?;D,jRFHaaCUh-L -4\6TX)`\IB4:4(+Adeh9CQf=t0(tO>n;C@Nh__?Fo&Z8]TO:hMf_+B D8?+=PA*bE<M'6@Y!^eS:G`apd(:5)Q>[JP1=Hjn 
aYs>SRl"ITj4qL)ILm$i\f^Kmg)atqED,]34Mt+IAA,%Y ignFm3&[4rbl6#c9+aW9/R+HSXkAA'.Ij2>1(OJWF24H(SBY^;V1%&Ora?U#(mr]de`@&QJ&(^Zl"9%N"`t_[]E98>c?sGjo#;nd*,kW=>r_Ddt!Q,Q25FgH72eL*fW2f7E+<aAn<gZ"PaYaP'p* OE>[hihR;bqmDooi9`jiLm)PM-(A:i=D2%P/MB)k@'dTBLX@:EKK4`A0"A)`1LmBr U$ql2)AtCmUsDiAVAWb8""#kV=b65,&CX!H(Da7sdd-<=hHHo03,OLCcC6* \?(_T)47RfWfPYc_eZ&%1lj"0W/?.=J?M%orZT5<qA51WSnfroD)`ccg-\K:+HQ'h8E_ALTimoeA1dXFP3 CI/c9_0%J]DE=(Fa''lR4<9B\'5s.2Co580Sri[A85n<1ro/"#)_/-A3*),@mmp4UJF4'j4FM[tc6Kj9&DeB<gH7=1AIN3fB):e@@jo\'-teO;2`M!P_Xga'C]U#mSpV(CedWd6/:\A*k6_3S'ca? $hei3j$_8JB-W_[m8oDjQ++m5lj*moJH$3\hf_<92:eg:^?Jb></is_#l&CL7<&b_U7Eh,c]4B9j*!,_5pH3<+BRc+n2 'AB])MT\eb\-XF5X^+-2DDJ7 FN&ra,T;\97@l>?9C;<TElZRc'VNg._V'Hio+fm]NosF&s(R#$tOfd=LdVt X?8E^W /fMI41;Cj]e9$=^m3<m?O:%11F7P7h$gleBj+0!^0=;BE+4+`Kh8Z[T[6prYkF%]_a5+-pk 4a/$BrLkY>#*N S*4l6*R_70:fP>"mlEA-#DPXVdfF>A5JJ*\Lk&!.@CNtQLn74,gN_NK=h'I`RAc'Kt7^AslCRjMGc'j+[,W6$LbUJ0@GLd3FK)]81&P8b]DT;@^7??c:NA]8ptTrR&,/eX)I]MK&'q%TcPe(TaV-er!'3j.iXGAmdG3$GZ&G$')>/AhbOtm2T@hNq\\sfps -L248n$^ALP]ho[@1/.ZZq3IWG!*2WXPs%[S-$**YGC?($A-;.G-NOa>B&)A0_.`Cr'QlM_=>GA(Q%Crtn_6Oi+pj*iq@eX_As'nS U[LJtt4p3Dn6FHIY(1I%D4\>r78crA)<OVqpHbm9k!Miiis $I[O5\HVS6"Tm0L$Z;k6AJR':(]^c&C2)_cALI:8^P: GD+]r7Qm<mf*?3@Gq8 fP%YFr=JB#3j"[I7FM+A?Nl2qV((8N!ARRZEFm.Ha_C-"l]fTJU'-eD/+().mkYJj G/":CE2NM_ngf=(frO3NW4KfN@<%"ltQ;6`MMD9YUL?YdKE86Wr-3@WP->E4aD#\hU!][mh3)ME5bCo(OlVLEU35q (fjKUlBq^qCn[%N"WAV_8Tb-EA3IM]4S'-5\phTYY3SFsCn'( F<TWXKhQ&!Q,Nj*!o"],M5_YfG6*EHcZ=P(SiAY%1CZmnIfeA;Fb(hIdH3WA#$?mHK@OFcrF<.pBUQC46T2'?qr5`>l<(cLB<p:*jo5FDVd=6+NB1>=XG#rVjK5P"b9%bJee_/_AL>D_Oa;XZs\ JFHRP"n+.#AY^.$a1p'j+;3@Ho4^)W(C<;J+PfKr'DW(qKn/L^lB@X`8[9\.$#i^,)RCE).`gMXC?p_jh`.d5$<VE<n`!f&b-J2VHDmgXs_2T3c<84Q`r("Ua"om1.F8OWPc8f.CJ=^n9MC8P-50PhA"P2Ibm_EWGs*HNR)+'Se-;#X9OC)B3DPjO%dabK]:^VVEmM*6_jG)P\IJ"gpKm4 tp[^N6;6!FR!nVFT^<gL3%$FM>Vb4^h\b;Q5LY&&7"BKkn ]$^-`+S9M'=!f:Vh643P-OraoJQnn*@AE3n3%kcMt"dh'CRBK',l@6`?Z)M7_1<DOZ`[K3h1<.FH?lc9-_:Io&d2Ta#SDr"_tRBjI8<co`QL/Aid)lOYW-Sk%*eNch3K8`<N-]X0)(Q!`HVm 
$^%LB9paS;qK Ad2t9c<:BsC!g#NA)V2IFnr0kAZN/..P0 $B=moOAaJM*<sqLUPpnHK/E5-f 7YM=[F$>*3gg)].<hAk\,72dH`_2'eP'36F5%pQ8+:FH'Q:;2ZXXGIApr]q[s65"? XJ g&r^=7K;G+?2s&->:Vad("+gft$T3aW]q+3o(d/WcSW:_B2#e@n&mq7N7W09],V[Hio8!6OZ%nP=LjAl"]h1@&T`8XDF-3F,%MD=.Y[j8#Wj6>=8;2nLO`93V8HiB3GYjW4CBdeI<)W(SZ8cm;SCGgC`ZK!$)Z\CM>W!Na/:VK`MA[-YdoS->HZ5Nm+^R[W^NrOI-(c)J6HCC7e'M=-DaNcPf]4r;rTdl\0[>6"\1)kHV,IoaP2\F!PJ2jL1G-4Y?)ML"a:G^^/EkX`>)@_#r$TZJg6;A7YWf+rJ5hec<&FJ@?3%;]GGmB9DmInjAY@^AE24MZ2Sb#(:!ke1J3mU0'q;AM8XHLgrj3<&Z-]l!M%2A5.a<RYhIK[!9n@8QDnJ6=]^OVR:6->f>ZRh!qLr)MJn>>0bbDLRR<[hq$8oi'2h.blNaY .N'L/'h[&3*\Onjl/LUoMjA= FQj<2l4[>JGsCM:`Q:#[QL9/U9V>LZrfl%=9OAtLg3.'eLrH75.:Nm,i>Bd7 3n<2-q8p=?`FM"Em6g_ngd*WND4PtcgT)ecQe%d1"8A_(qPr*iW9N#="aBd8N3bKONV* -p[^^h`/C$Oc[b':)#W3>rBBtQ<;giCQ?/kSK'j.gMK8i>6`c<B'gC2D?tQdAFOHra3TSQ#qX]a8!3GC)j!35KLV,j#]OYJ,c*E-^&OFM[0,9j)sf&ORm@3*2tUcW8T5%c"OI#-tZS;=+5j#0#4GPAN[,E.ptI+g/BX_GVN`/)OAA&phEaYAT*UJ:9Q%BA``b7q8A $?CaTPNVXj8R"#kE.22A`Ch#,stA^Or8i02;<qm+kF!Cm16 V'Or& )KQ-L.o>^h$6rX>,2t#^r$6Ai)Z8q^=8tr]BdP!WAb*5=[J]Fl $c#Q*JPX\^PnSB8BBSZQ2aTH5qT;:\$en7K737i'C.14gE\/ FK$.`6Ps>k@6+l0G^7fAmgEb\siM4rVF?]2X=Y'(4]O)jBFdVQ#ER0=.BZ,scQ \;l`>Sc3Sej^,YRk+ka%l,eRoANq5+E*$=edWYdJo<#j,8]YIK+t/&6<5.WW2& 6J$i4l[gj.^R&KtPq:+\:AR:cpX4M30_fl77Q*j 196-t9i>PfNLh$/#&+-:tIq__jbj@fj@:.>e?Ysd?=WF`Nf:mSM.0he25-Gbb3s<&#9&Df :$OJ9X%gMm"=`p,(otl8II&M(kn$3M!UI(M\Y'/Cmrl$@AE9 _;cip2JS_e4plaNP)S(9;76Mj]deeSaT.[2b+ZJ1A<<n,>oh_/t!XWI\Ac0#@_m^aT6Ng@0Vl3AscN,ib65]4QoCLg58d)g<O87SsN_-eUS6#nccYZrI<K<P-+QX>dO<?&k?M@JspW8GM2hG=Tg2q83k@ld'+:#(pfRr#09qc3Ue&!PWg9"hYb^>(&&qJQ=J!T^2;9)d+0WXp@E9/n188"eMrV4`Dt06]dY`pX;hE)!PlT(CAa+.MNF3C@CCG<71rkh;PN]Co0VcY%biWbOt]tLB"__IU:L 0? 
'CS5ZlWbAlG#$"Y[7rZs=4UiCPKiHQO[ai`Ocl-/R@a?FSFaEJ>%>:qhrPK7O!qS;#EI\,Z(A.nl>r:LY2N47la4e]obrh@0k->>sTFQ6L6T*$AKBJ43DHNtij?HMh r#d.WLOCC"@LPX`-_q1Y3bUHA0<\K^k0nS0O,?f[lM'Y(tBM$2A4Hq-F!3X+,X\"MH?M/6G *NTRg\T[ol5"V+(T@tf=a5'g1(iDWr?9b^D12@6PX1a!C3B.M%PmMNXI:1k9NegL(#9D,T]c;ZhqWj;:[CVs-nflL+A\:7L:1Wk=1e+%4`J9go(4ap9h(trBJaP+3!]LJA-e-lWMp42AfV",>7<S;04qKHDNAFrk3<ATKbfBdKr.;(X/UFa]<h%-"IK!t55i369VY_f?@=#">(5$M58/_UAs'9m$:,J@Uc8jefpVg@PNXiF/b^bV+],1];Um31DAqk"nE"OR)VR/^nN `giO%X 2ZX/S`&rr5\6Wo7ZN3^!SN'nq+P!$7S]H`.(&gAs!?V[6:GptG1Z(Der*Rb]1^D--jX(gPGZ4Le<C&.N2/no<TT"lV9O`*WfRq%oBJ2dKVhHq"f$tPUCI0;BYZJiK"^A^KW,D]^3Z:Af&+6L1A43J%I0YCkAZ^7Tf1Z<[H/?4o7eWFCKEX8n(/FHTpPg]%D,]T%d=rD\9-$sY\O[qKGVL]^A-VVD(__5%t<H.6mlHXGX;!jg<#/p!_bLmP[dV%Rh\ha(US> ZMP*IcX_A^rOeOC$To5YKg"%r/+-%h%iQ>^0Xl$\dX@I` ]T2h$kFhcQsb/VjV<rF]D$!-Q9BWfQt6V<R4V;2B?$LC4?G#[YS3rt)0IdT7*]b/Y6;)pCD,r!lMFcnng_+hVMtjD_VURmL3=jh1C=[<U+Oo!^U"'PO;f)spBB(@tgi4fSQl.%]*pM-^5F.ddTN(*nS<9G1(=mr^b'b0Zh+B*IDW#SF;NS;#F'q/FDV2^qsA1cWZA< bVr?s2Q4@=0!,XXt%/Q>&g s^O,."(KI4h+ARfselg[`Z;WIeV=A1c-jfNF$s6$/3\ek;Jb PHk-X9WpV/"JMT1r4/iV%]"UDg7%f/k#N\!ioc2M;CN1:4!@+XY4d`K*&/Cdjh)KO5@YDm?8l3*2 cnSV2KYg+-S,?NEAF'7o8gE]iVnYNXG9mPb]g [j]1(>!(l1]j5`7E)D.th\X 8?G/dOP.)@)GRSn(EW,,<<E; ]p^+6%sP%jFm8W(aiPTNnPhT%YAfEk_dkTYt<Y9 CdY2%+HA1+3A,G;A8UmPYDa8A74.'=t&B4ns]lo6[g#[UnH9#phl!^;(ClNtRppFs)f7B+@IoNNNSLpj)";cNF"liZp5^gch6#9:03,:"BMC/bJD6LJVho,.^Blr0l$#]#oA0f!-8;?iK0ngCPdtjc8RDUfa9Ql'CC-O>JAl,,"PthC@,<H=2#t@1G$5*.'#0f1m@+M R7/Vii`sIJs+7gp%f#;*I'ZgW7$.^-J1^L/a+P C1E66fA;CAZCoSaR$%q>(Ttl3]S+2e&%>.j:)0#[1*kD$]!+[SA8T@jt/^iA,AE@YQ*a/[BPH+HQ5 FSG*S:,BiW@H_MdBK76.<-k75-AR.;6"PD(%0INhS7ePls57q;J1`.aK-E<pjdAsd'[noGf\N5*gd`Q58n`VddhUJ8!B!AlK3_aT)[=Q9O@Z-c#scjPlR0sN.A_HL-YZR]8O#SigH"f#`mdFR(je> 7_W.mK"[VZlAEPkb7kI45ibQA5LonVs7Ds2_!&;r%G"(!o'FPDT)NAbg/.R7FR[ll.f]#!1eC<@YqiCf1q9YsRA5@`^%EfYcdCjA!Y,6 IK=VOm&sLmA1kkC6eC!*0&e+L"^)pdKQgXAaXk3$mnG(43KDI$Y-pD+h>oC9F`RITFR!D'oX)=OE^eO1IOQ>%Q[5QY9O#ETf!\dc;Z85KXhpALN#4g*.Y 
q`O5I#XUh*AUb]`<b2#)%OP:Le*<>R-+TP[@Y+bm6n,N?5`#-hoDn']>A[]Ar?[r6tAA*1;9q/EVV<3*>Br_)kk#n2HP9j.8#c+B8\38($=M+';<6e4SL4"1Nd&.O4eVkT@;8;fMXBrj%bWCr[Qcj110"pWRck<pJL9V5\0<b;Etke6AJ5/SR"jpL*=bY/%>p^>qnB.X*cm`S27'f]\0 "l";j(Ah.CA +$R%%6.lk3BPTK;T'aKeY28eQg;2%:bi;5EjP^PlDUZEDQ9 Y%s/'AO?s&+4=&ddn24D&0Z$D>%W#[>&-nAgX;78`-5hj?^GktMm%W=,>KjVV3/a;ik.[@=BoF\+fHtBd3Vc'Rrpm&+PN+sR31AAj@kf*mp[IieK9OqS\=1NgFG5d?/G4`Da&k#!dr3n7jM[NQC>jk8g!5Ie__3"7)/Brqmk1tFUhXKi&q5[9UPdt];$tiMWa'l8mNDZ?Z(SDUH\7N4$F5Wejo]_^J7AngphnD)O7Kk:$;jKVE1\aCRL5aJDW%agA*les"Vk3$0pq?dLfi#2*'S97(Zg5PW*?8m5rjkA<p';]m\r'^[8P-M\Wjrc04Uck&&d'Vh2jDmR$=dj)Ej%8";TE!an2FdC$98F:;kW[(:e)#DFcYkVY*I[jkCH8T$D#%X<V05;C'O_$e3k3 n1m.b!P@3sj0@s19?GA\)b?6&WYl!P,lT7%#)L59:&+J!TO#;qR^rS P^U!+oo+,:%NYRMs<9siUDXP16Ao[Xa^.47@JbkaqD,"G8@_t=ZkerqEj(d(`dhR)<<b3`C*%dGF>hhjd9hk(AS\mB(K?i#AYS)?P[9(bj:jEG' 272%!"*r;tD>"X$/i0A?NJfp`n?dIA9>nb\1TOhZ:2@l1Td?M>H#Q*)>1$@nb[7XhGZKln*7K%,Jb@i4Xl#,_sURb_Q=eU1P<O4 <L9QihGJkr!A%#.p0`Ig$Apq;70+b_*J!g\W5!K"?$A//Ac@=a$9q']]K)C`hBM31TPjC+@Bam,FGfdY0B"'q3h;V<"-gOUb!?W^i@?Y#C/7#r_ts<CDIU6h0%?K\&JTH\9SktiY(S0`.fD770j=gWA)_PH+dP?f!-\I"e$1NORSbe/9:7<M@,?$&_aQe07 )<g\Z"J3EU,L#sU0sa?k3ICK.%Mf2%-*PVR96[1]m#O4"Ra`BAk(H&bVn9CZCs4:8>()+Xo_N&ON(koKO45`8 3pH_-30HH@K3bSIi7U<s&l6oR8C4q7'rHg%;U.ZKOW1BrV8<4)t?1*d:A1Kdn7Rd'i)0YdC(r3ASmaXL-g0 LN $0GfmtP'9XcV2cDU[1BCIE*YA2^d6qVgbWrdgDf[nYbTqo!%&NTrJ37W?G9pR(c_7IPp2E^bY&D3BW2!&Q3Q@BiAB=`;"?0$n GMNEJ==d %e8A([J?LEV Slri:<O@k^]%'6l qeQBd`pkYcXQP&AaAi@#K3XfEKkPmQ]6(Ism':eJ\A3(8Bic+.Zk>W*9/%F'SE7I?]<0rmcA43mj*O#s-U$ZeU;pTV[lYbr]$O11l;_(*.D/A-H9)*XrX;7'c[XNmXb+YIgk9[hQHlU]_t'k&V.;qh9+N %A:A*VSRQ\:VL9 I4L.ofoGWqA\90>\-IASNB= 8?W7Ef#hT$<NoV;aAi4Q"I9@9_VQpiDB%mH<_M8b))A32#_dBWr\4b,[ZBDbA-iOs[(pEtAXCOM)08#9,1&EWmbf"Y1((#hq*`+Ir:UnX,[mhU"nAWSo-k6_s?$,6*:Q&&[1#4=O;RKABmH3W2\jT@mONSAob# 6TRUk9E3Ln_&_jeerEMUIr+LoO/$>s-#@rZH2\ESG2tgon`oM&+9OgEU[T^/B`Kp.lDr(G #r"bOdgjCf?PDPZg.s+5%Gt7lPo"A=L%0cfi`NE)U/&J3^02@-kI\=f0radj/R/4242%X$&pj1`kbhm\F]M2?dnK(T+n'>D75ZC\OI0Ocl0[`2:#: 
Zd#bWXG[IRSRtj6!r&L/DhcLQ5dl!4n](httJ507Zt)b\=gn2!giP<jo-S!?8p8XM!tbL^)darUS7X$12WI[-,X3ore^TZ`NOM>Z0KiI*c8`dpG^`N -6"dl&L2[%\G/g<?8$'#2j-tW;I2YbAqk.!@#kpSObh*bo2X&&?sk,%f!*bVFjUBUd'$64(_5D"]?tg<#L1R(Ugp#>O<Kng1NAN+2$UNYt"4NY[k&\dPh$H/W=MT83K8F[Krt8-01CP#l7>PPQTp6->6#-SpLM<B37qW,l1&6-RJ)"1Q4:&f@"_:`Gc(KG\=QOsQPeA>^ aqAR#"])_*BgmV!n7:VU%9`j83E$F[K>JBe )I&4hD"DPo)^nML_nKQem!rJDojCaO7hTLZSUFl=^"al.;AiZ#2;j"ICgk!>#E(7p/%Laonf9;qrK!7&+KeKI[`><'8ml`a=<]TBt>!8Tl#(.JAs*i?NWLL>)'sSpV>2qp0FUhOg'm/@8ojW'&pf-\/hYo#-A83*gclkEZ5/3XSRd`Ae\+7d7fN!8GO(e4\qA&4'N pUR]rnlZn&oWCCiG5q1o_"_@3XMb\IWf$Y"b^G%^,rGY.7/1&&mF0<+cf3jW-WbIS ?'-"B6iS.SL,OrS!+"cGX_DQ\V<r\=aaJJ@/4-YL:I'ho /.[V6@POCoDL4e=saZ?_Zeepp$]:F;31n`C,*l85!-17LM,]aZ-nDoi<Mi<5U]A7geSPc>5[6^7bZEeVP`C#W@;XaC^ oK3gF]9iUS<,:J;r1&U)Af(,LV;`ikl(b+0@#YY2s)%`2Mc#ksHWVjk$t2iM[`Z$%t<PbTEZG@kX4e>4iH*!AlRrpVHE5&)/S:sJ[>U)jXMq)4HmT#o0f;Z*@&%-LB([#m3";cEA"jh"_'#Mqn1,q'Lds ZL`b'A2JOoL+5.aH1i5l$H4+ ;*Nhk*gEl(PA^a;ggATU[8!3$C;JLU2ne9)p<$NV&Ma>W"Ak<PtFU7jNFg'Hm5r_o[_ !+Zc=>2rDf28\.($7DnUOGr-n?$K32WYa[Zb^N6%$,I>[>0mN+rn[A#Ph.>M0e@A50.g(%eU-bL\.bSU&imF1^AeY@GC<8qj?Wc"+rls,o%,QLdCk&In%PQnaT<h2S*?31`)"m8=#A2St:h?o'tG VegFB/aFA.hF7i&@bTGmsAl*0tVH`)K8nO%%2IP3Sijs8ZlfUHm\^[_X3V\+OEnd`h$>G_g!)t@O.7Ak!od5-r-9=<$Y?:5MAW\eD(8R4MK:WP?V6P_OUPoYnCW9@#/>DKr%2q5PPP:?)8L#>.n^C#`C*2k=)Y'\?2A!IDVWa"cHij2C(g#ht8S;9<]Hm@.`$]q#0":<k['mLNp!m)(&M:67tG-"DNdfTGrIFqp@( -?<C:l=N*f 7Ag-`/ K%gdq,YM@@L`lIY^6icai*nn<9>;oDCZ^%-o<lS`TDAQkT ]=Sg1M%FMo3DZ5ek/7*la`L27Q$5Ir1q9Kh7lF^PaQGfbg=M24N*];]>fcAQJp8"eGZg61bW^-U&:t0!9N[caoZ>Ib%,2$.:$;o61TtgB$mUmT:m/K^k/sX\R9[5=fk;#E9+NfC,Q;R3LoOgoDW02QOIOQ`M/TZWXX%7\dLI&fk\n2-;A0>3Ua>+&BpV&LeF8_?QUk7er `+MKIMqT7hh5H%Y'!2ahrI!-XC!aDWd gbdb-a7+"+&<$\s54nDJ`k8n"A(SP19r$mM1ArGA\mD&8UK#o0S(@4ph^[IX]@I"Jgr[gse66b5[AEG@Fa61jSLcCk,F7sF+e%\UV[_0`l)S$a<2WGsA2C+dg8lec]'^3l89Dcj8@C#$Il88b'H99feK%a/Ee>Q)%rOWf3kbth,3V??N$H,J>$lPUiA/gD+#!cJ=a4]ae!aD&.fL0e</@,'c)S+g.sIc TlJbQ\ktIi0EJ[i;qN49b-:%cN2EE 7DrpVO ^ 
(J(\a,=;ZN?E?V98s/^ik'Xb=E<jQh=&o(?JTHsHJ=X>67s+g_?D*&aU=P.n;4EM]Y/8f4qPVAA^7ML>7IA&>*T =LP:VB"Q$m_q7U[X5!@4Y!pe(UF.Z#YU"K[[2GA5N:a7f%B]N$$OPD_(t@MD3<s$]A"8K<1ff]MZsD(e1`'A0S?[/eK;"(.j9c<09/58p$5YbeP_LS:1'K5K\Is(9jT/;.3"NtA^"WG>r_UEa#*E=Y%f(@GAt<A[/^.K]qDLA0$a#Bdj>qfV@d\:0]ZN;B?JG;-C*0(-OcqnDQT?AS1Zf[Uk4s\>`08cJ?h:*,$Yp$#M'A^jq'DfmVA=QJqbo''r<&f:&`*+I6m/,`Lie:5lgK1S+e',SAUZ0>2LOB;_%H?/?Mt`MlQQ.5<q&Gb_i:e7#ITqAL[8nZ.?,^!+Ofp78J:rA'\`iPf0-ZH2L3k=aiGI[cQ.8QeZ_oW;+WRsNEPM5P@d]k2R&+2*pM9-8&IHDogZT@T6)#*./ZcO*J0TsD%/Q%C"ccToIk#qcn?'Mq#ef?@ZAs\6$"](_7Y>[ga>tPdD]&&a\8Al-Yn5B]QNaN!dkJ*<LIpUE/DE.mdb>!`Rr-;%WZ5V_FgbX$"AMbL&CSYRhS*_WA0qZGn8UYIil:!.[7XsdcNLYm!=@kfg\onU)FT^[(GmW6@lm7=2rGD3.FO2b]+b'l+VAo.]c/sHE%LKr;l@+A9,BP&p##seO-I"RUJ-d4]fhX'4B.LAI+,eclYrVadm#aeQF00\^MU3R4eUj-hr)jTX-F(SZ3AI@=k8bkn"]>&.(Ce"fT\50#OAk`RedN!JqpGEaG)Dr@a*<]3`.#k/56P;lZoeQqt%olh:,_O"37pZX[B$Af`>r/&DL@B\1tso-;2.frdR/[@Aql@D95JQP0+jc#FsrA!814#c!ON2<$bjejUOZ*9FO\=4k_[[oG1L)GVVE.:4)P'^r(JHkV5HAj+`Z2,AHjJmnhRLK$=<;=]V^EV-J@4XHc=>@tYL<<Gpkc`dpZ\m@$N\>d6 XEC.sm(lp0UAp;2 ,OZjbJmC4TWLA#5,Q4$[.[-nXI_/Mk=0RZR5Y*?`4,267N`Ja'a)9CA:p67de'Va4ZTs5*:_g>P<C^Gj`>(^gn<sm `-(4Y>8T;ELls#Y0qZn%%KZ?"4EYX8Q&>cl1]5` V/rMX1?PcbEN=+mf?_mOgpAEB;MQSk^\1(J])oAdGL,F'KB5iR'(:#i<&f[r-9Md\E(#2h%Cg^_:6ISQ+(Hk;fa7A(60^Mp2(Fl7`^283p."Y.bd3CMbK,b#/sXc6Oh:pKC;VS(UI<2T9A%t[*T8V'_AJ)m/P- #+1i#90\;(!)e<3j$pL2L"#oi^9L@)j,^>si;LXOB3p;tW1UHlHo)&GEQMN]_A^DN2=$;M3q7%\\=0V0?FGd269qJSih>/acd9njGn-Q-Y:n%>For#\7'o@Df0Be._`)j[#ah"PD:-+&k0,onnBgH;U.qN6/jVT(.U5Rp),hC7$_b]rT1<d3r%W"L8!M.3=`52(1D<lm#THjK#3Q&&WP E=Rbm?_K:X['$[*elP))Z-(CJ\^*s\UN9J:e=6'<7bmi;RSR3.M^4q, f-@"1hTG9N4UEoBWO` =q-4"Ahn?].lsqEF7ssC?kgR!H2\HDg(-%mD_p,:+M$SB7]ko hKq9@8#8S?A+pImPY-T](-HPG+Z?^JV1_!:P\3@[@!tP/[4O*M<A)^HAsNG(L^gSScOm0)2=;$8l3q<Cn]*?oWj V[P$sp_Cb`7*OUO0[A&7;1R&]^?alSdS'A+jMhc(cVX$dS=c7  $#9N(qV8Tm?fDsbsr+3WdRfW,A.%bSKE+AV%@eAIG* 
[3os5V'-1(BNj-Cq;5%*QVOGRV;nnV%Dls_*W[@AXs[AUbUWXVSrjTATH0UiTQ/;?(H>tP-X%`dSHZgI*1p8@eXK^DdYgWOt59-DED+56A))BU]mU:n.>E"-b;caM=YaZA=VB.k4EQU9qH@5R-K<!k6qT"V\j9SeiEY#"bBoA;HIUof: B%ACMn&fPsCpBhqa68)6>>eimVRI&,)q(F:AaE^4gGOjb4XtUpotYh7:DCk<`G,d8\WDn@pnDW=%j6"#PU[&?1*9R 1nINUf&LE5*tk]L=q(`+Pp'A^QFg"Ff,!UpgX?9_FhQN9 WGn%"6/TXB0i=@P.]qGlBhG-TV.Y,&/gU7AG2#t6+ZDM<Hf?r4/`#5<ZgIT!\ .Il3J7t3`6fI>hM9U]EaeVqRm'0@aA!_4XWM3-i=4X7I>*e#4)N@Am"8O)!YZT??m@,\CS3g$3!2_V)SoXh3ptCNXA'BS?a+Icp$S0Fp,+:rO%TP;KW'f2Lh3ap0ngKVc[JIMRsXf LT#n>6G39k9dj,4V89ccR<&bCnr$Jft*ALJ+Kd>,E<P3s:/#)VbqbsMO6`hT4J._f[g-t;"<Plq72]cA"JSAKKAAR,\.JWQ[a$(@Is[`f*rjasif` Z]1.U B<YfQQITJ8:-Ai\P>m4p940ItR[1Lg,A:(WA$d.Z)PrBJiTPn;kB>7(/=D[Wee5:T;n:2CfWM`"T#nW2X+_09\pr;KR0j]R;s)9F>k[oK;c&.'J<%,Atfb4a+8?#U%M7RpAT$7:!ir$YDY2$5lL,Gk>#JSbEA,ZgeQ[BEg""FFc.)5Gdm17UAV1@?E'No2ETDfp!(@>0=.Bf/o% lS(b[:Ji4-9RRiQD8IT.!bVtCSh\c\Rs7147GpTR*0g"o`D6 3;iHc\Qh8ojSW9*Tg'.RU@3]ai]O3Dsj-a@_0TZ_9Q?*<EcLfr>poA90%<S?Vqb!b9I'AFo6hH?Ic2WN-f,hV)KAUEUG HA8+cPIACA/\&^e<F.'MPD9h(tSIX9T.%Ps@#k9/<j^`CXnODO7/V/CFAmh-_cert^['gVC7C<U#dE!+q>g3mkH+->"sD,^TWp;W`-3pP??NI%QjO?lVqq3-h(.W Pm`/[`3F.'4.6/:@%b4lqtnL\86s8@QLZ\On14Op:jRkH0q52s..Pk,<0%gq"2%jdS>(RAVi:=7N?deR`1G"N9D 0`c] 30^+mBmYHW(,o<rC[\btn^G*1+rgqP0r&hVWVj_RotXB0l`a=o,\/k!?]\sDV0Y\.`rTL.!E4UkWbVV,V3V/oO?MA;4-ttr?$_26m23.e*-l`TTSX`IQFbWS2\0^Zpr0NbO fs/p?V80c)_!GKkp=rKAsInNm;Ajr(C/Y@CIAQ"$g*Q1gs#<JjIn@N?QA15Y]'+^L'hX$Bk*(.[nA[:jG?`Ah<S)Z,Fgh18cU'e?E]E$V@hIT1]Kg,%!1j"l %("<b>/:*g,VfP/?t`[:i<F@RRZGC3:4$ir*2%$o,g2A@+&NcZN8#OSIFiF%\Mt!EZ'4!e9;i(_'F7A:LaA&*GGgBLK"g\h4:(3',dCkR\"AZ5Oqt_m6n,N5N<!?gJOW[fZZ6c:iJrh\%ir"P2+7n4[sMqZ*EA^YHgTpA=ZsA!B1o7&5XqG\jSMU+i>1pd]g___+t6IH=JNUI>e8SCpa:g;OR:eECAj`6'8nlo@s53e\WB8j*DF4C) Jd`K#IPBX?#N2@#8OK*EjZT oMhTfLjt_TqAno*3`1T'.d/ R.kffa6E9i @=3k=F&h/D/n5Z,c tZ63f$=ma!%BOq]1%n's^k'ZmsGE4*:Q@7/JTY]iA&\KU1q<$bk9N9j06e]F&m\207TRX[4%d4g!<pb_heJo/Ln]Bf64Zoq\WhIRR3&,c>3,sU`cXAY9$de]E@Q!C%q8P281s_d7d(bpA.*`1in,f^pnIElntTT`1kJop*<<2^r?G5Y1dP5BBYQZ!btf3G#1oP'EY%N+'MU2q82,SQo6j,oU?.D9? 
rZIdf_AB(VUO!M*_7$,5QTTX@(AY7IcNcT>-U\r!$.rRF2!d$=MBkBQ*(A`Om5)LJ:. 1*c6YAI/f4o&%OCp33UWk ,#k15.:I!@]mT^-<;'_ANt)H0)!FSjpBV;7[;I1/D">_4.!p3V\CL=@sl<S=jk-Wf/jN$:fqip63UnU"d(9Ao-OY8_oX]C OY8_HdX"%8SXj?_/r$"<c:Z+4_-(A'%P JYq`\71]Uf9U>elia8UIF Ra]t^X=mFrQH.Z,kVnk`iFl?mB^;aL,6DA_HZ6A9(9n0sZ<!2/@j</'4+bH$/17)ADmR9e+Ogf5>T#d1GRR;tkP>a(q)[`!!Q9,OOd*Ua[>\5kXh,0Qa^j`Qn_Dc,&A\BF Z\]7M77#*e<X[tbhG&Zc\*j*O,ETQ<=$P_qL)ssU6k^mf])>'a>HGXk"=rbL4gF:E5cQFIS.0PWfh^/cMi4DZtk3;f;:]-QKmP##oD>`;KilN>6f&\?Yd-QmLoZs*7[A&UE1h*KCl02W)R#/c>>E!8TiNCY[NLh_aXo M_.7E9g9o]PCS`/)Vs4P9;fXR'3!XTO8*"LI37$RbVf8qYC]Dqp%BdUNX]RO;=OGd:\icQ)i )XP#,C3( 6_S9ksX7Ar[760bhT/R:r`Y-.\cr,Q1A9/38#YJe%L!rlC4mO;KCPMm2@p>C aQe(;9LdA.M .N\*A3,Xkk3p>HU,'f>J(m+&eRi`,.10dWkf#rJUAW!Mf$-6O""@g f1[Sm@@]d[nmR#hENkFb_J^QQBo@g+@2:e0Z2A:^^C5io&\?;+nC#RYC?-I*e[Pk3iA^\P]AB'q5)_ _C0h7r^mbr(VpO^>?0K(ATS5Rah[k j%@Z`[!Nb J3jAkf_S(\>jb-bYke>_@]4A#QW(M='_pm3FVqkBG1Yem83gB!E\XfR*!19aXN.P&@`V$b^+6PV/Ql1@Z(>`dhXh48@JClL=aFea=?dfo?V532QCAKYXbUlg8;_Ubm,!j+OSg?MJEMMX/M%[(\WdK :)ibP?LHj5)ai.pRQ"dt<=mA#E-`t7C5]DOVN!!mlLfC\\9&oj%AX]@1M"BB*bm\ >L.sAJpoI]m"IOMoRRmgD,Rh/k>_KT.m"8``%f`:2\bn1]!!B9Me5\@,?8]59]rrR>GeEJi$knYBNcJ7Ht4&TWEg-[end5AZi>8"WI!LEFTjp#e&jOIJtQh'%c^g14TmCH ^"`A1]Y*JU1JU>TU<#Lb4)lVVo1Lf%mJkS.g>VD1MTjo)-P&g^iL5YI8Li,n_!;3F3/e.\I<@;C3E"BIW:X\<%ULnr#W33Zq]KRYjGXCoU\mK!l.B7 7,/!_XK/jQ4qebJ/F9jh8IU.8hJ8V>]B$6_8NFc-bWtA`f.f,66;1o"QkD`1<# 2@HcgshmE,p&cm!Q_n'oWB'RlMl<9"DBtC;"D[P1:b^8gc!%-e(=?T2'_ZoiqWiA9&:57Y"PXrTU_'!UAfYA]H21M(lR57/e=AO'9SEUFG 5_^"^k>IhTkL<7Gq/gaFldm\F=.* (PA;n,QE/q@N9^$Vsedg+ar<k2=p?Yn$3?(eFU[m 8s<?M*!nd;J0%"ga21O<',U3GrDI1kJ#a8N-ZWA/iRTP_Z*Y/LseOL 3*D[*VD0nXSIE$]\5XYJg+(ne#MQCtZ":0f.ItlfdjR("0C+I0taUhc`J_)TmYNeQmLI_/Y1+ RI9^SJXr>;;&AK\V'!7>1#:_E??M\l",@4da:][-dr!I6.'lrarP!TA06=8*k-Am%osAr;G]9MdLjSj1>ThQ^Q.8JGN-YT;3=C*a9>#[B5NnablrMirPbjlD65!$Of:@[I25EWGM[X \nsIXF=qc`Z-\]^Vq2kSX=G;$5@LQ1>9_4*"4Vi"QmAkDm_ST*7;drHQ3if`J7ng5ne`%8aL4,f:Q5GS`LRcp7Z(4XVd^Iko=$]@V/ta'o -9hdlF@Tn]%86a p$R7Z.7bWcpgT,87=ZQHX:KN"6a^*0A/ibL"U%:E$&= 
)bjRo.+g`%JtkiVXN(o 52Q]K,XJ8JLf=<pleKW4i9AF f^r"=he!\rB$#,e.[o@OU(=sW@&fZ_gUE0grrg"g'QlF;AlTt72Z-8.[O-"n7Z9L0=L>T=$'B1;JI^5*Al'A$$!*PB/%b/5s[M=77C3BWb?Y\hb<f,/#aZo5 76G/NqTr0Y4N".E#0]j1nZrqUYd@Kf!Yf,/^(gf5a_4eaaMoZ9>$ceQ[pm+\:IL`Zc1MT3\a_o_T5603tl?dg?pr6&\8QkeVih6QD%-3,oT!$bc1[+] 2I"dh+QA1j<DM>rB>79Ud78"mnif$jMJ5qH>lr`ck 6[fmh?%2%9F8eCPJ/T7<0P%kP DFG!V4.N9IG"&-T%p,5PLA6PTA5`_6Ibc4lb9)#eLH("Q<AbHMkX+XiMH:`k[s eaAYjPD)Em(>=jOq),ZD$.'55&rl;`54' ]&%5W0HV[_/=WQs7b3>7[I $j;SDo&!c-=P<rOW?^dIr!aI#&e[b(&UNV_?NsZ?/<b$ 5(EHQb@S-M[&\"DHhoL3-_=@=UX'fiS5FXF:8!oK;+R\9NbB'=SQ`QIKrG9RG.oMXcrrtQ-AWbYr"I"GB50NN+sQ7S@E^TZ&RIL+^B4)+q2LQ&bDa//(DQ@Mg0+gRcZ1'Qh[Qq0N 5* B'rA!!NFdpJ'26Xo)5"<2dNRK5?R(FnD\o7iOrHk)i>@cQ].GPN3D7coM=Q!#SZP09*d,7:WjsF9[6<k!N-/?QAfK<&.1$<-dO30G6tV_h5T(45a"Hgbse"TXr:W1XDcnUp"f*R],61TVIHW+0L$YUS[h1A=c".(D:^eM@^\&UI30.aURP?P<Li.lo7r>72(17NmiVi)LGWON0BU\45;oaHcUOhhrT;T17Qcg6O@H(#4r`C65*<D1b)C(+d[Zfqqgj!aRoOtLHJ.7Y1fi0:Q1+:R:Q&5;IL_X.D;=-MrKQV>m0R4Z1WpeM:nEjHZ)sennKseA;)h@)a0f1@91A:r8Re_p[CJ9[o8f>^2kLA=3"kN4Kc0al_W+MeIk"MhrUT>X":0tOfo"U[A?h"AWa8Hd9Cn_>ULQY"fj>[:]EBAQB-9YW4\nbKj:aNm9(Dpbc[jiVb".Y,A^,F@".G=iF6"$G R0[;L547[.g(A+<TZ>I`+8!]d=$4IY< .b4K3&+0-H7eWgKL^34^bQ]*I:eC%qFtmrF11[jIQo-QPG>_XTraYIT#+T#bc]D,Y[nf7-l.d9JO%jq+=Zd91c]3/N%r-H=A)T;SMRde\KaV`lXS184CL.7LW4S+<^*2hA`PBFIUVXm3Ngfn;jCjt+VCOF(04T()pTfY*n]S^c0"!O5<Am+_b)*8*p[QDj:;/d+WeQYf^5b3^$CnK5MFBV'Q00JlLm_>d-6F/M9d.p+V1_AMJS>1(PWBZXG`hQCQ1]\7MS'im!Y$N\B%cJ<FqbtdAeO_IB4'L93HlPi[64thO$[e<'/Ci75^Zm)a*pU*!.JA #KGFL"P1K&b3E^ &Z8%((FRhS'g.SHL2CLGg<2C=`tjRO_bKolI%YlbF+:ekp54:pm5'e=:9nV0H"tSEd$$\2If[mbLfRM]GATD02`OG)KL,j=d[-!W+$.S$:"K@SVSZ i_TbXQb XrW+F8'Ph2@6GV=)%n5-q#1_$O+3q)gnp*h[SMVlJRI_@JQW_LV/oX;CEl>*("f^Qn-XHj)$ehjA?TmM!rLA7c9N**Oq> DK5Rm@[jes$PclOsE9EmDG:)5[0Ct`5_,Y/,/dbc\:ZG4K&0DbM.<]d`YWcPqbnQFYb]cC#\JL:^n\'s6A^GJVA*.8.k)Q##HI<lWWI:^9.cB:sKO,i?eIA<a488a2,;a"k 6brjjLXNb*24pC,5aoYKr*gN %6O)I"?IW(20FZWD1+1c4P-EnBWtqg6\X-*Id27"rLY5bU.q*sQCl2S':r0G#?bL?;9F*i-]/O]^$qS5H[SXpD?XFTK#0BM?\+-M)#YL 
0&<8O!!lblA;r*`V[hEBRO%pb[#5;V=E,ZSa4D@lKdq^R2`>+&P*AldtSe%O=5V-U+`&SDbB8iKa4"'1MA7*>/BA&m=W%U5"U!$M]l+mm93_Bs.&%VqA9%>UL'?l1CH'\#5D';<D;V>FF?ia-pi2A$P=Eq+R[$\3c&%![H60R^2+/Dq8m6^rk.0`YeU.X@2LM_j-.ZN%An&EHGc4qO5/ZqXLU4OgKnr0AH8YDn*Q"o&YQhsEY'^KI&d@sCZqq%#UE?tNA,9_E8laO%Se\R8LW?HA:JA&qsFB2_HI]k?'4$-@-GrNisQ>$gaAALde9n?Qt+&L%1R)4)^Tar\fW(Z9f@%D,=K).sVO^W"WVP[X/-M.aN'.hQfO,N 7o"2`5Qfg3RLhkNjW#,qWaM@:\_=OGg]fSKrh=[\B=9C(t>DDr(geZF_l`,'&"*Ifp?N)cmT,6bOTRCr=A6,s]1sB3n,5PpRDC@m,,'jEo&:=@%Mdg:<^p=d:`8a<_D=bD`f/,([?%n2e\YK]"D*5Zc@\`3lnF5jLjKcG'A4RJI>J#(':t<rG9Z%&_FF[fj'q>LlO\$/L=/>\0^B&W+RG.iIhaJ8rJH5 sI9-$*o=@LKY0@c\8oVO9_OZK]eDqm]7!9";e9iTo0^XZb<DD2)MAAR#d=%sk*7JJTGMZOWK3PFa=e\=>% 8gIpesVHaLrgR?`B\!40b:@j'Io>8&$fKts7%c\>8WkhB$/@It$:oAlR40A2&b(4"/d5+/'-k  H4f=2UZi/QWjF QBX<"/ Cfa7:JNN&<E4a-`b=;^^G\'BTG8R)oCOc_@H,IIG<&3hMr'*(<a.&DhY5U(Qi&j[D]>qPeg<QN/o,!FJW@-0ZHG>5?cBiMK$l>D;mCQSJC? dYN:Q8rhGa4&9A>dtLGUkiYA<Bj=(/(h=O7_`E@'/e[RTZ:U-)fK55@JRo@4?M_]L*Z9^D(SZt>L(N5C-/,NE;^lq#%c-GWh[A+f;20+i*BZ:+9=BrO),&(L`N^s!@e:WDD#fWY8i_^dYReFT4? 
_"9hh>)k,oE["p-)2b_j:)o$TRU3_4X@Tf-E2`1$&i[Ar)*s'-AZdQU9V(^_mW0sT1V*/W[m75A(_4jOjRAc+\Y9i%fH7$4DoZNO3r#^=`s;6(1-Hf#\'@&R-TGYKTNG-gn&"(jWpA_,a2`tFkM=aAPAt7)Xh\]g9P&S*qiO=+b&*Wh`OL[(g3CU3!d@."+<%?DN#5^YXg,^b+%b=@sA'-i"TZc6l0flWAGJF#pL':>)`_(baUeiis+5q4"s!VaH+X4I@e_8>^OnTMMEaJ7Aa4SY42kjoR _d66kF6iHA%eK&s',ib[G:>2 70/dLeZJhlsLba%R+9f1/XosAdmAZWbC[\g#_c.cJ0VA6gAB:o(`2>O*$NO_E7^1Eh!in[dM!Y8r>j@dtFcqZ"la>,d]49+e!Tf_J&A[QUCr,c;?HO%ji7g_9J)$6kd)ai-P[^C?Wm)M^ <ppHKsrS$D$.?WGiUH&<NlZ6^t75]Y- L\Cd)hC2/(<Z& NNN59)=<WT;l_%`?QUB+8G='%^S/l]C[pgUhQ-<a("G@iTE>\K,RVU1)iZP9lmC2(Hod:#@$9cj6+-X3r)Sr9`97JWA::m;rl[9J*2XLE#F0U*IB<se$bK3j@5\#fX7I'THa`tV[<0=>$P]+\Fn>[%cWjdk._ <LQP?W^AdkdZh\QNb "-e=hp>tc1"2eWIR9#K9Y^5&%jZ$L,LB_K+19gB:AWdR!g%9?e>W/Emc26++H.\6F<':j#P9_kH_KGT88=4TmVGm[[N_)C5>;O3+>i=1F1UAE# \"J07pp!oq8d..g(((deH1[)^[,= ((s_9p*%(M_^7 l"rJT?l6n:iU*dgfaM r#e-_*+<c3jIh\_$A>ADo%N`>Abj9'"FoLZ#db$&7Y_U^?4*SS>dG;K#O's7lFWA=6qH'rC:0aeXFB<^<pa;6'CeAHj7W\A76A0aHJh%T(R pFQ[9U<3q[f]%Km1lpM[;[)DDp6LE'mBAnm9Z:t*&6Ej;X.->K(DV i%3)I*JAX3NNL jgeG7Q[XHVTa4\2OA@*5B[AIF26I]:,Bla4,I*ecIY.I9Z?8*)k`=#]88N;rHN\R_9414grVceT'LM[6IF<n Z=r>]qaa;:NHd@W!`EAX-D5Z?gU5?bZ_TLR!l<pFX72/gN^Q69<(tEnoP4DQAh5ga3.7gfqog]bBqj)pVg^QUVM'fU2[`I1nO1BZ!*9>lZkUaiB?iD8cjVZX9B^'(/OVNm0I_<e[SZ5jCM*FdXNgUb=(E46$a\T!U2kETI46J=tbA_l8"sJ\mClg*+]DRC;Wj#NT-Rjb*^I@MMareE/3<Pc77KE<krg]k]*mI+j3rYA2*k7kEc:9CnbZ3BHQJB\N0=o[>0&6NS0OD;;bK;',UX-GlB'gpj-n[fKaEn>sUb05NM^$HNR-<3V((RDpGKA-PdUA]s<)lV@r&7(n$A>M;D`0LM\ekE\Qjd.';qdO7 9KC6g`Y[N`.$1`raN?o;qR'r Vm@_'JF59($l21"`=A[Y%\(GAG\qIf5 )N'C%">WGA,Q>!-#RTW/?XRB:_n)^(RWBA]r^GF$U@A=oI''fAZ+RY;!EOV6E#sr<iH44G+hZ;`hR_Nj.CWM(Ns+_$a $-mrSr5nt;(0Y)fI)S4GEjX=`Y0n$B(q%%cb1 An;1nAi&-3\#Aa3jU!N<Vs846k^W6M<+bALW@3AA"`ZlQi26;LQ4cg`ChhUb"a4C4Mdm:#k4`A#nT 2SC0/( 
K:#A`pa5P"[(E*L#D[QH0SkNO#iaaC>`!4"^IqAU)[p$Us9q&"WLp:@Pe:Q?cLY(:Q\Hf"-V9?>akg=f$,Dcj&7AM5s-t3`N#Al<:"UVLG+c&,\k>ejl_qK?j6`&Or=Vd"%Np"b7q$8Od1:;*dfnp[^UJ[,6t)YsVkC@aU$>]3S/LKp<#40TGfNP*+8K*NUl!!]piG]O4A5#HQ@mE2<<REeK$Xri?jaA780/G^C7>rXNH"K[jE*5g^(5R9!Pi?L3N[:3AWOUkH@(%kZr7EMbnRn395W/B57o7d*pN`iHQ hWbkC$j%ge:%dT#6LcPK81jRcAd[%s_$c+q>8hO)l.bbY&l%>Z^c6i>\Cr.GY^eU/rq2C"mai_W?,@ JTTS*lHpL.DD.\BBEe<:*W1db#%+*,Ql6?+X=M_nb7%,%lh[Q,['nTHTgdT7j?*ls;GT;;"%7+bnB<s&sgK]pH`(-?$Do @%&&dVVN.*!%bn*2+-3#NJR[W8b-ocF8=EBpE3 TDZh9MN3ogXFA_*oII7D@UeRV5&)L<hT C+9df)-UCicr?SrG\pOq;cqWJ!oGB0o'ai*]Yb&2;6:ra^a^PJ$ 2 4A1&AHPYOlXR%H6oAgigg?&@dYl]i3id>>EK43GV(j"DQQs l<0A,/o (+G71o"@McZ%>0c'Uc.]PXt\>X@kjXWqP,,,V`^l*L&6(d\<*cc4L),GbBo.TmE6/@7OA^/^gA;!pPs`Wjk!RGG2\nG7aa>C\W@o64tKl7SMAX>k&L,0V19b.YK_:P[DGZEPLHX-/`*-n\AO'CjV+&Sr/!>)8r>&diAJN)I0aWj4m=&&&=Q^Wp#pU_-3Y13Q/g#"EQMpBS_1YP&9\ABL6Z6d)f#arHa!Hj?*X+W0:'WWOGV/1All"Qa"Xk\Oc UC><m!$Q QcI>\AqRtCVbD)X<+9pXrR&`"Q<.0-M.Q'c(XYRF0^%>#TN-V1g>Gik I@1.R.4@"HA;hWF@Kh+X4kt$8*&N`;m7UefMH)W6p]ap'J)fQ;)<`BCa@iqjGCDNlQ&-A@^+H)q0CEYsfBs^9+$b!A?ifH`4e`5YcN4WR$q?i;hOgQTJZ%^tZ^3bV>;+FGPDO;Vd^"l,TA/^&.UI-A]979L&'/4bQ&Y==om?pZAVf8;@A(B.FUY4Ao3rcKMJi8&V6pjTW9 1L-$M^@?Xo=Efmp,!#2B0 iT?%d\^Ze<?OLM$=4q,Jd9`,g$-2P%Dl4<\P<p:K8Eb"BP=f;\Pad(f-.<l47oN.`qNYt@'4b`m1TV3qR Q,a/)Aae<tjA(.XNGbL1Kc:V9_64fTk9J&*4pD9sC_AG<m9bAT)oq7D#)f\eq''K8F^iJi?l9674<-\&bVa)WqfHFgkpT'])k*';jIS! 
3FJ?+YEJps;5DOl>ej-I+,Etdlo;GeXtq5Ei;N+QHKEa>:p70G6Ah0t$DC[:IgmKiSg7/3hl#Vp0B[;2dKL'SW%pG1PVQ>U_N)0^D[ALc/QTOaA])brkR5&Kk)K8WAUkhpSEj5nB]MLc#(T9Je#LOR$=gn>EdiskF\p `roU>)7C<G$L2gPH`rmc-AE$3=D]UZ@:UQ`q`=sO)18I9L!pDZ&nNoiOftb^1l(KBi9k#Ek`SmF&_d0(@t$Ybj^jnnXCVAOft4TD_s\%K_IcT'C","lpJjjtl7fGG1;J*@]BZUl54j6VFW=+QjTO[1ne*GO@RsX=j4F /^nFZWSPS=VE=RJ[?V$21hQYLS[c<1)jMEQSj3p;s)_?D_+s9I!jokC8[<TOg=b&N+S=[4\@r\%Q^BnB(M&$EA<dhbk;Xb!\14%X[>3Npbd+kW fo)kBat,qpW_YspA_A!fE/7g@YVTODlV?f+9LKMVI mW-7Ueks6oe4AF4]Qe6I6W!J8M+?IGr$&/r@EA,d4-hU$]bU-bAapk6/*_ 9k(1)<,P']K-_?KtcGg>rm/YDhjO!3lpL,t*D4j"Z63)kB1_A17LJ+B?#(?9*:q=X)VQc\pR,o:J7O"gRi#Dk18$\Mr!:"m*_dh9,8g0YIX`Zd0XjB!S+-Pft(n.?'kD%%`K>'",Oi<Ob'@=ppp(9^E:UL6cS(bc"p!:/6.%Fnkbo)gXSBA[WL7MsD^1 dCSt<8'0e&=p!<='NV*M@HI1qKX=C^&qR6_>rcg1L^\+iJE]!QEf$J5l&_nGWLGI8[B:k\qpJV1N3(/'/H<B^'A>RLb3G57Hfn25*[:*\E`^< ,<D "mmD;h>rr/oRh ##dl`7>i5"= K&;;2-8gsF"=(*FkCoFTd=_,7\^R]SKoRlkF8]sLMXG%Y[+im6_^`7fLgk;lC-)BpohMn0?p"6`ebR\'h"%U</1/.7K-Gt'l66?L>qBC:?<a?XG\7>A0-YHU%[^K.=pPMhB2K'q'oYgfnhsdH3:(LTqpV6;'q9pBsQR?X9^6W:F<5IQW#Sih2J=D"( A0CT9W_nKmV2,EH0$JWV*6D\s?!:8`4$)Q_366,!c(ga+Jd;lpk5gIE\k+&f]o"^m-R<TBOjEUfYIBEVMPRsg4Gr(ABa<UcV6^n-+<F%'R tL<YG=;T)D7FaVR^,#jEh^T9!=Ii$TXXst#8on/lmnnq)!"dZ@OtZ,M4rLbfW)t*aiO8D;)X+KT1 tY\j eX5lbP=?'`k<Z-9[eBi"cT" #%P1'&Kt2\E\/LBIX"\eQE8Mj@,9X[D*,9o;A'Lh#A/bHXI PdL?r8FcZAFXJP&iN%'g@Pa?!_%t_P6S'5V[H'TP9_[fFteS^oGc&bQl$H1?%4'S\o^dSABisI(]L7Z?<*1+YIZ*kcB[o2%og!N3BSI,UE)JGAlg;I5@E)G'AOS$Usn-BQ^\\kW-i?:6q%9".p^l2U-H@X78;WnF:dAcY`sO;9rhKlS^?Sp_dC:#H>)A(M#8%rj"NWKf"NcE$%l"!<OGEVKk[ftkdipA]>;&lkQ nB!cpgJ5hBh@Be7go+o8fd= ZelUGrT)o@.,EQ8Z>@=elER?'\DEN'iJrTYV*7&EQXk2kLI`CJDZ^,!dK!U1o1 N=*qq8[>t"sJn9 r].E0b*<rIe4""-Y:W1B%XbmC,n^S6Gb$9a@U+NK?k$3oJF=[-,@>>+S'Y`s;hpAZTDZHX+.DA6^0L;cA=B!12p#P=Nk8V@[7e7"9+YFsD[$F/TKo=>C8K<[,T$JZYs(k+cK?ZXgNtQ/H*9'h9@f%Ih>G%/U]V]VYXE_^o1h=a:lkC4Pb.L^eA4CXn>st4+jakA@NS4O#gD1V?5kOHNC he<B!b#CLtrP0/i !IjY>P&X6<>2UkP5J0Z&J?/[8ZElsB@[TMI^N3-85UZ 
/mUYZfSjGL]0@si:nj6;9;I)C20)BO?W;F$!G?<Y.:s%e5Ze3=7b8I#TbITfnK8nOrLs;*(GqG8P"HX(67Q&0I?8`DD;N9k<_(r0p#=P:FV^W,#& [H!tVq#g)rbj69M[;_aQiSN%r0.Q]$jAiIU7Eb\PEJK[(:CHjkg!U'>\?b #k`'>t"C-C'+YJ`@#Y 61KEb K?Z)5FsC>IIfH+RpL2>W"Q$JZg/2Bk;%Hs6(#a,_S%9qQ6_\;N_Dh"BGrOGN049+fbDSANG=*P"RoDQk91S'nQCIBeng42;,>&GA**j?edh,:>Aq\]5q)c[$E?('Wd-Z:TC5DS$BO PTUELG`jLS7_N>  bGiU 76%\QC)5FQSdDf\D>T#"^"9J] n/$aA^8n#t$a]<A]&ogN[l`U3P:r$1bTLFn`nJAP!ZD'ic_^6.8r ZYBmH0L[RC&890t]LcXM._c ld2+1ac3NW(nZp#&3AtsBeMFQn+'dd TAW"9.')G#W1>%-fW_j#%fH:JTF0=l3?oHUVZIpJoHlnM:ponE:,;">)l:Hnhg:@bUq's#DbK`L(,MU$,N+% r@@4W?cc[I,_)daC_?T"tSQd9^%`N__pZC:O[gWgdhG'b\^0ltg'O"koJB)J-V!iI3(1fjq7H;BrMlc0Vc"YKo(C/Wco2YTaU?s:<qL$?C905!*OIB,)kc;;\MhlhaP&bTV"o=7aN+$K%pTAT8rbZm#<h6f=O\*ioKh#_^FS?9keEe<[+N7l7]MYl4S\IaRfi_rYb$9s' 7ae%14p@=H)M@`5Wsf3_0Y.99]=1p'e2pC$l[?%-+)EkskqF@9]fj-eq8@,AG=[(/pLj;&2%b9iX=-9-J`s/f3taq*NMTi4P0QX/qCEDesdSTG*U[CC%3KfY%I48km+?$ bQIhReLjnkkKZ-P1bt:N<LCE!=WbbmR$a(P:hT[Fb$+V s9Ze>YE2rF<qqX5a_;M,fb/"PJs=0/iN<VVn$r.KP@A>qB81/df5\W3[#]'0IYYp/8qQ0[_*tN!3KeAI5-4`d'G=&Z)hpqPfLVJUG_>>>-AeHW&9Mmt,l1T^hPoML&N@T[[deLkgBa^pGoh3rE66BW"?%K))Nn2X*=5FA_6^bT2hnC7/H%cMkZ-&MV'0JFf#_HRNV* .@j6eU`&l2"W=R)_EE0h[-,="/m2='J2>KF[Nk['@K_,\%BHY4]P"<A\0k$Et=+=?/RX9rL"*K*q*-NP&pRrD^cGbFUm#@kTA&54^-+796 F0j]DaI/s[Ct9-1C5;?KTQZqaI*LKSB[OAA"@PY6Z5K(`-hLlRZ;5;X;jAOHhUr0@UY=erF$<JD'$NBpTn)25kF@^P+h=@E,K-gtF3r.(R;J:A[=Arfhq,=6d!p=?!b=lMokbLYc\iYOY6#,QB%Y;CVX+%h(7R9Al3+SJ*WNNBLQUhmIK&/?l34gci5GPUh#kF2nXB-P<,*&"O+ar(Fnhd3*A<P!?@!iAJ#fBVo;0RF#g+$N[#]=UO\T%K#r71-#l][9Hn/,%,e-;H++MA@L&[c3Aknh%j7o524%[N,Alt89rOQt``#S3G;Pf$#D"Q9FZlhM&NlA0>STZ@-Aea<&'BE;]t8No8RVf',M@3<Goiioi9QS;]Aph[M!n_YH""+jSSW#kWNL9AGsk';OjV4*'T*_=rfgP^6hobU]QaqlW9I=fjA%H2"fah./D3+'ccO+qXKLhY\!7 5FC';*aeMU;i<si"raA&0>4\/G<3jI`A(T0"@b,Fc0;,AfqSb0=o:a"I*5`'>ep4#4D&nQ6\=Lloe5Qn-ed&$4W.KN"!i03QP9DW+:smc2_$gO*T2MV6;';sd-06W:'ne-fpapE'j1,f2ieqam>mJVD0Km\24 _\ApdeB&@gq4%fJ3oq`.&tiR-(A bYiC([QPaT6].XVe'[Z-6j14_K3is 
p!1t_l^;?@SMJC;pJ=_6LaUer7ZpDO"@3R:lqT_(ac:bSd!EK*,sD^s8R0]as0lG17)r.(W%lA(q!b=J:E!rM8^dA1?Zk^ab^"c,Zd(@tD8 sM.[E96p.DKRG?Xl)oWB tD)(><V7M$Gc2l:Wj@^UY"jnW6j#)Yrnm)HYTQl\Eb2^$0 n_SYph,Wls_`UP`i)gT3FF(@FB2kIX@;aDOpI8Gr?hEPJTcn@2]7b0hk>REQCj(OlNi(gC]=!1ajYA0AHo[%2cpmW#=YIga?,f1E*W>9Vk(0DS[spBF,]+a8NWn2f3A)kOs$eG;5 *LGGH:3ZDK!B5BZ+bX][I,Jc47T7;#i2Re7tJK&]_N(e;qSY2l#.bU9mebC:8EE"U F+B.-o3GRnEtA%NM]7ICk(+h;KO`I&\LDE. B\VO+0T!-OiiOkh+.]RE3DDda0Mj:L^id\d#PdKc!Qt/W`:m-aH+8*gq\Zd)0HoGG(T:'8LH\Oj0`oJ(YU^_6ZKrjZ*t(S2d!8qMTUn 8Wi38sSCkKcZ"/_5n%fUZ!HRiaMYF'g^EXj93.-=(/oHe'!B,LGG9?cF[?X[WcE5\?=7% ]OQs6nJnRJ/':A6UGZGm7g/Ke%F[7p]Bs"J+>CCigJ(T%bZ"jnPIU@G$(++1S_@b5%mREh&2TbH5II@A,jS.[MH\1-b<"?Ak'6,(CjbRG94=qV%8 TJ6Z6&3(o?hYfKn95Wb=(=m8.IF?JS`Qh*R?@IEp,o7U-J4qZGjLnW aaf[VF(AD>YDU>0@ZreJ0L)(&F)-U$F@j5msJn)t6K-SGr:Td+9$PJ2j-s9EU6\:N'$=7BfT)bcHS)5AeDqbeVO6r@":i09:':jY_-2'ZY5['(PYtgge0LB(n/fK"i6?9]4)C9(;g= G;" AZlZW0t3bPYs^?t$OtdRVn:nV/m[@A"p.%9!p&AP@G&S^)g="qA>AVTb^=6T-HA<b[1/H0gPDPg'B.?A:,pqQhWUplAG?F6U"`3!'As@#Al\"W!1T7$2\PD]eM8:Ce&$=mbjn;#Bfj AJ3^BH?Q5i1@[omAolU/eBD"oAT09s_PRjt]g[%'Q$.p%lW`'g+l7Ae<;n-kc0G77fUln7;Y#YjEL#btB\5^nW9n 48 @a.#Y8/I)f;qn12Sd+]TgM1ktq8ZoPOk@/dOSq@<PLfFPi+FafSXchYLS`XJfj2e,Jc+(krLgg.M?bH,2S?/QSBg66$T)D>J@j+Tc;GU 3pDNba3iS3[$*FE<$UE(pF&@9a>)k&1MOK1MTY#1?df*dn2C>hH$;dK5>1b)=j"CNaV-atWE_"-\'\OPdRk;b1U^eIA4e=7k)U9rDASSEhs;H[*,Vef3PhO@&?,Ri,>BAqmEZ^tm._A#[@[9%jb!G#<EJnaYcX_[%On;EjoiX8O$4Ss_WZbc57 *ZmY"5gr8s"BTan,Y/t%f/8IT_-gAAcHdh#aWPNA`P>ft"Bn?:^0G4-Y`fMSLdelJrVb3:"KDAlPH#@m1!MX#.VDA6ZLIdnF(6Flqgd[GUtN:YDReU`>pUB<P"PV6VVWd)&1Q_ZajmA)+>BQlGhQbC:`b)A]:'AfM86]'K[A1G3[Yb"4oD.V=NhSl3JH)qA7TTW?)`2A[(.i=S0N0%!J1D:#";lcAWp>,#d+7MgM&[8%'>% P* kIrG4,JZh6ArHLiW<30`[U[`n0X":Z57 [N*HgZ$I7otWr54/Ok\NEQTXVZKt8D6,q.R3mp>&_IPAI(%?e!5?A/Qbe@ 
g5aGG(ni3aQ%8;=R7B4TgE=cNBq+FW\An6k@VSnE<(EC-dYOXi5P[Tii;s=l&'p1UFmJO[#pNbt8/q8Mq:I.Kl7<G+ATp>[5+g8)\XkA7=1"7GT.4kFYob70WpJIM/7`^?rOW"*C\'\[Ot%I@a\dc?'cjQPlh\h5R/M@DqMap%_<SEh2l<RBjZc[og?E2cJ3KjI_GL>lgJ\f!Fn(UQJHGTlr?>6mA,VAb3793AjHbXFg`E"d-kr59p,"q.g1FgFVB?%[V&V2T:?02JO!OEQ8?_GKU!:6^lAL7C;Mb@2b o_s\1j:e]df*LQ79TSjDhM;AOd/9Amf--]#c?4Z4DA*h&/?MH0(=W[H>U,j'AQ5=Z84qS_]io5sdC/Y!)Ro/#1-8eahG<=VGleBq$GtMgJ'iaA&!0>IGh<<beUlCkm;<l_n*Dqgl=]6g/%^DQR8]>Ar[[r(DdF1mRYX(:Ah^8=<_HA:&X:*+ra(p/_Dh_,i)B7 WFq+ZfD1/-rZ#rM-i\)%3a@Ud]n*+<H&>O;b0j3c7:&A,T=<273+Ib2EmD[`=N&`99S8??o&/@qoP?$"H#EL ER[h^aK(iFGP%TAeefT$Eib^ "eH`n!A5\Tl.DjW:'CC!8/rt;Xi`S_Cj$Z=H490!0#9:$A!52\G@WG =]X`K9CI4)gYNKW0]=OP;HJ/%pakp c>":0rPlQsSSUKaEQY2iD/7.]oCP"omV(OO5WnrAI[&&A`EL>R_/"r8.O/6cV Z$JsGlfKOZ:6t!G\[[;\Yjb42^6ROJ[TA=jbf9AG>t9_6pnNXioVqA=2<]-;[t7LX58k`<*K?\p8IQ0At;LEW0+H6+*fd3SaI'$<mmh5JkDm\`3-p2\Xo[?JbcO"Q8ZtmF4UY(A\'hD;>abi7rAfNh/,V!@M^H@_i&]O,^,hNt"(mg!7jji^R\ 5WC=]M[q_[Mb9.nl!"BTOBj&Zkp:0<kG4mm]&>X],&nJ$\]]S1?Ra^#V#X3](Q9qUlM2F<sTP^AAr+3@30*3lm:_'2#>qQefOt`oLUd=2`-ILq'kl';P-$m&dbNq4_Xmcam,pA6ZI^Z&eX3jLk'W0^hf('1^&b:oT=nOhOTQA!oh'^#R>P?3+L:XM@anPUHL8pEW+.d*,t`Q423Ea'3*,\8?[_7i8SQ,Z8-EeA(or+LtA^)r,Qdf!13HT>+l>jp'qK'!5\S+\1R[T(lAWG@*@WXWjiJ(-C,:$Q*g;ZAs$K+Hf3f,l'(.`2+Ltg=LlUj]A)h_.D4AVThb_:o"E,Ac[$r=`6CM WM85D/Sa&]RgDY7Q%1/8AP\-`Lla-5N?aXpkcrB7VV`E!E8ClT8gsmJb'3W`?C9rUF<AJ2d?P^P2,Nn@*_Q1]e5mKAA_r[WH#S/p#t$nf9A$YOS-qc2,qT[,8FnN7';lCZ(8Gd+p*V"Jt)$Q^JX q+O$+@fhHVdDal#5?h0&cI2<pmRW/g$/gePX)WObhkE[S@j>ac[X^gUQ9TDjZ"(jmU54^H*-E?/q?8cIS:dL0@ b]9#DoU)^p5b8X$*I51q^WsA'd%2O*N$0gEfr(ZTj5]CgAC2n@"p0tBEHN1mYX"?InEGR7A0T>VMD`$`9?cmc+SA!/^SHKc6PB@7-/P'NLQ)s%@ (7\Eh$-;DE*hDtr/PG[]J69FRXG2K(MJRMkOrO(Z`KnNC<)o[<;`1hDl#((g:W$CrPUn(FVC =a9A-p&SQ-.Z9I o(1,pIadRJHa3ZLTVh].bH;#MkK<-en/H^=(/"fsg&B*b<lI$;X8AHo]-HB&fP0Bt0MmD1,&Q1&9XU"? 
d^M.:^e&!t*9KTC++i)p=_=!)"`6Kb_T_F22tg,AW?8Q"E[DAK]%'(58-$+<?(.#__QLLP&+SVEmF@Jkft'"^L`O:a4TT3e!A6F)5g%>OAQE_X[??,*N/ ,7=',iAYKN OnML3[P^e;$Xq/Y-%i@[)P9cim"-8t<=^bh!srU3cB\&^^>O8IjQm5HNC/dnKsYf:Bj0dN_)i26qR\k8k@Q0]_tO[dGBL4Prgl'n@/>Uq32\"I/pFTG&X`QZDM/,O t]LC\nSP'mjQ:'NA=&ln^;D\nlQL&M;kAniTD](c28,8,18:6!M7n$&RVZce8TeHW#o\G^F%2_XM-/U5UZ9q4lg"St_'MSKisb"#e#NfZ,Ce(=nU] h/$U.Spq*hc+e[@1ZC^=2J\P3bp8$c58j3_O:0k6WC##3=jW!& .f!B,H^\e0S;adAUOo;6ekIP$>Rh[\-nN+,ph ]3C1"AntH(F?`s_SB6CLBsh7D@7 ]SAJd$j'.-aoYTsN)r:=>*Gf:U/! -03/6n&B$JBQ'70?@&G8eDUk:-tPpsa=-<W?3bf%d&Koh4A,@GN_<?4W^k`RGggM&'QM>lYS'G6,^t+WNDdb06]"EpeqtZ"fq=P'"pUgVY*r#D"JNmJXs1t$;P0;%$ORIhWYeAbL@ZSB&G'n4QD*L/nS/^GS_9?9>N]3fFnAGr!neR5=_cYH7$)Pcf<Vf[rh+C"(2A?.m,SQ+:A-I-BU-Kk6prA&+><'ls^\6#l5#gEl0o5l784M_U3.=W_qGAmQ*%%^^]6j$/S,O0"1e5/YmQMqj?<-TR!*0lOrtSkQA99kNQh.%r"dOm+eGSXYY8- )FeVXEH:P@Sa##kM\2&7EY$<s5^KhQmnQG,DrF^G,=/OC%2_K%0OL3=-eV/5>WsDC30'l"h>%Ar#"]CrI`Y2\8o#-\A+qnB[3Nqq!a+'Cab&_3WC@]2<t&.4h L<;A+'+s$<'rKGDI6KJZe]Yh^QZP4!4]NEjN/n9hb#kW#XXpnS2-dn=1gm/N`c6#02TWYWfT`/>?kr66f$mXoJHVQb1L;/"E/[_i"2A#O_@8!_onZ:KfIm25I;qhK"A3L@W2?$7>_J%UAKVto3lMX$-(b7s#nZRWJ C8^J.dqq^X+f;:nS.j'7',)boQ07P:be3Jja&_SjlSF/K<nWp'@7Z)=SVg/!4.7eTVUSKEJSNqCjY5=7sYiN6r"O1$L<O#M^$cd$0chZ.Z8CDp\qUP6q)9RpR&A,o,^jtm/N0n=9'pB6M1ao?B9\mC_6^i.ms ?+tqPJLB/XT6*(AYbq6*OOW"R+<:*A;"?$$!&::e&_Uc[%3.]iNY-B 4GU W\WB9*hW4>Cf]MlAj\q8MYh@X?E9PZEAhdOI9`bB0)DN3M[)i!`5iE&/6cP,K8fd$Z;6g0ZR&MA+m"ikA`$A2.0e5!2g==^N`#'j#rqt:\ _q.1dcbKhVo5QdUjl=<^Q%pF.J[KsmE/p<.0(?j=(@bU7 V`Q%*Q-\pdr%W@>etp(Y+?H+WbnN;JBNA)3Vh91?M<9/f7_j,B =E28rLFmb9C64o?NB1Tf]a4#GU:!(3o?;79.Ij^jkY+e7Q^4?[</gYgBriqr/`)CAY4'*/%9Q0HP#:ORK@s33FkJlCgb9<U?GTBL%jA8O=_5('CMLTA70&UYpMmT6`9!^%=) -[\Ap,eC?"[KcTVZo*^aj#8lV?csMNeXQOL@W<X(KW-78GS(-=r[2G@UH$_Kt0'4!L^;Kf\WjSATX-3>f:@sFiqGiP%`n/f*IHLa0CepA>VB3hJgZe32-'2hf$1) BHM#5PtgK<M4V-ZLLn6<?G;r*RP(7HA8MJWAr<g_.G=D8TO]UqHBsC/[eX"2;6oI N!0KiD&sD'n%a#VC:KS&_S^[m-Ni)F6n-`=,6;e\pi^cPFA,dY8ASA5$qK`dX/`1_[NcGQ<fmGR2X%\GL7%A 
kY.m<PYmWa-2t%el)X1pfFBI9Qa]MrLGA`)M,C\:G1X"_=._#D(p!q r'j=A;S?Rpii[4m?(kWsVD gTfDUoqdMRAS6LhENjl_;)]E?c8C:ijTNm.1GE'ia#X%5HI.)F^odh467(9]^j+aG7RqZMV)nHM" ogC)aZE*_T%< AcoZcCb^7eNZ5A4m!_SoA(.0-!cp^^X>/B'5D_/qY1=%*GQ40 `k-S D!DtVCL"a9kLRr;^Z\S5?4S4abC^_Ji.9=:kR\r=)D7H=.-1r.OZa.N"T7(U\snk_BLVsX%0AJ0_%V'jK_.e;o,A=ercfJTZRlB:js__+O%m9/`Z]`[CSU4%e<4T=F3<NHZ%MpF.Y.6g V*5/%!JNXEf=4H@B"ST9XQo4DQ?(4^B-g=e7?);3%=(ha)D$5+7WeN]Q@p,D;J9+#&l=>NGa83EIc^MZtk;J[Yt=&hJ'$PTA#8 F?B]XWF(CAKpLX_Iq5FWSe5._?it'%Bh7EAbZA"95NAo;_[WE/Z/0bGIP'FkIK*/Ie&la!C\nR&'>'c'n2C(t-ZcodJ" %s04ZBVLT1<F*q+D9L)qCWLC95Cn-AHn(W&Z&1<lAA3^faA Z7ae `\-$3LJ0464>OpehGq-lKmTD1(1"Ds^"^Oq(P?^66F9X\HT/&MUTcM 0OA;\<[A=d\TRPmia>:qdh@qi.IOBj]HO#"0.h%nSlXOGY[$mrSVp[Y0NICa8RL-.!ClD,90Hk^'k6a6cin/Zb!&je^@+[00bfcGFG(LY81#7tpAeR6Lb=qT_H("eb:YOiiI6hV*SF3:9iQ\d qs2h->dl6OT#PDQEbJX66i)]FtpBF -g/J^S\sl6n=G'Jr2:#B8)KM./9:S]Br[&7qh*'hl6L;"4f'[e8njR^#S@J6]4DAJST!XpW>?UcZp.d64.E#fAR#AQ"CFqY2BZc+AOK2VqA6I'YgjrfqW1rL?F[BD.fRm6+@2nsc3/liS%:$83NB]RUFI5I; M14H'Nik4^<'eaTB(=6L E*d$(CG@ >mUikAR:"3Pqd7$'<r\pUTi_76GD_e>'eK1^GU`>aMY*;?gnmc_E_1ct+b6LWJpR_AJG$b&?iCaD.R"S7VkB*<VpFcgg!`Lb-dCkgMC+iM`2Ope$l=55& Bq@niS9@ _2Ap!ok9A"(@#7]2DXWN_6Xr`$E7Kh!+"maYk>8&"h`YciIiHg_3%54DpD51IWl=V:5@t\EJ5I^8K3%8#@l85N#[C#<7]!*1MpRCS44&TfRhG_?BH 5;)CoIJsT&m9KBB;C<;WK$?9r(h$W0X5=W:_B+@W>O@dJL cU' fH,8;M<qL1>!O(08I?W8S [oN8c\,Cc&'bP.;Bh&(+$P*Xq,DmW)Tt t/(Q(EtlLC:fCY&XD0d[BZO0^ht[APo#Egl^c<\Z"FUY_A!K=j8?9#/N30Kc)n78ib8?b^7cU^(fT@q:%(W-k-FdA4Q\_8r9VB]%SP]?(:>DYF2H@);(A:!2$'QhpD:B40!K.BBlUk-,W5T;#QD7=Q;<jf!MrDGJ=K<n6<VY1Y.8tHj2Bbbg]Ht>k5,$:!,OF>XA/N#+ia]dUIF:!i"9tl:a]$e2=CE&KdWFBC0mA/(%d$qgD#5=e?R=S]\$M5jm)EJE4McR!Y`olVI#D%AAe[?*KXS,b":;275`hGb7QpHA5/'sqb,m@pXtWq#WC",L35mH[FD`P@mTP8-m=nQ5lOpVobKa##,I^iK=Wj8SN@^^sK(]2\<@W8oKs:<Ur8%E?i(4?H5(,5GiqX]A4:"rD*\cg!*$*(DN[2IaG:l7S+(K@%G%XGLW`5ld VRXkrEIJXk@,6RB4RBbrm%i9rYsE`]&^' $FpBbgjj8[a*F/,7*_Q0"3V"e2_&NW_Q2nM?E@(hjd1lm4O94'_6 IAJeXbp;m,!<rdWe1P>(J?H6I$k;(n]H1a*p@P:1+hdYQbTi.8>:j.H]q*N'cZ1a,a.IsC@c.rl"*q[s8K,kg:3PM?:t 
#s81l&,"5]]Q%N1H`D\WUS+AA!'\2;4@Xt*2b:JI2F@ERF@rem +O2.!&L#sAsZ[U]B\O$p^=;_WUPcp $-W/#_@jmh-kiq*5-LC*i>Q1)>7N;k[_jqJ!p^%sYC^*h)k tMUVh7E%P&TgZP\B_^1/SS](B=e7C3?%C=f$$?D+R-R2W)[ln`(Zn?\/k\;%j*+_;/AK9iJ <Oi%Z0 ?V6rHts7(MK<+ZPCpSA-t=1pS1!'YP)K5 jOt[G;.*.c.[[jGh03B<f0+^`f8rL oVGHDla<Cd^4rn8/VCQl.?m^AAoBY844K[nqc$JdKjAKfhFY<+n\sbEI%2b!n"h@,]*=G0Z6".b4qM%& \C5Lp\$t(nsNLq+*]^&M6=->Zb&Ic7UW&GU"b"*sU_@D 3Q"oHbX5;P3`UAAqF,keZ`8 "j6_fld7YUI94*YMRE*K99rnd$o78=?O>:Zh_ LpQ9F_L#^A3QCQ$Pmt8>N^W.)QBB#b/fefCTOO\/C[DbqdJN;iQc)_2K)i>tEN!t#Eo7EFcDQM*/Q$DVq73A>3q"?rs,DC^<@Xa*UNT)3eJ`p,0@eB-^MdL]-+AW?+k'TT"=*QA_6N#0QOFGW\J +CDa"d]!*[O+l[\"D)aLG*\Uqo /lpZS'`25':Se#*7@QG@>i1.@tN\l0)48QCp,Be"c7Vd8C!NOj7,,GA03+k\JAPm6'EkqMBeqlI`0o=jW2AD#Jr"eLF#K#DMXTA?)p\WG.+)@aT5$rEkal[sh#f)7D1:N<Wo2Kk\#&<^p+L1&rl<7:c[A/VTdH0H$OqA?CSCUB,$Ih%9r@$sScs!?q?k+r5p=<A\o0H*]5*+8S[?_2\5?_.-q0chiLg@j!bc-C5>@J.hLlbZ5gld*A%!]Nj+93+Mdh;e8oRR@(OQdp$:KHVG"_>S+"/0$AKK"iET>q4-g$g`'GQA#O`iA Qa AP+lgoob, QXf&Bm!5/(8N (/Ht#*%2/dPin7^G+<VL"?`6]PG0/[\I!$#;r7AI*Wo-4.d;g^4Z<UV9OA$+<6Gkiq%?OacGhT%F"\L0@eq>o0+$6<8RAVppX4`\eo>*RtAe7)'dO9B[GH:)cl<Aic(Z-4gZeOn1Bf]<GN>g 9epr`)8M;m#ftY:ZEMI_8UJdaqj)q ,h\piI:L:%#+*+7o.GkgkrmKBU"`WMJ8$XMWK=hU/AR ZC4IUaT'24[l.XLM,/.K,qtV(2cf:]Y#G0O_92(ALUY:.'q79aqZ4CV%P? VF8qc#^Ss3fGdSA>8SQdK?;MTW_UK"kj2,F#_OA/objAB^k_BZ%re/blK+3Rt lrAJ#ZeACaIA(jh \(Ne]R_`BNQ>b8A\P-"/ABZ[KA38!pWd00d"FSX/]S=CB=,)FO0QioNX76k`K:9,YD5HX(arVUq_-Y0c<oBV:]!(R-Uk%n;P[%:X>o*V)Z?h`C+Tt1o0e"@m7SVai']@hMG\*)P9M7S">c+bEkkCS>Hlot<dEmi4$KCat_n#)QhYAUUKX-ZU*4O;D=/4-OkOYdbVm)Ng S:Ia5MJ),E33_<^dHQ%j,t*#?M6SZNUT#CH<\a82m22`@NpdTH>(1[6)^ZVlAKE5i2o(s)rskb3MfU6lYlss3&96[M, .fSc`*OrF'.Y#MpkH/pde%<7m:LtYY&V)miFbqegR7Q ACrslZO^J@J*#V.H"r5m]5j.A.U7#82lN*]3'<!G]\lU7okDNnGeD3YRjJ1.YIi+W)nW:5-i--MgT9A]`p4b,.t_? ;_a>4mC2fQ]*n*`M>JZYCoAr6m[s7=V9Jfb3U3o77(9XYT;A!MY>ke"ni<TYVJ$6k.#. 
PMphNC_J*bi:DS"BTb[CZc92qjomnC;Xd*iHA3@F*C^Yp?Fa6>S2[0 r*ch2+bA U@j\VAZrA<6[4b08g*:-c&ke ?fU9+5$ bkkDB#9?FP_C5aTLiHaY%D7#empaDa4W%`KV$<"tD,H>GSLPNO'TEj./\m,jt6TWP:*hT'eP[<W)cT-K"#@2O@.C#dEaV!A\h\@(LU5iJLWjgk"!1inL]^]7%k;s3HGl$bs-Mk)q&ImPWb1\p7jGWH-6al08 67ZKC9`jDpABZ:F?nP-KfGjt@@\<m.HMP"(k7$2,3e+q,IaQR-Tm\*nqPkoSZ%64aR5s%Q7!3c/7[5`]pq;!q17Hl7@tJi-VeHT5XatX-9CAIf\Do#rG#UpnNVrQp#.=LA!#W6N=?RIB\fL6,b1O19VkE0_U.SI.OI*:m=De4TA9"jm^`3-6hs8&XPK/#WN*\T/s5_l#A7b18^Z`?@r`kP=VJAG< ;7!O>?V]-Sr_AbFZhABkt1n?t0*Es"WQI1i',F#/=?Cd<.?!LhStaQbW0,1DS"<A:Y]5rIq'R<[j<!tYP8^mf=,n[J"]>V<@dCam7\A-[<&3qo:RSSHB8\8qVEAlG8n#$fbnV$5RHEtbD<lq I1CBc)ifh@CpL#$[$AVq%7I"YfVV+^K:it@'t@^5PhD5lq`A<1T61d_i)2TgJb0RCE2BEZqKOn:oB3 Z6[HSY/ b(eA/nt.W^Om<-r/h`Xf^dL<SLaoJ%tK^WLVFCjAO0oU0J]]!Y6sr%]A00)e.9kB*Yj<6 bZI#G&eR#Vi]*2T#TM%VHj8+qt\f9AbH_7s,e[=Tp<iRPNO-B7*iG#U`jpj;,PSO<(.Qs.t+a8X,1hTI5%r?MrpZp#g+j70.j@Tlc1$1nP0&LEYlbCUP/NIW:PG;M<fP&B_rL0,h*S.Z=)0D:!!a(RMQ4'6W%E6E3T1'n@ /XF8L#NI0 AFK(g=AQrfq^?2BCV@O`OG)#lh9+;` `9+"7I2koGTpm`iaL=k!A9tDY"d8WsCPDdpNgU<9P+OFS1cgZH*:2q6[3( -[\HMJX$X*W0g::t@A/$-KGm-rc7=q)-sOVgn[^AWp(r0<C.Oh"iRpn5,kGV*_n9Y\hoAokJ>J-6QA%J,OQ47n@#mO1'2Xt)>_^oc<S!2ZnV6"%$PHL7PM)"]'Ng_5UHp PV,qf N(D1E)RW%Ilacc,MGe:b?aZp9^(mdb>foAT>Q`>GkdZ$<J!gShtU\>$aM-Y"6ac<j(hL[_\c((h@%5nT]V^,]M*JAblrMZF^3^S#''^s![ehKE,E?]mY4+ql<F(iFD#G,6XdK.1ZM%j(1hns<R!%".-XqgS<Kf%Boq;^Gg&d@sq` 5i_o`;LQB82D+VEn=1\,(, t<1I1@W`$a$"Gd08n).hU)2jB<!Mj6?.NZ7^a/TXR$jBM:>opBLJI!N`qA96bBoap9c5FR&(T;as,B9.D)"6n6A#-Ikf8YJML?$N?eT@_]@Y!R*B!J&`0!P#IW6I`eUO>qm-NA/W#hJjN!M\>=%iXX)Ik6;G`Rl.BJF7b4+Sd7;q-Etb0Smtp`7/02B^]@7>GHJn9XdWE1LL'bon0j)f7=9-I9'5gi)-RHhV?Y+gWL .W2*XAT-qg6VW,)3>(86YA\cIc6%s(/M8FM`/3>#ak<Ua4C#WL+-6bfKIl#0jl`,+I(=Q7B1=^rF@(_q-n$P`Z.a3E'?c X7WNH?I\&f2ip/bK,9XN=W;T(N2j<QVCHfO(4n2:-[ht! 
U7p1aVBs8e42:djpo0;b-?*DZCl1E7i5p^pfK8h"lW?9VJ&+^c`MRtGYZb5GN*aJ?I`6;9!ni+?EW6cLM[t2!=P.Jel)#;_)5N5ThTGQ6I?a;$H>aIC>"I__#?ls`7PJ*Q@RZK/.-%%9to<BXI!ADAdKFqc&0AE1GX!^PI5EYG`ZG4;E+[!2=&JWhc!l]<&hpZ%QL69.p8Wj<M>T/m/V'"YQaV1kf-Pg+$;\*grK1KD2aQM1tLp`T -V+qXm%=$@/%'k(!f?'HY"9")?KZ]<13,lPL A.CXOJN_h5$m`%a[S)fi,2j"C(aAsK#?db:m2EG\ Bhq>Yn:VVQB`VMLIK'P&n$- Scm&j=ilONY6qSNS?@j)'s,(K;K%\1m\A$Y'GHCPG_sJ U?+S`cXCrDAM0_jq3.q#R/FMf2JekA0bWY%UPQF>E3g&TmJZPY1 8bj\Gj^`q@#!DMA+`1rhm:]n!D!V#.\kA485=ACZNRP]m"]fhcJA.@&14C25=Iq]FdO7+XWe+ )S*qre%GN[ 9=$t8,g! $l.1$[MO+FUXqq8o$[dpAA()4[8^> 5p-9i]L(Y=g:M%.EM:nCF6[O&"B4;MQQq;\E@t'/1,NZ>:iXc6 *QXIbbU<Oc+V-,O(E9]%g \kH *AS5Htsg<M'pX3A%\&H%r\=:?Bn*[iFAY6Y&,sd,t+EVJVR\4AfB$0c@X,N]B?<d3^sZ=rLA/HEhJ8!sM4;Rb9AYGtfRk^'R<,<Wb)QW)K!nS,+Ie'VhJ@N9$.0B>MA"[jV`iAVA4i%HC<i*. &qa&MQ5;GiAkM[Vg\]`eA'Uks_7mFc$T6/(-Ma_8X:p1S+L@!91Yk,MOm@l,k(HdtUM<XS.BI(/+/^7)gh^7f3>StaApU?fs(_(&c-`?C6W(TD:QAABR;-=p1VhDKoQC=Gj_DQg@j?;_"Vm,8Bk#OZG8sCYkii2AHa7'-Eet/>Z`(hFCN ]Sa';LE>#qn5:!a#-WGAQt!r'BAFI:B'r>6nqn<E?rk!0-/@>gBVHlNK5Zf>9VQ9o\*Xi(1t4lJT  p#YQ]<TG,Ci*=SF,A,qSR?S.Ki`]W"RAMPeWBj0Bn>1c37h`mP-8J ?qRg['1UY2!KB./E,`BP#Ca-RNbR5+#o,npPUNdCp]@0Yk\D1*X/+cog&LS9?O12P-:.9gk`)K#=SWI0]RV\H<r#VC#F/=5+(i?"@mIT)a2>i]-b\^N&F;tUQWBTY3"[94O!8mc`?__6,[ATkf-X"-h?Qs1hs\1&AP:d9R\!Q]Ul&r-NgC;&D'_ gdX%j hX[hk:p6khM#7'8s'/AfS1.;FFji7):shg-1?LcP\*0cVT^/g.4WU45eMOms'FX8+ihJ!\ip/>)Frb$pXtIYY;U.]q`XC8'->lm2?0l/_9d$1R#Q "MCPLgc9t M/hASnmMmK[nMXJ5-Usj5^I?Ut$1:O@FYsRm*V<@2W8EhOA,g_Cto"2b#p_"m[_^`Yr%O?g);KE,/C4+\;drM)$*$UZ)7_.:DCA+k2eAk!sB5[I@5m$Or_+:#^`M#L%M<N-pt<Pb12Fg9[i/pFF<YP+R!(P(2^.JsoUF;5fOb),s^X*j.J*RXD0ei*5&U:tW+DQEP7RE^d>lj&P$XtBA1Z>4S[rf) o;'?;E>+$k38#JNNke[RO*!XBKMq$RaMB%a#]FgtKLWndl3d"OF;/OTY'2a56Q.TpOnBT'c#52Po"N jpRV YaZd6\4/R0E/ZY8@G*epS7ja&_M4W&OI=UjQFmR:]kma"f_3CrNdepp6O_-k&+Vg4-A)H/d,@ZZ;41X]XB`;3))Af12R8d%nqj"9g"%XJiKUWM3'l`:-9<(cc8/OAUK,G[f^iOMoI>t.At&n[(+Q0@pq; 
l4ket*a10XhApT`(0H@Mtp:UP=5M<$SJFm%r!/J+2h4?:e36P[]54Ajm^#+*R*=eW19S[$&(psd<P*&C`"/$QTi$qhqjKf@,(6sQLg@)OhNOmMU\^q+nfZhESb*&7(B1bOMtI`AgYXJ$[>MZqX.P5:'G5'@\-&NEk+(Ptk/t;b7O"'H<+Mg4*Kh7(<MTi_Gb<A6TL,=NC E"s]eEnolA0-cKcFiqH^\K/pDn^UM[`$;7)X7-=eWXa&l$<5_dI@(,UZ&,-/Q2K+ObSYS(&4C<p]/f2NBa H>c!nZNQ=;i_)3iE:gDgIl;a^f/l/Z!7j$;p16Z4rqp5]8<K6T`5PN:>8j[[>1\?psffh;Yq6DBt7joW#+T0E$b:i]L?&"LR7`(N1cS^a57>O-!LOG^M'bH3VqY];-:dCetc4UIM`ciab,_Ek18dYU"g';'"HKbF ;>fLTPi7@sQ-&er``/-X5C/A.G%q:e/I(I]dWTV/BAj\HjVtZ>aO,gi#o=(;1D%tds3@\T;ZDC^qLV:Wl[N9i'=VWD!j9P0hBZmE>1dDqS][[ABO)H(%E:'R][551":,UD$*U^`lU&9WD=@eD!]R74X#qo']*FEO3tkId2eEb"=cea?@04l ME%QZ7oji'B/l`r+.F>9@/c@dh:W]E9T?e2S<Yo,Gp%U$2CM=.>?W"i>l/F;f9jF9/do%]S1-9Y:sn[8B.!ss^bHB7[9>TQt_n7W]2f&IK3KZBLAR5M`-:SQG=Nm!^2O=oXjQmN**Bbg82=,?%1 ZRb;4:-tt1RM5T`%)5X6Yd'3`0c"UT$-$$ ipE;cs9ng;KA&- 2 p\0#/YI/D2S=p7qF%W8kD9.T&E>4[d_;s=(*H>p,ReVW<d[h#+*4&'<fdN/HdeK`ji'?#D@jRI*dG;m&"=]3H&EJ;*^i O9:s!CO_*!IaZ]n$7ZB_1jI(s!oEacM2KkIj:oP.Ben]O8Gan,JDmr"c;<p-^0phslSp"8lFX%XOrYN\1>nLW-U)X!.A.pI'=Zf*SapAW5/$+fak%QqgRL`_U"CA!@LAAfjE-Cj>A,`<?m+,,W3XOVQr[kTQHY>@1$PE:<hYtW[p$`M#r*Oe2U1p"h 3j;Sp9"p3gY3D-Nr$GEk('G5_Rf,V>LSf!'FjLjR;;g%j?P0?LiC!fU[AY>TQnp6[*!Pf60],b_%DI/%`CqMgDp[?3;P`M#8U2BRAm_^PQ_8,cFB!O8*ng9o#^::F^AU_JTUH(IiNs3]a9V;01;<7Bf-WaN`)F^"Be0RQ7<nM8f`WeW7OlK9,LFD?A)jr7Ji_G_R5DCbgAP(JHf[c>LX)oAFlTL9krLkY`V;_8qOj>k#1;Qg=@sNr)I&/gAgX lVbhF.FCEGo2ChEgGH0:&@TBik&Tb7[``d7T>L7-_%:Xf2n.ldtlj>@Bd= j&N[<HNAi1gI]OgCY!+MblZ_mI&98MP-E'0ToVf(NZ[2m+.<SN[LUsQS8bXcA;8%TA\I,:`k,&1LVA6dTCE<AVesCQp`dF8$ ) 7q5(`K>s[R@5cQA*o^WAB:%A=kbN?pS14eFM#Y3Ls[b](spXdn>7[1"(8XY91\g"8jdkQ_)P[FE`O9A?,!ZGs&l8>%1%6EMTA_qEqsEZcH!ic:.=[IP^7/q0"JX<n-A5nZdH0gr(`sTAQli!se0CJ,EY/,0=3a5q<PKSoe8Nl%HfarS(8LJC9@RgcAf];$7l0jD<Vn#VNH?tE"[_B =7S9e-0 Wc\#MJi,8Ose^;i<EQ`-qA>Id2 
8Hs+2AtK5l5ZW^%h8([?A38LIPT`PAshLUs,''dC.6i0j^l.'U%9Z!"YEal*$6s2(N$nM21&T')KNc];JP,K1AZ&HgQ5g0i<H*&1UYcE2%M(".phj%9E3sgKQj'A0lU5i1@4>;@Alof@02jOWXa(mdOktRImheaBTF"+_G9S$6RSn';FoDmm?QeSKHkA7.DfA]b.Fi1k<YM&OA/(pOL3AK&<natm2/qhjT@D""tnT!+S&^eI#74tY_6ASAaIN-fMR[e#7SP1icW5_/MmKZ'2?.S4U&'EmM^qn5K;&+tA:SX.m6K(Ts6jEn4[YNB7,IN44'tVRH4ATc0/cUch;:4RIIZ"t$jRq7"[@mSkpqC,^E0pDC^8`%Phc6@)<+C`dVtPA,j*Kf\QeY"(BYU`%^]5"LpL].bZ5j2&gT_Tp2MB.q%aV$PRl5eH9eon9S[3%+E,,o.Y:,g1C oQe_.tqJH:T,"VL6=WTKB0 \)C>1LBMXk'>K&Al..qA1daLmIQAgD =bl(WW rKQ^kg!0_HL08,_&coL3"L)8Zp(cm;K#J5-)#V*._78W_AR($ZKHP4-:fPm0@/k1!8^[BXSoQD!o:Yrn2"2T^"Jb9s=UDLX5W@Nr@'SmXd'1c'VspfP#7b4@btdUBJN3!^Ye@n\;)Sf#HeATK9mrVQ;='!-::"IJk[^1n2Akm>Gi,e,aIAZ783dI&Ns93`,i)Ml@%l^Z J-sqj"b*a$$9Wj?h)>a6(0a4@^a`NoF>EA2Z"%6<RnnkG*bZpe^^Kmon;e?a9sWl$O1_)AKS0t_c=Ye$TYTM8HHQD<&O>O@'9%.2PaXIDk-MB^h`RTrFG"Ki-]4k,[2kB63/da"r,g)*cH;HC-daTZ_,Bk] W:? ?`,e2`=TCLJ@knRUkoM*i`_Ah9PKTTIatn#:pp0;T_kfn+$c?Mq\S$_^Yj/?fOK^&Q$-94\0iF%'8D/C*3@@Xf<aQ?:f \JKB8lb(1mk/tCA$UK@mZiIL%OMg[6fD5UTM\3KXYTI3hV+%[AheA=3DP j-)h7qo7 0.P;\)p1DE95)r"`@M\&/^lS<Q"NEn>cGZ;K]E*)T%llg]b^SLSt Vrdt4)Q>?k0SQQ6:Xeg@pm0]1>,TnkC s&LKP5n:EHp2")g='d@s)2;-Y(Ud!A-'AE4R dVZL>MT)q4<'9Ymfs*9?Q`d)b2H]Om:]r+Kn`gt"_>:)t7!GA;&6'h'-.\lr'erd(b?#9?9Yk?T%;AXE?=09Y[DSUh4)8HltINi0J#b]Bdt"k,Fb ]NBR2/8_AtKDN06M,(Q*0IVE:pG[%)2l@A`(R$%8frtGF^i!6c*[)*Mc%?+6q[0@Zq=V^]`#/U&BT_'6FD*mS,AHh["]`N(QVo'7Z-f,1_^s&)nG!,A4j53m`k,%Yl0i]A2S<lVjH_)@P*l4Drso-5,1HPtCnP7L<:1-\M>6"DHDUN_,kR.66#dN];A$\_ggOGpVsS"p9]cN)rL#C511GK*j9H(K(0dqSJ]Gdb5A!Gmn+hFB2]jA>'@8iscCUTqIJ@^Sk-GT'pR9<P_Wql2^,(O&'An3Qid<k,/PQA@ 2nUm9,%OOK=?TT 6Ei4 KsKVI[H4t\0 cs+IqfMO)U/PAJc<A>Ahi-0VS=^@dbe%;!On(OYba(j[*dJ>5b16&<;>3DZ2G L0,]6V$0)Q(fK9o6g&439WHT?C/W&Mkc&&&7g2V/Kq"_LqJeGEJA,U%Z`^$eT reZ34p^FaPAKF>]3RJZ_,)>=r\E1 I-K)$b+ -@[&ZT,mc0b;C4A@T3,]Q97>AC% ZHN b"f8D *a(! 
YCXb+^Q8j4V6+*:<]j+De"ZeR(fL(/@Njn5'&<Um%&YssN+Z@_\K:cBZoh%.pLKn0Ho>JG`?@f.$AL%Ht-:iLUCa*hl17k([ZY\MB kDRh#JS]J.jt@S?LBh0SVG`AN.?!gqAH,4DjnOmK&iG#[P2X"H!fb(35A[cr$HLcY`cp^Sd_7N[]@o2i9,N9Z7:STafZ@E>,"oXL>W""!IsW`n'9%>O9 .]!-O``>qCK02AHnPOA</-Ql`?=`e>YDK:f][ZX;^E.mF-#JpSNM45q;7Npc;JsgQVtD@dnT9n,SN4Z_8V3ML .Bd1brK:0]9[-'!?gqOl^$g9#<IWS_Ftih4UP1S:e3D`&2*KVC=ae#m5qe>he@F/#0AFi44-GFOmX;1Y:ot=40rm%G)rZAp*;A#$]bXcR)FL.ot!/ HGcOY6eboK>T;c`Q@YZ2)E95k0(Q]Qk2mG!lA;3=?"f+s>Z9gOJe:`%EA]IsP^bL%a0'Qns/V/oqN*X\A*SWV1 7H.;U)V,f?6l26J)'r9%S8DHKYBSA'6>d#Y5bHErPHIH)h ; dcmLG9=WNP^dV2+N;./c>/.'Yg,G!Z ^"c6S3H+AJWf =i\^<knMeM?pB_D;qdh5fq^[ ?H-5Rk[OE6XOZJIFDW_bsc$d+(fGb&32K],qn3L,WIrs?9=/*tX*A8)t @$(OZ1=.g'PoJ4$h!jLi$8qjgG&TO*:TJn2(IqcWXjaY@LYbT*Bi#s&.\$^XgkpdF+o3<N%X+6T!WEn.s_Hnb_bo_>@CX$IFJnC(,AkAGfY;jttXd;0K+,#j 74DomGDbk'5OeaYs;*0R0b]$N4L+#f"s?8$`t9;OG%Ya@kQfAANhdLr=qRH:2e"T)/b`Bjg(gD( 6Pc+^[&VdH@`B>"qJ8NAa]8A@t%qd>[l]RHQ]pZp2\In6]5?]l%j`&A)5r2IQM:;Y9rN+PP*lL`ZCP_@tq:r%T?B_$39l r\hehLm38iF,_7?jQ"=.\Hs^+nEs0Jp3[d.3mjc1]OH#`oM#1ikiAL`]8p&>Y>.AKBB4KGA2r%(Z0K1\QW7!X- =.BLhN=d3as/*B^#MY7W>5hH,55H1,tMY9]Cs?H4/6da 2JSJa/t.(Ms+LPPirD0MXP0!NF6 =`!+`A`RfU,&d3Q+lK`,e)6$IcpA]r0mMfncKc[1WetLAXiV7]U0AHB3/"nBr'kYZU_/3:joQ-*(OE.2,@L`K cM;WT3sF>(s3=-i`o!-XdtHROf@RQK</!?qD&n$-jMR,\lSTen a]8`GT_jX!+Ed+Dol3L=!A^&ef',Z]pm1`t'<5J!0UP0BehNE_0Xi@qjBF$#on10Z2cW?JW]EC b?QZVHjIbs>dIh<+\b,GY!d$r.8ho5%KDtFok?`?bP6:T+D?(qj_=htP0r<Do$ Zk8%:.!0QRS9c%co@b$"\dEYhQms7,>*[6B'WEt"m',f^<\`noMW!_e!c3Gq")i?spm/#Yp BAQ2jmc9G "]FA`L@8h<aRV/se"2iUcA<?e).bMNmnQQUR9?bAc5>2aSZ9X!Ym`fKQg%ViTALaF&-3%q/BE(s/%`A7q_lf8DJrfgT,'o_(dKBR7j=o'G2Y6"O`<-OCWTGRV/=f8HPB:d_/tg)@'" DjV?mI%[4fE^MtHE8eeH"h2pS./3#@Rt0%T"-"X'aLPgmk&1WC7^$e,Sq%Zm;%.+=c)2MMg2@k1JfBNFJO20-$D=@abC+kKf5`\-g9L:tl<SI"+ZGIH$\D_`2%jb=\G9#L@"LtTjSP5%t>l4(mq\6X9jH(?dUVK#t>9J]H0?EtAO&:BW_pV(mCX1qoP1&ArT[oU_NWphc14NNs60UO9]gbTH` >n!IH%IO*(FaeHm-ePb(>t<i1$,V*-/DnjWTK50\H\g.:ZCnLI4e[j8&AOd$2N:W(ejK5^9#q3EF8@5Yr#*jIV/T=#n*iDS2at#GX]hrpNYi+d 
RpXW30Z%":PKTG,4%Oe"tp1(LmY#>'beip%>6(FHP-)+JGN`C>D4ffmfcYsWcmJXBpOVON4``0Q SHQG?@!cW/hoZ ']MH'^0T!5"(i)h]Y\(9FTfJV:4Q&ac[Z*f:,@(pL'T@0:ooO73t*S.S R?@3gc+=bcCM%M[$P:K<;o$J='rX9Sr h(fI^<TnMj6)h +1ph7O_jnp?sn!5OoLBUT(f`ICL0jFGm7LOhSRecX[UU?B3Zjl'K$?,V2Mn?2`[X!4n6Z/1st^F*\G,Y5:SNHYpGcJA(#'RB_$gW5gV-\c2F$I4P/+< [$j=j1o=r#'i#!RgPKn<W>fR\'E/#][^@Y$OZBe;-88NUnS._L,N\$5cs atk#T3A$ffIU;''(A$%rrsBGD;)f.Ar(>3/\BQr\Tp@LX^_71-\)@q"5U+dG\m45Z[RWI,RY]=UE=nSh4\4*(F4K?&Y-XSFAP-irK]<">lYQ-?o.j_N5>#L:^aaA?%Vn8VQ3'@m*sF#2Cr^CNV!f"S_WMCICaMih:_?p]%MLTPB6C3o^&L!<13?gm_qY)o9;2hGRSJoD;Dt-#Yn#e;<1=OL7LD74Srmp5&UAq(t2;"bQ/\5n.@$kZ5VR*<d\hC3%kp^M]JHpV3Q@,QCs$Wn!bo%d&H@!eY\e%pIlAi+J$tPeo4"M5hT/:NVP @nkMY9n  E[Nl^"p6P*R&VU&AOtBA66ohb4\f(9jj\fI0MUm? i%\ZJ-$Ea"W0X,lm>(Pd -Uq%T`9j[S4J@*g14fA-LLted[D.m7f?"#0olA3Q8%m4KIoI("&Ks,.d@5.m%7S.D*M"XSC<iUQRKUHa%e%8LWNS#]I6]](9""8pJL'd'ba\l%9Rcq%n^hZ6'=pF7^rYsoBnGcg_T+MFV<k0-h'*YjT[-#DfD/L`6g<e#;Y,9mb>qp<4M^M?<`Dt9NU^!=UXV*Y7'P=jA.:-^ _hP5oYKAgO/-VA];+43=cTaOg"(G`@$6M-J*C2.jPo9)Z%IOq7K'5",U#XK(WTc:D*s0oSK%(pEl.Do3Qof+P9A"Gs &o-20FR_;G3Ab:KV!q>'JBX,2'^>->L`,,8odNI"\<&PEk%E99kZB'#\R:.bgbgUj5**>=NE ok7F-:0.&DXk1Y_h(3SI4ki"GTe@%(rc=t4>jk#o!@rTC97+bJtVnc^[*tVpW*6NIZC* ;kAf*))ghWh+T`47Y^6h?s<P*0ds(h*"8Cd$ ()W&_95S)U9>t\MZHTgTBZ01;3;TQIMbjC\PA]r0$D8Oj3>n=O ?N\)E e>a@\+AS77?%N1K[qjLVAgob#IUGIFa8lQ%tCBIYXCjf?iH.5]j5+*!k&DsG_E_H+Z"(PJ$+l'PRmA XO>5BpmN(MqiNP*A,@?9Wi44tbi4'0'W55LNmM2AZfU;DGU^l,g-XENS,rh"ZU$E3\=S&DAEm9r;i<FbX]ZVKNDr!`l703Cpq=jl^0;"8CIPp>K^N-]cF?I>n*ZLbA`2<5[7KBDLZlA1r1%ls9R;oNZJ.9R,]eQCnhBnF)bUL&L.?RYSbFONNqAka]OVF*#biD75m(&]@k6d5%dNiCs#NN=e\=^itmKL*UX )q\j=hb'J=.s`QV2hFD1K r(oZ(lmdXR7kK9`j?MN7,ghZ`<YQ,TtB7r7Xa:JYSbiCrXN8\iL a@gA[*!aE-e6pQ3o_ef2&g^Sk7'QH5cs<ae2%L8-`g:t^sG_B+g!92W*DBZ\:@1/47O]Sqk:kORW0F%A^<;W^/A%bkjW \&:kEh#7)=+b\kcCf\=(JII#_JN:=K r<N'^g0a_rc)B'F1B2%1OdkoF; TbAF]18 UK#TW>r0N:_>A Z#kS(V*C 
Y7bAeYGTpK%;tl6P!>Q1W!SKT\FS8hq4MV*:leK^H;N>pFkI0)F9K7MZ1Jpi8Mp2T\_(sjO!b-Eq2r1:[4%3V%gC)pmecS,cs0GP(/(l?X260AgGL9:&)hbAUX0q7Wl!Vk9$D3.X8#,COLJ^a"B.KbmT.*%eJLl'HM'i81@)>>qfq"HJ4IZ*e[&CF_FgnM]WT%;Y.H2br/ir><arAedo2\$3HdJ9I]8T8jm(88W9HPRp?rG4cs]g(*50e;0ZX?HdU] r.+TAAPXo,3DCZ)VsRB*>G-]="3TJ]/Z9&1i5a_DArr$d6ST?+ 606gF.trN;AQg?2TXfUmfNE0P*Tt&AGpK"*,,]\k':dVcTY:GPXXW=6\cf>C$F@-KJ$B6n2ZR6U tYkl-'1D\A<l=2E9TBINsjp1(&tsc>Z $"%O+e(QUcGiXAO8JtZ8 !K-O]NEbc&R*G)_:D^nfU7p->OA(Td05<Sj/%;M!dn sBZ=NFGQ8*'M#7]@&gG?sZ[j@m>*?\`A#D1&$Z,L7 3Qh9i)b=L737;!*a8$%j?Snd3\tr>gd\5tXUWs$!aB*-67+&l*TL/`G;c:^\L j=pV5:[,pE`tKNIm:"r%`b&^61IJ)$2Af86;=AFkQ'4H<`L/S'X.3[?VXa7@WiOQl'oV?Eg"j;m4Zj@$][-.4Ak+3DqADc8+<9rZ+h/%:6+OQ=4-(^Om*s"6hnAW_3tb=?H5>"f5k$bD]a1ULOWdf7I$/j6YeaFq+5rrJXWkAGL!;Cq.P&KD@IJR1 ah[nI- kM"l5=bj 3O4F%P8s\M4T:#aZB 0"dgA.V3*9L[n s>F+RW35/TO<.5iX!P;X@OVeZC=p&]LO1P!aGRn\!MiYF>n`ORkFbOenKAD!FKg.RNAl2jiE t;,;,i#k]n;SD$c+1b9'j471C!/0I RFm?:h,Ac_Pl8orLp4&A@oUX65JFZ7,;?$ZF$c)!Li9Lh/bLr]gCRKA#U)ZiE>g?jIrAR-d^Jom7ON9Y?^kaokCpR'QhF8M86-4g<TF8\clNT <WI3-Hd(;VkLg\V^AM;>=K?Y',qiqE&3F4,T VU8f`K57\j@!sO(Lbrm$c`GYK%Q$fn0P/,telXc->@[Bn!UB+l')=]A54'PjY3$-4870FO^0+/8_BDHd.2^4.`(/I.dOY98T0NA>)t^h;5*:+KJ-6#@`=B/t@hHR#p^m&k!;4K9B^4FMr &70R(ErANQ^7U 7E123;pZtT+72f69F>4;HDg`kAg<Pp0IBJU1]+cpM9Pl,Ae8X5Fo[Z9[r(hEd(>hLH@: slgm+_4Q@. 
=qXge<."NP!0A5]frD]s%/;TTUDfZ]*\p\c"HW$=&-#,MA_VE".&+o%3*P+O?E$oE>2o/&R.ce-ST6kE<U.G^PP<pHF?@OXAa&\4'.+%!%4G/?"IYF<@JM+WBRS1$"f;tLdK"c;a@,pAX[bc^p915SX a8,'ksR3:m'k9CE<1ltZ*"]4gmg]`ZFQkF,6CV7(-Q3%:==$$Md>ZG#;,8:E?b<`3QWMa>!X6NOT5CtIc9D9\h&YbD%0WAPh@#3KK;UgB'XOp0SP9/6T"mA`>CaBeebX4eCKBWtOc"'HjS6[.*YMZ5'm'R)_88g?P.cEnRBb+C7q>Q@TVX]a-DfE,;Z4n[0ZV:TGQF4>cc\IeJAlhNJ3`m4A.4;fOtMI]E/1C,<s+n.)bGHMoFpA^BZ/Q[L_@(2ddD2<Z9!Y*K@&_L0N9icXqW:ZK`RPEo2TM-14I&)d%)DJdpF3kFGgP[isS/glO7>%>UrO,spXDX^7pZe/B5@='S7ChLD1DiY]U6"VJ3"pUPJC!`A"2Whb4]c:.]4B3"#O+Z*4*%aJ8BkP+7E/jDcH!naD+iDmS`3QS%OBM>Q'cZAaZ!a#Hh?A(=H%k!;]/:AnH5o&JrV8.sZ%c=S0)oA-lG[4_Y2TT/KW@:^gtt$eq?q]R?)ZEC`Y!^O&/k,I?A3:]<`2:As186Y0PPBfDm34gtctp$8@F;BVd(,4h4[QAY-tH!TA*nY []?mQMIe^R"jrNMqs*?KF$q*00tT.l7t0M9;S>]MOf=]62.RG`arrBH.`(ZP5&3POn%=,aSEW >Cb&Zq51r/^MqQeX!a4U`H2]$$4OhKag8jdpk-JALjcp8M^J7*ga7J0!jTBPa AfR1_1aD"4@CUe9) k#T(*3WG?j2%-m8$3>.A02XQ$(P3>IC7AL:kO$7kJ'pdWi]A-)tlA&3 9dh@ L-!oa0r%7fg4i<\TejfAKt?FVHV9b&3Kc;fmBi`TF9f'\Sqo-J%`F8,;-&i3=B+F+>qWglV;!\T52SeM0X^D7nYjm<@1i4]j/X2Sj[L[5-8]r*^h2_Ega6##!<mVqc*E,g_RkN%ZjAJ(i]4:n4A>9Jpr`a%>+1RDg/9"G"@><RfJPUaE5^jsW+ds'QIDSaiYPc@mn><97=!nW>cg,r=HP2j:8Z%bM,3)&^R+=dqhRmLgL*p%Qd4cDnTJA_\kT@E=_^t<3 TbBON/N4p+ohs=)8d0AT&[[GU6rQ)U=@AS3!O.:ae%$^a:eF$E-B*)NC+I]RMb(OP:DcGh$/kmKC^Fa*(aQUa%<m.*8<;Rj3r:,t5e:\ghMEa-c)o DZ\fsc\/#Ue/Q pO]j?A=BILDDJ,]>aIhe@fTj`SO3p2hf^cXp],Pr+?^N?qi7.+PdZ.Mc7gKP".^,^M^>)Tkg,15F*OZebWqLVtmjP_'tX "dJgUs^=('a99?t%3is^Xc=b?$+R.TVfX7LQ_4ZsSs* V<E%#6d69WI>:bQRrr<="DSFR;h[*=Aj4j:3:3:&n$LA'="tGLIqr>Ho4k0[/k;$_abW0=ne4`NlXo1!VN.sA'5'eZ&;%9kq)#s!IAfo(KD+?ds*nYSVcOrhn%PFM%hQ*Zh&ArDNHq3p7TS&Ans>Z:;<OeJeGf+HmI69$,)(e_-TEbl$-,>@:a3:8-T#rQfUdad/nX&B/1%4SK\2H[]k(42sTnPa-om6ofNB52NMl('Wf\R;G!h^WjmtRS%`R's 
?A^KUf<>E)aHs)mUo/U_-,1A(MS]C,0,3Hkk6QKWYOCBhMp"o.T/mi]3Xd8e9%296>X3>Kd):Fr41"T::Fg&<ht[WAFrshW4"bL+TmSsj."XRSW9gV:hBk&/'c.K_&G8tf.$o(W^'EjdR=m>k#S'JBX4p`d+5[V'T!rHTV!fbSRST$kr8^*s_4,n_HRXCOUb:Qh<:f.^A*6T@nb!eiPa$77Q;:-2lE=FC9"Qg<9>8API'1:-B=\_X=)8'UrRg<?mj%fRb7aE/'Q_TmNBEdX"G)KE3&(4C:]_$c (\83qc]!V\j`(B:]3LfAD!nq(0"YfM$i'(l:4tWSane@,_9V<1Ld">U"'4>V7Jr9k+lPGT*tZ((i.OfYj_N\PPQHXA23G5pC6NgFAYJs0#+'Ha`1hVf/>td- ]8T[c:qm^IkKp1*!c1)m1lYmKj@V"GcI?^ss;ZJAk%SnNfbPPI^>t:Bh-BaWK.l>W!ChHrtKEZ>ee_\MC^\C6]<:C$]Aa=PnMq(l%LsV`b&A/?iCoX;<jamM-"`(.*&;8'Ub0pM6Nc.Ua_F)ic9FIE,,1F1X-Fha"i#'@kO9P"X[I!oAXIDj)Z5Y0?T]PBb?HhAWgon;\ep1P]kO70_:C-6@%`)9N&2iF"Y>oXsj2Eh-NEX_'"$p`1SsA+Y;@/U3R"4TmTtRS1PX2-N(p(tE%E[!f54H X:=RX]04.tMH>kd"_qmXao4"PhQN [)2E3X5^KFGUs^*6nbXPPB>`*m_X"bPRr_j09<9&MUXsHee&m=pR[d4; GIhr<dM2n_H.@0eemZa*)amJ-tf7fY#ce/^ZiMOYQaI9q4`fHd\5ANs_,qsG<#`+l";l<1B*<"AC*SiJ`ljgt#T*_GXk)&(5FTY9`XLZ`p:h_hYhH-2YaIl A_fN+aBekG<`,nXK.9Z_WB!A&.7SjG`9@,`A7^d`9p'eFU8[H!9eHk;$#Vc1Fm&!DegN(W^FY4k(.mL;sFAZ4n%50%5SQgto935ehGtr=8@&e*W7Xb:2@ZZ!rV*34+YSMDFf8=>[^A-m5:MN 0BfKb"V.72m`$R[rbZAk)o r%KqQXP;kG6AS9oUk`rt#i\T4@'EhXWqDk)s2%VVKYrf]>lZf;4CrH&LMT-4_(AX"amL?UeerlB<NofC!2m4A5It^qM2.Obr7FQX ahmRtsr[gh[ c=knp)htn]=!B(q>:f&+"ko?:K e4m2!=LI55i;jg,-;NXPU3gAhbO_7Ga$,1FG<)lKK27]2468]I9NKi>c"&B4E%Ga!qWcc>A0Rl\EtN!dkfkBM@.+dS^`+nEAiaE jP^7"k5do[g*R"eZ!NM1_oeq+?E95+SRZbZW#mq/-p#jL65sA[TfB+V_+8YpG%CjS/R=eX]F@hf-H8cARJP-s2/A'A[s%+% _d_roE_7!J] qm>F1&7,=@0c%0o6NGP%BA*Fc9@jmKL_"Y9PSG"'FJs#Ki+T'cSb2 8ZA?:kYUQ[UTY_Cl)%9.'YjB c%[-c0/h!S>fOFV`7pFcXEo$/.k^JA:XAPqg@\ ?tl A(;C,llcO%HbKGJiM7);(PK#*6"df'QK@7>%_ils n(J=\(=2mLPK4#@.k_RRKX8qAeoR%-'4(#cF!Aa9^kiF8gInAM@1J#cHN)D]eUSh%.U=4K#<:R;]ApDS!N/UAA[@KPPQG?j'-5HoMK"a"P+=6E\#a)\/'$XH:f9AK6lAPk=HH6)&a=,[<c_('E$,f5CL2kVGUYD<;/V_l>L\[00iK8Jbgj-lqA2jX#fp^Vn+D"'fWg%pGM0L;eLW-]]k5a/fhh,#7`75hhR/a*L0;3pQ7P8$+le8YQ0$lGoObrc=N@fA`_m`^S-C-:e"!/GZ@"g2;(+*7AJ*K8E''4_Bh(K0;,)3, g't>enZq/"VbTGnJ.r`9Z<eViH-Y91GeV^n0VmM 5,NL -ZHCAEAilhWW9W\e])(#r:W<A.4rh6kgMj`9cI<@lp')TmB*Q% 
`L8%^!bc&W#@#*-iT5</f?nmFWLVAciR"Zd44T'E12K2J\$5*LSUoP-ch9PbP;1 X@=4>m\P@MV2Pg6b82'$X5%C*ccs,o8>G'2Vnmrf%7- Y<^>TL#be(V*ALae<3r30r\^=a#^o!OA"@F9=CAgO^nUC+*_%b:VZ<0ka>XB#,t;4]S'Lf[3j)7,Yl=TrK KmJ1@%QebB.J7 ^F=g)"QP,LH>45bAF*l @Jh<g4+[7M.G1SPE/nTA6PfmA'GWU?XXkL0)pMo5Z<Ap=Oc@`A;5:)^_M`#"gi=@sN2o(:L&6<=)iLhrp1[E_=1=tf^-Mm^?>!ObUpj;;eH](mN#gAiYNW5VrcfCc6GV*aGnPh'S@0@0;K\it@r`lsn`hWXa!mQ"Km gtp-:^;>O-4kHE`*MrSq)F^0RI<8mBbo$=0e%;)YT&2+?(!jjaT=2Qh+nr=#078':9DA9m7 jH3gh5YM\]XBls%gibpM;bm(T)ResJ$LGs;W`!GNs7R387UL3@CoMX^,BaUT-Xaj'6$,30i/VlT08Ftl^R M>`ltK`/?.N,MLGN]?T:<Hl):)U/dG@X<UYae!:lB`4X3S'U:4]ZUDYK\aFQh0jH)1fj>:Ap'k16(AM-KPd3]KQ%/=eGFLi`UUr\:?>A+JMa=C^?&(5GchU!AG]GlO<`(qinQg/2t7;E[Q4>8^(A[>$MNj4=/K;S53Kj1_[(7%aL$.t^XltDja#<0,.#b0lYG%$9SAALMIcSoa7Gl:`G#W)4\mH5"MH+WL7FF0EtR6K'.^0Qbbb6pkdB<bC87nI-"1?QKmA>>]6/Fk#`MPG5BC,V6BfQA`gpEEs(*M?rl>-RI"'b=jEAtl9J5Fhr,BS?EN5#83F<").-s^i<G(pF/=BbrD.o;hgGmbSC"0Yog9B8i,??ddI#/FD(WZkI^?(G6,$2[nE?QF+THAUsc*SRE2QaA;Z&'d5QFW/F/K*HB]%VeNK6_'XQ>">GXVQC] 0cAL$p01/sK\ !gR23"/_nA g5\"'Q,(<?VG_`@;k5$6-Zd/'99ilnA6nrN=!O$]X>mW@hMd`<n09-A?-?HTmGjP`trdSt`>;-S^A\d_d=PP2AP'VC48n=&[(jA\Y0DRiID>6$hAN@ M9=;=Do7=MipjH\DMefN/7Yt/h/e]AZr'6XQhPVL5b/.?Y;S6h-cMK`-Znal'^>.E>BSW"<*!)h]TgMVSBeV<-0X&olt*fk^3T1otA@N>Gb,Hm*"MY/%e5[j6g5PFUo+.qO$i_1=s8mf]I4dRSW:4da'CG%"H@m$<_cC>*#@V\k<CUit>TW6+6i1&Fqa5Kmd0LQH7[EV``MZnlDDoTjQfU[eXQ3RN=SF+pVc5o1Q:EH4gojn2$Eo&Mf3D._j(`fcM;'gQI>6Ec&qO%55jKt^'5AF=]$a".(A_Q(p_KhMeD<f;/Mm;s4kmsXN\Jk"h["%DKX5g'N=k$,IkK+B)`c[VUrmUM C%$82XHqG& f]D'"T#<8qch#mNZ%"JB@r-((;!HJ!YBjZs29?0g[;U+WUn`LE^r1YY& /?U7i _.(>(mJ2H<pKn=kDM]_Ya\lY8.cH,5<V^6]b[&+tp_Tgn!+MZY,]EEo(k%(gX 1\bV*,U.&>)=gm$L"`ED1hb.FY`kR2>FcS6Wl4(7*=F9l,4`TKEB=(5Rr5<A5?;KC2Dd\]K!rY1)jtA3<$T)!N#;g\fh%.BisR3AhUHfpO52X(t3fMthhYd,br9O[E<EF*I?U/)T/Aj\"/dC2BQl;3XW+k(Xs]N1be',?T c=XCf s`09F2&Rh)*.4bm+-&-GiNnn+e&ed9X=jC`MSA3j<5_ec?Hd)!cP=f/9`]B@9c)N`imVYZe(pT`4/3Q))UG4%t$&+n(F>"P 
?h4$f.@p"gQO[C>n(gPR&l>gU/I;.PU1dF4DO?%^m?AM,aUCGaP@8_>ABJ>/jW)m4,'jUWTFga=LA9A6GnIr[*r9NDRPd,8\S%JV:b?8J"Af8(tf*6LS3^`_sh86/FZD0h'.OXjmoOOipn_5@6EhHgg3Em/k9Z>C< <tl#0:CQi:)8Ld&@XA6LfKJ%E,Y19X>jP;[EN4iLRPa$\="`RLH/7NS?OA /kCX/:Kl6A[qZUrl]dgBfYB^WjFj[Ho&`9:s8a8ik/ ]d-=[6F[*>30 )c0"JQ&!n1pde8*2Ug?0"E>S_1+lL,eV_>16D%8iWW%?^\cbN<Jne<a8:oZ97nO>C?hQ/mNla l;fbt_!NqG!!2rO\=pH  fs`.=XdqGr!AA)H7H0lsAhj;O[I*F^[T"Nj <,8'1F?U\': ;JB=E @L0B&&4E>k^=oC;<jBb9-A5 >(ARWB>RM<J#Kj#"_X?por7Gb@D#hlmUmllT?E%3CJ:6-/$&[j &rc:1%_aM'DTZ;LE&qR6\sr5s2^1.q&hJn#D7?S4rHR,^p9hGbKr*Ga=!QbN;1_7V&ht[PsU1:o?2R9\-emDL,0PR4t?,bH8Btb62>E.iU,nrF G_U-IM!MjZC:5h4?tfT_GFe??DS07>U0RJ(r6&RtPR]GSi ` L!aebX\<#;-L<Z<"_-%tq^sN9]7koQ!c+qT]j* /8l2msVpt:3`q8n+JB9"f*:?YY`1EOO+sFVA:YWaZ.'67e#nMk!mQ;@Q^iV1Rg1nf:N<clPqrQg?/jVTFkQH^1lc/CI`2?5gL16R]b!hna0V"'*EQB#F0$9pX &jS_^E=8can_`DZrl)0J*okpR8j,n1fD="W]4e:?UT33oBSQjO"ga*kO]+dB2=N0#mBRY.%`GR@!?$k4D'6knbs$@4rHKRU2aI6<@O]D$rWEh Z?Z1[3j`g'jH@FR=5S3`.,ejFG!aoIm*?*FYnYs@j/VB_r\h:K)Q'0cRi?JOFtL$("ol[+0E*YMF1_G$E#9QCSP;@*:P8m.'r7O[+EB<MkS>9ZbAR3f\cs0aXj=/I(57(P27:)?i,%[9WC##2I@l/MNB Tq4g^@PX"f1R3`)c(G?AWG9]UUE\G9&s&05dOAJt"nmF7I0rTe\U"eUm;H&]Nb*J ;k2?fLYAetX7.FH;qhPO34e;dq?_6 4W\pQ[-b0]q]f@8rSkAhb,[0)N3t\CM$%I]T@J&1McJ*QQbK_%:tH`2AAGo/nAN_Jms5N<P>f2Y0Qs3k`<9p5Lm<R47O)^$EMj`B=WXIIe"U^qU[83`ACkidXHs?Ci=(Hj,5jl(NM$lXD7W-P\ /"I&J9AEo$!,;YndRW.Qp ]:YCk+RAL_&p4]Mc-?_k^6 Y^`gYe_3S4*q/pq>/C1[N#OGK?42e(7Zg>J%"jT<J7YQ)$f$5f)^!8-1P.cc1E,Jk*"?TQF4HV"i?%aEc/aFk"*.s]^?)]tbC)`iI]R%s")1\j<0Nd)%SanE`(p5QNlg)B#eZ$pSs(GYmJNU-R%Id8W0_.M9dWC!8JE8crR7*U]e&ZTfQs+O4]?p&il6)qA[?G=6iHd;DAm"K&9V!QEJ(/JZp+1G]lX *rla-K#Ji@B1UnaPs/#Fa19 _["Y0K>"LE]pJ&fMAA,Up+Jogd#?A+HI[8m75=C5/g.SpQC3bkVMlA-ne\9Z%^Mb:G_*P>s9*?l'NU_OTLIQ3lgp$`T-AhF?Q8W,]t;j?m^1"C0%L`]9l`WA%A?AEhEqagE]l*c6;AK7HBsZ!O ]!BSebksPB,p7T@Q>5CIsRh[:1Z-*(g,s\I8o;ZFbE[R)QO>&.1fXW;XUG#7,ibDMQh$QA#,cO\pa&G]c*"2!rZ//k<WVRCk)t,9Z1e((-;5r)Spe _"#Af?#9+!X#q0OF-%C;qZ1tB=0LioQ+&W6I.tJH^[]<jUOh(PK$ql=dNAt-Bl<-e=oYcQXi[l4sC4Q,lF`,@)*9&2EpD""Um 
hh$b9qFYURhlre\2&eYe)LH%F#1$j3d84^!ImdBA%/ccf/<#Y4&eGs\\d9PD%_9gdJ>0Z':#"mt&!)[(./8VSA\X8%teD@itpI6 8lQ/1^_]a cKtH6o%*C=AOC*G+Zh,7K,);9iVc?bm:VAHT?/_*U\mAfAm)oGOA2q!6!QJi4"`hsjR36p4K+LA'pt[J;1tEZTs!h\V>G<,pVpA0B0"D&ckk,QCRX]K2]*BU>8![4U:o7K#F3$H4mI"t?r=%3rj7_I._+CBYa]V_"% n%9cN\)+FH2^cl3mE\=`)OLeEKs0AFIFBlGtd> kT`s4Ql9A@m54)'DpL5J:Qc3:gK/`>1AfWHcA(+6e>P&]9'/<Kib&\h2Cmrh4sA<;&Uk!G'..MVl`DB.808ZiPk_Rab@HcYoJ&Y:or\gXkDl(9d)UA?0%R31"V`qX*?OH$N\KkYGrBU*b.2#lGYRK)g!B(-U3WeS#EM50 aI<XiG4*p6<ANTG4,d2U6'l@ScS-e_%Rq#Yt!T3% @4b0\pE"X37N<FO4ONP[C=$C8RO%%nR]Pcsj!AX>0U`(NpFd)A$MIFPM'ansaARG*TH$dHp@@j$&T+]A75M7)+gbBDI]ACmr"E8kDqA$JqXpEtH3Va?QOY&!A_=/=7p.&c<pLfKgIMNg3L.$i$Q9V68Z6Z&S(P/CDEr^Z)*7jUrk<LU+EVqNG"KJP ^A*bI:4PW<bkBF_dg:#4,I"E@9#Xbn7piL8-[T?GmW/9MSI6/^OZ7h>G5_K8Z?+H$rdV5p?3ATY+.ea+k[8-Q;]!V2\AAc5g24:,^MW?hc= 7tAeqM='\hjSp.?p:O2BF8$TkH>#=6:Ddd9,RDqm61SIQ(+_d4">YB9et:P[4s>6>ZYeL*E415.\=PJ;eWJ+kJ3':`s^H,<t<=gRp$DiS\A$"?*i:3\r/'^b@Aa9%"BY6p1:AMj(EO\0BjrmJg[U$637drO:=m\U/Q+_G \Y@nV]Dth+<[& d`[<5;,Omfo"D*]USf&R[q_bW#W2Gpp3/!AtbL8J5)Mo(7MjTB-<e,git^8m;oA$1/SkGI$#I8ss+1OFKglHp#"FS9Kt/cUq^r?7sA$9/jZZdT0lR*']D#@1=/&k-ASk\mm*bSL\"@PW+M@'gt5qs_@?HW/V%%D=T=bO9OR9aT#*_D4h;5fgGY6&t-sD^]sne/9d0j7+[YIgA4kDUBXM0\C0jC&:Mml'<6h%UA8"2^mCsPWWPn/=jqpX,HK,4Nnq7lHNUeg;bZ^q&"8CV):pf\\tRh;7qa;dh'46Mr7hSJ1.6d/`_9`CrBL;N]Q8QAWk@"\'R8EtF(W)$D:]:h*t)lcXBY#8"M/8'\g9ARNM sA2L-\8JAkAtAYji8Y^7SHNg@aJ]ZqlN'sBa(ANIilhM=sc+2YDZ[q`Ab`08Qbe9C)Fh6V&aD.PWs$hAK4!'I-k'Nc:jAEaEUsU:nPC9t08=Z(9MDJ:`1oG+>/7:iY1=Ek5UK^8<jN\t`-R,ZAb+q(L9f_Q^H2&k-;iB'\:'BV7'TkL9]Zl %)E5s(J\>CnXV[@^ dEACT^^;0^H\GUFTa@oRb-LEcKo33\Zl]0*`lWW4:"SbaDkO5)"+3X_,_!J5c*Qfb%X#Va^6oV_/V^[%IVlYI>r)@,*D%dY;U+B[`\L&J/XBhMg9CrTl7FrJBJ_$sE**8i2ae:E68aGQVX+Om^q\GM6q2F[m[[B6'h"lQ<H>.Xejf,5`H%[Rh Dhg59IPkQg[@UG6l->.YJ0PchkF.jU69ia*L]NBA^MLliPJO*UI&AZNDf%qX?5UXW_1:AOFkC$I$Q6/"RMZ?' 
K;#b[7IZA:;)'mn9oAc'1"'QM?#!IfT!tq+<\A-51sS^s97deem-.JT]q)7S`(N]6,,j#8G8TCC/M6'tjhLIs!N?7k`F5jsXQ/^YV<1l,#![c`mT6F',B4 ZRb:=m>!<Ff\LnA)'-&4sjK+;;JK:5.,Ip^,PT<?o#,AO'":.q!<kZ_mq0BbmgNr\6&C%P=n?A&EQ;MEH6E O5]A5H5n0M3,T.@Ll]rPZbq]1$mn7MAmLXT!4Rch^Le%G.3WU5dg>Fq;jfLX:;9%tQI]&NDA7U1a8k=)"AY5I-tPH#$tCOEC,->'se."W%R"\hAiVcnMr[[;,RP8#+g;.1TYIEK`&NRt^MoF7f+?>8`k 4Vckq,7.AJ*%U5nZ&1<8B6d 7U#aK^NeRQbBDWDhSD!?q ;AH0#[b/M$0Stjd?p[*rM-PR8I=rT,U(/E;g<I6:)F?N <hCd9BGX,6FA;?GZ Uq M^/2k%iW"FjJ4@\ja`g-2a)H3f`#ZSN) O?9Fccj(3TXS%O'+D1a+TAj_=cd+K6jH38)?cZ[Ftg?<Zn20RQ+*a=L 0PDJ_HZiDR8imqX>P/\@Q$ZO.ec:)bfK^189OANN'iYO=PEe!99Gek=2BAq\ctHAfK"A9B/Qr#"=YSY$N$;OE"Q*Hs<;N_OkJ>U:(BSS[NALge&RO5^]XqCJqGoq5MX)b,eBV,6i]Q)a.GcMpK#E8ZEALQj`VT"nA_VCjINfki8V1#KOMYVs@6*$nt". s^KA<ENce=1.Y,aC8!oMU`,It+ch!r7iNEiFC)pe3,0tVBWbD7q63]Q,TI,-K;\t!]g%:_i\o#_a`<.2Kl.e3+*\@;]AQ[jq,]d-Xdf\Wa[I9Hd\?g6*6m4aJZ: _UII lHOY3%+b&_I]6[%q1Z%daj&^Hcr1Y-mE<X1`& $R!NYjt.CdZXi=fra:MJpSRU;B`LA*\Gp=cA*]cGp^=JS/cUe2'b`-B/i"/cZlC^PmB^d4Df`ID7XmAN@M#%BWJ9[k3-^c+,n.PW.m )OnT*Z7WjTb'q7sJ4*/F!qA7A%:([<nWsR&%4_tFJ<ikBJ%&')Qnb`I/TMD%*+M;eB(`!N?<]6X>(4B-A%>H]`[Z732#CRNFEZX0*XmWcr`V$/#DNc(a. N 9NY&-i!D&]ET:/pB1<o,#!UY+<9jFkq'-Ma"JR]L!HY["<Z8#W*ot*WQ XW`t8''`$9VT>AFsBQZ<s67ITFhYE`m2H1V_V5Pf:IM5o;=8Zf=U,:r:`tU4p'MA?QRnZM"nPj,24).Pf'Y1;3jicjP#9ehm!MiGGbHVajc1A[Y<i8=/+8?WVJ#l/KG:7H*PoXN/BI=S:!U?6_ob,A$tmQ=6to_GX@?7P'JdKBB&CO9V(Bq4<ZL/'d_J+"#CW'b@>0>.U!7f4pAn_rgO8VDWY7ThT)H&3C5:T%@AqIK50Bn2:f' oG]8^$Q7VQ^N=TQPrEm)^IIbDB\oA;AXk&F<Ic7^kj(UaT0E5NX_A!iEYb1'?[TJ=Z`9^!+=Kb)TSIZ`fab>;*rPl=CM6-5fB(ZJ>#l[+(=0[HXjC='iaOHfO"^BFjN7YB'o[,p:Ei%HM7&kZp9"403BHU#?GPN m+r\2j`,T*XL-*d<n/5gnN0).DnQo#ceG,s,6[J@R\1:!d?$=TYKjW;j`Sf0 BX(D\!F e:$U09;k['6d=R HCZ5s8)JHc!3SDSUc5ABSBO6QRQ#:enFr2*jXoc[Zn=H"%Aa,M1+LE"S:4TL(5EV==C_j$pj$`+b6i2+sX12'-qo&K]!^.^US"Y R[t#3r8Wm0g^WndCcle.:6e-2t4=jZ-eKS&CKsU?]@a5U/9W> LabS7 Ke. 
AtPe+9h5A:rBbLDU&MYk,Qg.V;l30V@jM(ZsZ1.Ic2A[oYE/Hl`8'1s.AOC$* 0q(^1At6LA.7Pb1*'ElHDN"CZknM&pK@+MG;2r8SCWF3g'YRb"!CcY!+$rc]Fs3W,jO'dFE_7Bs;W2`SK&qLYo=?N<A%<VD!nAo[Hj?bAf_A*Af*PpIfE4Qo*9RH)RKTI-gdD6/Rl:G&gGG.\t"DeGGm`.7oJ?#?3>8q8<`CE$>p0h0pk&E8!17`N7?XmSEZ4jA[5]&99$PaNUH(-biCJc.IGrEP*0"Y;J%/Vm*A`mmT2KN)iS%BG 64Qk?@P/a-Oa6f5@0W4*I>"eM`q>X)_-L[\mmm9Zqbd+#rYi&%@\doT:s^nMTa5]].d?< _,r"D(E5HYdk4GdYYbN]h%QZDi7]oc+l&b`Ld>o;]40]V"V.FtPF`(E`nE.]c)=NfAN0<&+cRhi?>[e1/i9m4=RM&<;GW:VYO,$8gb.M;4>_7<+DM+.9/C_[F`DC8iQ<qL#7+sVV[kt$mTKXCNYK5pb'LL)n?r09Ch".]j:A^"=sFOcNA*;d<`K<j,CQpXHLqTUS+?SC(A (CHbJdIl71 -L2)JSJ]8)`*)jZD2apUJAgS@CqF10K=A"d'k^=BYU_rR8DR_H+Q8R`8:I6AD &F@V$ZZMY'KSldU4F[%lL7BagH 1`\,A:BnAMsJq:,Xb9I&Ls #GR":fPSG6)gmj@BU&amZh6Z\q'(]aPDq>@p;:ji-n94iBF-D8o'pO%9%ANb>4($nEgT@k2bGKXUNp=V"Y!ZKfPr3BjT&cOLLA!/Ne`o^+W02Hl3J]_9q)AEqE)MdYf;1tQoA#N`jb"*A2FiY!-E#Y(OArJI+8?#$?Jre6gmP=R#bc>m"/[U0N+l# k.5IN%FrnJ`qIerjU(:D/\ZF5l4`3qYppHj?q9A9M rVrM5M!ng9>Z .$^2Fs Y:tr/AgR_]^6^FRi+8#kss[PBAcW#KGe@m8?PVRq1A#nkFNiT`_QfJtM'M-sBDdDS*D9b57SBX/e4hR+b45<5<+X6i22k3LP`.X1:gOVH>#m<k5'Y2g0010lM%"Cb=]CV70mEA@qCE'ImY:.$JEn=pn`m@CsI8K<KVp 80GVqkLeCNeBE[>jAcq-jU&f$30$II3Jpl76qKf42O7t,s_%NRVQhmQ2Zfh/?bX!S+f6:A2O1OLM4-o+KJrA1d>2B*\F<Jgh24C4*b8P&)[aIZWCo5,%<W)T>@o2g25^LYfP>r,!gB.DbCs>dfAn?p:LZ2V2o+&$XaGhB&MBZhZUH)oXgD6:pn*(0j6K=rH;6??\Qn7Qd-XmqlA.$YE]`54cOA3(["9-*\#t&jd,6eaDA6"D,\2D8"mCM9$S*!YB8A! 
\<1P9MVXgVjp$V*ELGEY*>2%EXTrDO]^.9m:mmh^nV#,o5)$2.!["@N5PF`:(ljL[^ES^"1V5g,*%\UfYJAlssgLlT-+b%kqJ hifHU<e(8nVIaH1Je4MZh,Ie5lhm7Q=3PcX`q3395hmU;X<<f5An^TseI[C4bZ@g(!a!dcBl<d!+,-2[i*Obo4Xk2b_]sg\">KKTc1`hB&3@Xf/Sj;((5HM%j hd-i>k\4(d\@8t1gY!Q2SF>t*O&Z[?QYdo<mNp.om6&rT>I_K%NG3"-dLTco&-,A\LY9s8SX(K_S86Q[I E0*;!HU-)Nr* d1Dhll_G_O[c6^"3U_6^`O#]8.6lF[1AZ=8&lF-TSp9N^9Cm'rR_dfblH%KZlFl!Q1ZM]`oBi*YA>"dJSO2AU[5/R+`-QI+cl(08btkfVY^$)TL'7W`#,[*Xg%.=sgDW:D-$!97NarD.k"Ya@XWOA:Bjb<B%6I4p7cb6q(*D]0+\Fk%g-2Jfif1pkcs(!ahc.+EQE_e"?=H<`[Tk<ts^=K4bX;5lMB.D%jIGc6AHBc\gUP#:f!89g-%ato` h>TT;Ne>bo=?cSkQ/ n^lSUMn6k^+;@]n`0&"V9tqlOi>9ZCjNr#bof6(12\^@fEqP?F3U!W;6D%WUd[>.SQN@,*j:g;g.hL==7YMI4$[g\8 cW<.:8gQgI$n$=+kT F 0`"e+-mp9Nk52!"srFN-hh9ft2 =[q7YDdknBcb(_??0o] K\+35LFQ+SKUl*f^8G`b1 R4NKb\4Rfj4HN8V7>"Fp-9t`<R*H-*O^2QTEh6ScZ#o<!/D`C>B AnS(7Y/40ohoYI`+!Q[2(s5aSOOQ;0gA5-M_SEF=0O/4FE\!C")m:SC"]@^6=%6,BPGNM2mc'Qf\8<=.sKAM)Cqp="[(7`U(QL*+JjMnVo-d-J"'0,gWsff6[3/<;=j@^Sp? 47p3P<&HU;2G0;G[QhJV.(m8Bi!DA&GXQdZ4>kr9$Q?+%4ho=WMFE&B4Q5=AT6Yr9%?[4]&6<elV0,,p<?H]f5MQ+gkTIDhq$^5PnMgb>[GMo.1o(gj#>W>]m6g:`_DC`B3!kD\-/FO_5/iI+5Ls[p2A_b,8g_aXt^g7ST!"U\<kJ1CS.c*XUDphPBonh4>a&AfN=UF CG^X7I^5/VL&T7pD]A:E0PQg;0R\Hg m&jW:= ;tQ.A6=F=<D-A\V-6Q$\17LMJ6mi<Zb_3btq%QOR>%:02SI"9 ^EZg11\e0=2eF^/ZSZ#h&bje9%.H4q@'p4*YrB4+ .ZbA-CdN\30^(JNk6cKJ\Y"EKH+STS;Z:UVSA1mtd M<Dk@:T<%flQ.\I:2He=A[p'I"?tSSO9.*ded%I]>Xd>*hfa'(A]?p3'?ZNC4p82 fn*eZLC8Kp:)0@mIj2)";9SeqtBB^+42=Nd3Gr^Vns3<-"+WF%7$ge94WS"+%[;^#8Ug%R&spIBgUX/HJ(^K@BJTeFYGI:jdSNX1Qo&a.OgOt70s])D ?S`\<#-[S[W"aQkA0m,FaN+4:_%F +m)a%BHkXB@Y61/0 YsS2>.NlnYZm3&n:rQEVZ'h<4^/M[@$qaXM#X(O7j/_iekr,*7M#><$o:f-3D:mUO2gTK82A5,a.IP&ScI+tIdM$)E%T]WnEooPL&$JqoPY fb^D^&.?\.c:_tm0AE+MB_B.'pnltbInSb[.o8!2)I(XA3_+a?#DA4o[k7]2%,B]I['0eoZ]t,,sXd?D+AGN@-9c/2Q9'p(e:,hh=5JZjZMg6\_.n$#mr9f[-A^kY%@s(@Z [8m24'C!,q)=9*Wk$,,,W:#\?HmrlG];sRATTY!29P`MR3B0Di&b+_h_,jrA0:E54>js>bs%8).Al9_(?6Zn)JLa]S9JIApaomLeo0$sDEe0V0,WVe(WR>Gb5#^J$n+7_c=V$:l([4G*eX=2b3pJ<*j.l<gA2kc.Ai-#A8?.A'\cSZ#&WbG+-l\qPg8m#@CT(dLiHD?P?ONMt8G/P 
W.V'J:$J/B-4MOL<_eq]r9(/05Pbrmg*,X[KX!\U$600]RTq0Lc8!"f*h7C?[l_q)b2>='>A5OhP\;D1nY%91q)!>*^J0G&1&k#*3Spm^]3r.rHZEki),`@4CI,2>mJ"QXYP&k^nI4K8:G/2Lc!36TC$%:0 AAgP_D']j.)sQ =80_XNqYt?E<K:e`]L6im%"8O6,*RkW?F`U?1(9?'smBAW=5mE( &AA5*V+>#S59#CR`B]rG';Q#tj^LJi'SQ&od9WnN]CrNf&h?n1lMD[nHA>3FV%=J<e8,IY1SfB59^%qcqe`]#@>rh\kj'$&$"YrQ0k$2I:8#.!07cL1#Y8a)K[r:eAXSOJK/aW8WAd8Xd>hWLfrDk_BPN XW4<D/F]R5:Bo2A[X<$(Bf>4E]h:#ai*4)k%*638_nj^;grgpV&H%"4Wti8lG"J@>J+M)#hdtZ0E7<SZ6BWe.^G7Q#mm&l/l2Xm^6F2I2C#<i!pg @Jt+XbXj*l%_h<sT]fm-RTM.Q!%(pI`[5ZV%bbR))5"h!"eMJKRsjYb=T8Al^1ZXioWg]H4_b"-!ttnZWtS@(\%TB:1->^?7`Pac<lPq9!%&5d=LNtAi+qAVt 5?n!g0GgmAtS7Z4Id:g1 AG3("<s8R&i1e_gge1HNEl<_qM_LZNA4L8t2ssC?''$i"QMgQk?Fa><D!0+<).r7Gfm\A%>X48dPYEk<#TPH)$_Q#,d8qi`IA$!ZmYB/rXIS];mFSI6VUAr`om't&oa2g`th1F]?4\G,o%>t3b:4UgqbtAqHbS$#cD8I8k([]Me3Gm@)Fq\H"$V(s1h?>_^Vc;a9]kg+5\)^+MjVj07]=JY6mlk9qZ_:g+Jre%+T fQ.cJ9TK%YG,71(:kV74(Yir%i$,`g#?sM2Q1a<]VZ07FX/A=qb[?1KH!\nSq:kLTXVBmW?WY"c])sTX<\6rGL3FiRU3.sB<60=/,h's3N=>qs5Jit;O?nEam,7757l=6LA-.mGbk3:q]=#oC8S_$c>mGLEio1K^'/h1.=9pBB!9a(-&VgPglM A8c14j1O/'8M\M3UoBUccC8roZbLOep$ND=bjQ(2%.1C"M$L?m$3W\n6K4&7S!g !4,`pEJ4F5jAY$E),dU&jUiiC4><&!rs<)#?JoWcc6IY0qtb9%NF\/p89FWK>WH.Jq&HTdgddl3*T%N]0p !;"2GWLf[O9KWhs(h@Ac[oj5t@$P2M.:A>PfFi;3*Pq?%MX0Lt)hL<j&a/9A3(!>h5 N?D!si%hn_Btp2Qagn>T\hU4gbMiqO1.'gaJ"ioTbn_T+M%hgGPmU$<0Q*4rnt3bqR3)d3kTnN0m\;a^W<#@MtdscIR'HZjl)01*Zae(Viob5FC?O[#UKK^Z']$CjtT8B[O/<Y%6KG-/g6QBZGBd`;;.S;b7s#>p)h4Jm`k=2c*B=R$6EX*8I*[jgd,>)sp:AT\fAZL?G7!fBimJ6+:O(j/4o+CX A")c4B:8=Qk1V-WJ.kDOYK8th[#Rmk&=f4c_f-R#K*?U9%,pNKDeL+SHmbcL(6]Ti<S4;i5NKcJ2&lm$C%?QgT141 iPob=k&RYraoEj [n_gdRDGnc^qW2srXg"s")HY AAD_97N+XYql<$@6]1<?!N*Ys=;?03m</hA@`d?(3[W4>?Ksb@$='?!RMK;IAHH_k R`lWbJP\B;XAH%gWptjHrN['tc6h!$b@ l,?-JU3M\WNQ/M\Z.k"c3m#R@]gP ,WAa/Aj=JCg2egdf cA`_K1nrn40JM+[OFk:<LFV:_r#'XU[aAN#P)S!cLibepIU3[k)sTL8#]r?Y0WgmY+O=nhDZSf)7:s3[TETO(?@ARn&_5^dY$1iBAbW"n;o==]BGCEiRq>2.3J5A<71USti8Hg*%TtdXPj<i6]JVUhsUA,=%PKQ?8tIisP&FWqF^%ig7'i1bM'iI=i!7a*hgnC1tjptFh=A<Z2<C,H-*I*9@% T:@8V8 
ph:\1)corNLn'*XG6MZ <U(d/En>4"RH9m<inJGY?t;L:(-J9$iP=GRX%+h]m cdce[HRQ6q3,`nLG(!@RC6>]K?kR<p<ZY-GoH_i_>cZtGHA8ID/I8fRBQj]kB&\mU25<"DTp#G.1N?54i4bX;(kH17'r[7]VUrNnnN<W;*53T\T.qt99Om%KWT1qd*I`b[-f\7F!-[jntYYAMsntXA=>! H$&;>I#oD*&2EKF5.44<GfW@qjT]GoVC>;Y1j0[08Mp[n$+/eg;Xs\sfm\YU+.XcP>?sGfKd3QqC\JRS]ndgoko$'JYSq_/QDfAQK?9fT2BW83L*1E8Hf%"@(]Q Q)2=g47THjh*p(_#o29^/g7`/'==eRQ0[Af0`?&^D]DM\!?:+J X2bG>AiJL#kjd0_1"IB%)P'5o2Vb!s`6ASOobS=GadjR1q:MJAAdtIV:lL8eXF4K;^\iS;f>5fF!TD.tGSJo9qp,]!)YsSn3VsiUtqRq!01`QG he7)E-sBRXgM67Y%rACGSM1EXn\R7$7I__*kr0A8"3F?17i%4?@(lQNCm)acGB;G3I+i3rO)+2"T`J>A8I_B=*Q2:Qk-mdJL^3]8Fj0t0COl^Ln )YtroDj?J_cJ*%".F=Wh$=tO-9p#="/Hr3n<`b)03;iOXGGdDNAEFA/s40<kZt[/dN*7XTj9\m'9>;X@9jrk4=Y>7^V4s4e/]-2%cL`/%H.#C/66h0aNi8W+6BPLi![ n_OcGFibhZ1c=sO^BRO8JrpE8n\15]l3;9Ra?;2X;c=dTZAkjJ`sc@SIAkoBIl$QaJS&,o"B#)(ec*[ehJ&?ksoSn0RGC#ad\@WCe,&pDS0fmJ`KE.iod_8cQYgO6p'-G4?"\-5`_bjs2V)t)3-Cf:SqBh+r_Sl[8FJ+sl:NIOL*E)/P0 9<M_UdfQUpo7=iDoj"?_#e3R^J?)%WMX:pUPZ(IG=UI7g^9AM6<ZA&c1'\k3=P>\H)Z<$c+2fk,<)$+=5UK 3_>]QWK,b3X_k9EI2Zn1.OAJ_+Kjc:.>iLGEUFV#U;DX";n!SG16@3fcP?[6JMlPs"-j0J/:i+1m":VsTss7E-M\Q(VR!;R3#SHSZl%JM Y[@,-(n /;"h]WIIPk+ UMD1P]gobmSKsdM.J4&#P3M9:ErT8%A/A+><`oO<"0=LK;c2Md""n]o_TbOoESkPN>Fr1PdB1j\?5*.V,)RAJWW`@csFaSWE>cT^PO3EL(=K%]=3ld;$fISk^0@4<t-A#,1*/j:V3q ^7j<Z-$)7+"ne#.)Hln2gn3O/rl6K@h[;AXAMM$jTtaT5YYVGCe7_9.Q*mhaKJ-+jNQ<=jeMU0i:a=UlM4q'__]H.PF)rVA"6JPP`!s$b-X5%R"1A&g;)ZAf7Hel]h@X ?JAT??H\XVA!kk;&Bs5c6QB3^BXo3gY'oNkOnOD3St2EZ0gp]!`.%J7CfJH7+pm0$Ns Ocs76g7?9#AnUQZSbR2ZV'+-m*qHS,:I[+?(%_1IRVW9&K_0W /9="$S:dYP_WoOK%tA8r`3hAFlT \:NpC(6]Je*T1QM\V0-J"6q.1AKrJGH-S<pqA00hDVj_\jVO9d$N(.rUsdN'<h]%PqmND(e;+P>0)o,P"W__$Z&Mhg6=.RDj?hB`''9""+8jeI5,O:s4.[ODpEbr7cL8YA#*0l[0orYp?I]ld!98YXkL)XQTDp:?Q"b<(gY-L*I#f,]4l_PO$&o29Jh3)5Bo^FLs`mbH[F@kNNAnIR:aR.E7 !2U9\1RiE+<qBh1bfpZgMXZ,:Y,0,?7jIq"69sP==ksNkrPK2A?)os_K@_ A^8`SYmX,aDr89h2KIrsi?SK0A++,3:=*lo/V]7*%$D^r3Cj>ml!J?ZTWK88Y=gAoqE$[iG^2&#SPp!+_)iRrjUBm&`!e@]b.d^iP2eiVIQgQSWJa]2Ik^mQMPJDO?q?7"ONnK*4$n*jG"/pK*bUBHWoM#\YQ1D1a[t3_s8N"$[!)'ifH 
Z.DAaiX7e_Ub`\l2a!Jnk0<D@*g/\!,b)s=S:!2odJkf!B_c8(V"]b:E+>'!Tl]1OpfsO]jdcX?PbXMN4#_DE=,V<]kWX\ba6#V:GRH,*>BYKS!0ome=EtQ^Q^#[kW]&%e5bkeSY:p&'=J+EW=iRQl%3# Ar36ir:$M0tNsm3O;i0LSj,@JcK@KQ5F$#?0;p N817&,m^s7aMrdbsCph_d\pEqLjrJ$\73G`Z_janham:U)L\f1O;^-= rS0pMF57Y-fXA$,-rp01<PLIpiQ;h``1rlTF.)t#Cf8*:Nb<#kTYfM..W&r<`pa>>)B?%3H`VmpcV^>TXRi9]mbnb5Z7mR=H_CbK[:aAbaNpJ2n\M40n>nOe' XG/Wk!-'+MU"d-:?kQYh0it<#n):+ Q+km?m1kCs,^Ui2\WlA*#ODMPfKHEG]("XQR)?Ohc4YPr?t?_>aP>T<hZpT,ha0`KB LK2eefW.C?i1_ek\NdA`(RpAM+Aji(3"aa\)9ttWk4_;(_OB/P_clEc:rcX99REgNR((+!q_WmIgV#__k<\1DLo3R+L>Fgo)c>`Q;jokC1t=Qq%#&=\6kp/sA8+"MrD>apNL%`Y@; ZLsPb4_"FkgoQ8*kNfU(l[8$= "er0G1Fd'B:AZfAlo$@$O\Gmnjm$P$=f.Y5Cep(jc1\:5@?9pnn9MmqSoN&WQ.L!5<c7SH30Ar)'jj;EAAZHOcNpF3IPk7a>FsR 3Wa#E!<LBBoml'A5LJq6MI0 N]E<8[LN>7_fD+mq?OK%#=JUaH0)BE&;]Bs780&ptA YMLt_ptb\8f)/AM of6QC?#hmUEem4MR"IE]0:CT]s5o[OZ$soO!:7I+!LAeCGH&?00pMA/Et)ip8&[%@Pa&^?a'F,*DEe:=i9)UlD] OH@+55;U_%VAPCQkVGk5;7' +4^W.=OP2SEB@?(.Bk)ii:+&^LB9cKgj8Btg8b@=B5K5idP;n%C;N)*:^'H_9aHLmU":r&d Ff_OWB5/g;M4L@X:D/2JNE0dAj"Ceg-WQ]*7I:D?BW+#?_FoSUs*9hCUWe`lK9P^fP KGGI%)"sNAQG-Xr:op<pp%(p[geW0Yot\+7e`AS3#]<I0kF=WD*H:DRP4C8 C*T7[/O'T4+#6f*=7AHC&@?RG;[]_i4)4rt&W?Gq[F>bQ7IO3^#GY\Xs@+:0qKBI2+isSbG+W7O8dm%7hbllcY sA(d]Q:io--%h*4Ge$cAgr`6bh0Z8siL`>Kb=BO_*&ld;k%B+5d@AlW_DBI" <':R#<K?^rEA6_mOk]gZ>Qci-7Mis#?=D#9S5^jn]EHXgp*H_HALM4!&L)N.IKDrXkQbNTYV)LaJot?O6htE%^OE+mIrGUKKGXL:R7Ji:g\UT(*dMpVf\:a0co7H2"6qUnb0o=.B3rI:@pKXMA+_UHaMp^>*>Rmt9m]La!GGAP%0,"%MEF\JXr<%n^OH!c_BdM?NOq[DKqdBL_WC'$D`\qa["]D'c_[AB9Q\AO]c4ad7P-_U"paLAQ#L"_"\-U5b4HnsjOsV8ARl4fi&q=q.(JO:iVZ!8sle$0D&_Y'dG/oO>\R>t4-?kUmH2- =N(;="ShBU9hGEVK P'b/T4-9:`'`cT2'_jTU(,)`p"=@)8"qg4C2hh"A*'3nJJ0]V*89-4t[SPOrkbN?KEJ&<mIhUm9BT1$5#UNnGV#&VjL\;c/%heI6 
ASV@*#"f*mWfN.PAT^m$fIYl>#4k3pUV=irI&8gAt]`4iM`r>A`3LiIA'E`'V<^!3mRn$sA*Ci.ah<V0&jLRp6DB0/_?A!QEGs`pkO']5@>to%Nd8rn<kG?\L?Yd&n3k0?_@gLKE@=2`<YIs>*gast/>8)GANspk69PoA5G&>;:9!,SO"0OMa8r`Vbl9B+M>!>1!f"+:9-ctYqdg1fW0d(_#/te4N/b%*DZ%b1OU9sr)G9Z9eGT*aMKiTGY65&8*N35siCZ=:W1GRK<Hpp'G<6!WU)9p`_d&Qf^q,-&8@dBhtI[Y].2eEeVCtijHCiB$Fh"pZK06*>7HmAXCUF*_`S4g 6Y43V>rf:rAEV<0[/,UZ\q5?E20A_OZ25!9T!@5 lm4S"[NjK lC:[(P?V/if]#;Ie^k7gdO$mTDH.m4[5>YIi$hgA#*!]k\rO1U5"J`$N1''Y(Kjdj>Za8.12gsSMk)XY99k8_f1GYUFfn!--Z'0I$hLUKQA_t@W@Un%C;NrqX;@%M"DZ(ZV3UB'B<#Y@,$.OZaKAsmQb]^Qep+OCC:Tj-<q<N\;ACo2Y9P9 /BLs>.W?P`Naoo\W&9T2IL$BH\*W_%Oo:G=Jlr:fiW+S^PhWmSog;i&lO0A0<M=D&D3>9jhl69Fb_,Ghn5TP^4Uh$i"%;Fa<D %`[A]b&8JM9hAU)-mN=f)=qbKYi*h";s6D\]&<a7#i>)P;>TPj;W#r\%=:H\q`*A)/EZ=eZ Y?'E75_`*pL6)H6#ed&3\r:Ug1t*E eT-_$<e!%E\_ oD^:C#kCm>IOsA8!#$b5m$(O$bO$(6kWG@\Y5jl4R-98lt\Y\[tA)IJ a`ALi%)Q*%j,!tWYi3[oSc+HO6oZ>;a>BH&[@=kh(g)?1%s36gf2;7DcC\9)n/([6BQIOB8UFV;K2Ad%8E+6MgNeCff4G3?cFN?-_joN,$<=X+m%YsA;OU8%?/.Ub$fFtQ:(AN9m70K2XEL%[XA-L0Q)>4(OIWAp]qO[cqL>N9&X&qk]]&)?r&UGZPQfj4["I,1Z#QPq\p%_J 2b'nn@p[<1M9K@Tc!<3D,ESNOYoK<+gV\<(ADKc#Z+:MJs8hj_=ae/2m5S5.9<#$hbTNo"^G6=]e&2J3F]$=hS;\Q"]Akt4hj+MlcfdCWL64MR@-^oj"HY=,FeRtbD\[S0P:fo 0R3O t<UkS;dkUN:h8+c`n^NmGXq/ NY@f>"*#k#&aq.5bbOf8Lfq\f^=[6AN)&oXctr\mNAJ.dt\F7a>GV pnj2+.Br\&:0:_S+Xr7IQ`bU?Vf[:<Qrqrp9F4j`^iI7.jllR-'+s7t!Qa,?!1V.&L 4'0<CA`WPVgeW.H9Kj+VrTPZ*? 
OLFdaE oGn@d;T]DN0cjaX^FiP8I\/OhF>asH.!W6`<_(?BUIGe014HDKZnIDsY/K6`hq1jA["gY^DH02R7#cXmWYh!s^_ <TVYPn;AA7?XVq[mA.tEb(5P+VY?]G<W0T]*C,mNL<]Z@FLmTn`p,mjh8l9">A?@E44Z.cEr*b;cgY4.FRT%28's"b/@6,7-c1.kHnb2$ZjHd'F#E-ir?g/Zf\_#AdeSl#0YD4C##4Gj>$D#dbF9;n/Q)a&cNlM;]&=Yk6k(DQk(=4@'RNh+=*e+kj1AcpC%N8=r2;G+<So 63NJON+Q*aX-+)PL!0%k(:j@.5n(/)2o][O]45X8-Ai_NQISKY$(&r!3:5MM6e"8tED@6FADKQP+JB!oFsAZ@D"39`glmmmR:UW@'8"[QH/g4tr'Uep[+Hg$l*,8,ZLn)s#I(3WPLa.1j(3\_DKW!\BIj#+1N[GAk'KZl0f#(cc=)?9Xc^FG*h[516XRZ3-bpie-0>$=9,4J$0XL!6D0)'Ob"dgmP[20BtQp>aB7D+-B\%4P%\_9Uc:bimEB4od&b!@7k@V\UXltEi<G&O0s!,(6/cb#+#:4et>s6.R,&'; U(]P>&XCp-;XQ\-cU5]9&n)qKZpfC0jEA^++9MA@%LEqS/7Es`Jco=Kf@,Xi.4_p-j+=5eqP\C#qp'rKebkWK6ij!b?V=4-dr#dgXF`9qYWA.nsn)Cf1/0Ag;VC<m6E*i )_)+X:Er\=dg$;L8m +WECs%3=O[+hH'h"Q>>e>E%m27a1"Q.#ng['MR+o;03mp_%#9QlAGKak/A/"Cf+5,Ic)+!*IA,h&jG9(A8"[YMVMCO6m)? V/hN]_c1BkFTF3Sso7pGQ19;m>iCqXEr8:Z@6!eiepA.8K^h2_?Q5@r_8H.5)*d_gH;41/__#gnT8%KE+dqqV cpKtJIF C9^Maq*,]P=Ct/Z-c&E^;M%rlA1"t8Ji42gR\c*EoR)J9.6?e3]_^G+K$:)[6!<Vro&s^H:n`j262HXj"Jp__)=)D N'.jE\A12ae1Z< #EZB41U-%Jg7t3,*h#"=iRo@sC^je%&h8g$BdL'K1L3 5D9.Og5h=&#:a&2q$OSNY>Rn>9\;O7(`8)jBj[+4>AH*Ei@fUD^A;gbla\d7MaQtH#Q"`qF:"04oiKV=^Q?PEI%9eq$7"-2pMH3<\X5L5LfhiojbbZWd?#7m4VESC&=K=5,6Zje@e(58Tl?mmh;Y)T%EPk1r/`H4j%.\p0s%X!Lp2m:L$7KO:B4F/qi0[Wp!O_lO$^*k-lY@QfIDV)sWY*5aD.&ctt)=l-s-l4KTHGR]<qb!T=.8%IBdBT85(krsg/BffhOHDkCM9[GBo6]0q(:eO@&<a$gVYX.HFn\o&ZS>FM?g?P3s"o2[lh[/Y_!K3gIUTiKfQ5^!pX`md#$hL*/S3=hVT/B(P^%8&l7l"'hprAqWCl]c[O=.2#8oJc#A4]F:%\_0K,>1MUd4=;I9k2/Dg.Fo3AJ\q"JZs%XdL7UXGPNmSO![L7acm_XG3cLP886lr]p0N]dI99IKZDEHU9:oMAj&Q/NH+3,28S`g<>RDo\CtZsGaF.m&d,H>*aA`l/.q6d3T3[r(Zi_EoVf?q"UfN\;S7n,Fm4es"OkbE3X71]T tMn(/iQEP>EBM'M@8pa2f`>@VODBO&66T>.3F;^2P?7B%!V%R3#1n$>a,PC5.s'me%T$igaenhZ/';-o&YA+7g]'?9R0 HP)q`g&.N"$%3a0d^B01<O23R"iY91[X*?@7V4`B4_kMCSqjHLDfK#hgMrkVqMh=fWP"" ak5729trq-Pr+/H 
:ohn_13aM'd52_=$U&()eda3^^8[e-rD[+]^t%<Ge6IX/AD^<;C%@]!ZHo7L+@qd6=+:-fK<:?8cS>.p*oAV*A#_9D2iY.Y:n#kT7<Pp8rWFgP1Ib@]V:BbW:!PT$q$I0a+g&R0UT9D_J_?/A;_0!9Uo,Tca04-c26e1.>N?XMp%6(HC@??ABt^H$%ABY(nZ!<"'Y#!$qC9>mr9/3fdl,8rl[ rD_fBHJ8?oKsd\3hbU7*dB7g:RHIg5lO(/Q?m1&Pjh#B_^i?ZC!3+W6O/PE$ZKKbnVHed4A9aePnRZ4_X02CShsUb`4W7CK4iB0,!2iE2(1 gHgQ(JAAn25(1T/g8d*I+*lY5YG?:.0j[5iUAjmA5>\U"&Y%>A47N8,M\)4B(iobO%XGHK^Sp$:o,BtCTsOp?%cnFK4Z'D-f'4&g;5r>GB1)HApnC?DOCYj*eF]N]1Grg[3+LTKZEj*3S,d]MUsrX+U>EEDJ"%K8lPT\hWM\VDa# 2m&VY&SMB)L2Ot0>2P)Mt-j,UYn"ei>WE%g30n*I@@Y@L.P/'&t">8Cp1',b4*h0pXQ-TQ[_!m>P&>H,<b30ASG88QGojiiZ>Z8V;-[OMTS.:O\+0<L"c)$$Rclt,Q@TWq,AmN=j1rsRmif]G^660B.dm$k$:0EPQnYD"Ag+7M4NfiS[W5F=qPM?It5sChA%g$j$@Ye]CIGBUg?qI+Fa/B(j>ipF%P3m]#k?%IJADme'c<9Li(0rHN76C&pg1kA7!G-,.+83dAE*MN1e57FCNkkN0Y"J69C\k_88)+NC`Al_o[G?XQ1VAiMS!0.]*39J&P^+:D"4VUN)A\Fg-0TpC6j))C2\>;/gP%;C_%NTk^Q^2EJ7Pod8Ia/758r)aF!V'i%Kb)MGGg-Zd#<QFIKOVi0o!q7e6-$Sig<IB-dd8e,7qJ23/`H:@hi(*7X@NPi+N W[pqI`XKX:I-1k^[eC*SREh1'*:ZrZp^b*?6D!C;L#+-Y\A#Rl6ab8Pq?oP)&B25SKYJAtC4=)X2ETM[)J5E1*)+j,-fBKHAH!Ym0ZncnMOHt@<oQVZ!8Xb3J9ESs?4$8B?:Ad*>.HHc6^h/^2nE #<P[*md@R,5:>WQDEKkJmP/7AE'pT?RMRN(>?jId//^M8\D5A-rDDNqn9m=Ok^%CSZ1V^6SZ<9=Wa?B_Q9>lJ#isoZ5-aS-BOdILmhWf*cih9fqn4n)/9V-le Uo-'=@Y%>p1qcQPPM63TP^nQ iZpQM;6kfIl&o[6_MWA$I3*V&nS.tt',OLKtnrCS7Lf.'p Gb1W3RJ[V]7^e8UV_T)_d\/d f+`a6t&dTjV,AC*c1E\[f(FM?B4[3IAWIH]=K;n&rK5_Ts7.T->+;2QEL^^Of`[mY<dVYA"?KmJtjm[YU[+M'NP9UcL=*P7d8km\Sjod4A>3\Gm<Z&F3<+Qi7$`0DaIVafL3a^8gbs+m]-FBseiZZlApU-Qcaj1DkEm&:/d(cLo/+H6nVcH=RQS8"C>o)kRD?`hFU:XkOL."K:a)<-qK+e3^`32^7^Ah>-V.?]07A&m['DG7Ssl<ApJo?6cmc^^!X'=+c(VH.R'XnNQD7/(/ZdkDcL=cqkX#Qh1]8##SEjcnY9LK@5=lYN91_SOSXA"T)/9B?eU8 PS8Fg`+#SpOLZ2SAR(]/IF'kM=4#0<HR7i54\@^\+JGSM/PO,dT:pe*+(C!%r^Vp._CSSd,R!2pVe^*t`@p[[Vk,cD=Io?rA>HD97<>@rPA^$fE;st ;UK1]8)<,qSAOS(jtr!sh&0MC0Bp8:dMq)7d<G;mJ&to"t"g;%)M'2dPe!)*2^mE-^AZK#Q&d!.R(LJW.^Q43d9e[O(^-G$oJAJL2%^4p>7tAO< 
&$=)]3r"#,XiA8T49/[C5?md_+Z9do<;Lb8s&4U5_PaJtN.,jVn^P$)<'q8QE.8V=r90QcNm@q%\kAIN!E^kCF*B0+'qMf'2>Ah(/^IkHc]fa"`JAS^$$fRX&o813E;hm'U-CS?LR54dTU82 %'^_M<d/Y3tgMl!NdS&&&Sp#?_6pU=I,/Uq8EYoCpF*;Z5N8S_d:Fomj9%E72fo@%X%S_2 1*M1q3*ClM2@3,OZE9$'(N[?tZ^7ClN`%]t-k$[f"#ZW/6DC$WF-&M6)^7[0?bi/!L Tgel;P APkH'R_snZ#YeQ=OD<X8?*+hoG1[,h4%T<'s/tF^Do-0++JA-0J'\0##="sbr]@(JLEHZ6n="m!kT!j4ebJrM3U=mVA.k;#$oD6s"E=Jc0ag19`+P@Set#o3<\BkoI@V+`+#^9NFs"i+Hp)r%U8@:%?9Zr;$kO*<l=%?,fE!8k3X#*aaod&4)O/_`l6.ec5`@O^&!%PD+-o%#'4NbD%rQOV$_;T?@\79rerL@q,V/s6M,AA</SNkg@T./[gb]GoD=Uj*/_1[U^B5d+PL$H,LC6O0Bjq7#ki<<Zjd3/I_pj2^h=<_Wq.)W[<m;i5"To[7!MJ7[Y?=2&;@`fCMM+^*T%dLUHqd1g+0;f>sNOMghCH'tQci%$WPfA,Tjkob\\,5=qAQ#&*#,h`.jDlqi&qXS#e%94D_Tfl&^R=:0he\FO!!ZLX!=d&,8oK64Ab*rT=9IQ.lHJ-gPb<D2lI27,OT(jY+<OG"C$!A=?1Ss*,,fp"=^#aT:BfY>\-K'njG7W$`moQnl'UsIlA7Y>h3[$.50@m\UFFAbKPekPsUg2?A0*@-cRT=mP7TDER0\1gj+WAQ`i\Ts3Z*c1?;jqQ"CQ?^L4=;./Wdf[`cEsFdr6=0QD a18&gs"AW7"nh$AH08?M:NkAGRYL=q_6".6[2Q14nKtMpP'C2I^8/OR!JfrU+'#q[*O3M4L986<pEkr $harIWNoA5\9-pb2@$]8<%7mMkkZ< &\rT3';I<R2 a]YsDieV,`GB#@tSkVA9CS\KAU?%p,\cTRk 9FOAahlL^XpBb$51CT]4'4sBbGt[`VBda5FPg`O!AP`H6:?aFhM2t=0J_'>/%E< Ao8'5X8 O[&@L._>$ZYe1PPY9-7C2# =YUqm+;/5p2_Ann]N-lL*l:tX6AjBiDc`e%!g4;SNaV)>99N!?(KPQ0Q:,]qL>;,.`EUo<&a$=PpGhXOm)t _&;8#HfSRMR? 
/>clW,HkdXYVdjr`:9k=<[WZ7EJ^2l$\=`lM%+_bAn<q9]\>d[?9>jn_n8RL6kpI=kCj6Qg_!6$;,27AAe#bV>$3gXJXD`>G&VB$#H9A lhGF-[4.2?-+;VV02.1c%2Ki3;W[I2>qa0J4WM`K$;[X73<j&dWeZ&sY\bc0DL3h6Nn X9t<aC=Z:tH,A:S^$?cR!(+Hs*)W41qLZ[N$KHetJD<cHXYCZ#gY`1<UeW4(4(CBNKT?H//F.)p)<G"<5gt<br,fBk,XE`7:TQ-A%d65.c-kUq4YA!LN,FBic$fa:NPS',lg):<8EQ7T`X<JBrHp4F1'U5Al5F_[8;MMl:Z9;]`4c]e GaAi e9cMc<<Do__t3cPDb0A%Lee)*@"c[,I3;BP-Mbi\SN;5.MF4.cD>`flB,t;e6GRh)NXkqRt*Z>f$fN%_o&6T@0QjL+8R1O)m?!>YI^K3tWc95Y\;5S*9?Qj>]h._Zs^EQTJk;A?YA^Q,c81pA2Kb C<*dEQ@PRtlj\A,l>W>GBA^=D/<9jZLV8pB^Qe8S#BiklO0EQdM]Y8Pq1^M1;rmMHLUP\$ Xbodoa%^'9XkE;MmRtT;!W/e/;`^&')H.sXQBQ*d<Wc>%EKgop`o!(mi.EsCk0+lONOnlM+8?W't^k)L\==d=l[P5GYAqgPS7TX^C2B:p8\6B9sZQh?j7jrTk[q@!$o/G&n@5E>^7&<U8&c; ^3B;n?,+Ea?sfTB_iD8%Ers@>%A=-h_Fpm&`\/)3?GSTBWjA/6W9p3bf5<A(O% g5ctVrEbd?K:=G'EpI"Zb,-o0/A;?q+AqRN\&^Zn)Z,ti#\5t'D_;NSt,Gd.EnRj0%JIbAY%fMmA_I?B04#>Pm)Wj 2M>5K(<gqi0/Mc9kg=5!aMcd\GkTA^1n.DBj;0DiY>'Y.V"rYkNNDVr?!G;qES[Q7)&_te dYVBJL^LUrQl"25['"`GQ[[k2>I02O^@0sOtNq#Y1:T__V('?=WI_0&TOOBe8f\Qn42^n)?8-YH7DEUq;lLLZoUh@ih'Og:-A"WAK6S8,'/[5/B TlH,%,dO@; A61b85dp[a*F-70:9d`pih`]V:_XcH\P)t k67:f)85Yh6C,pXq>#aC61*5r(agq4K=(,2btc 3CVe:>tKp&mp_;<8KCoG-I1-MgYfDArQRN@1`ds50<AFW70S;lZ:Ger$V43 Brg(]>@g_b:M\<?`,^@n6_1>!?(4U7AHQ3'LU\=l?CG$-,VU\ii_oUnOGSY;f%[mFB]G+e&q/P*V]h!abF`R?;DLen/K;&#e/@V>qZ=cK0ja4KAZ&WlmdCDeV)BA[Z?=4SmqV&lC\%g/6kR!!H)'k<(Sl33W')p_5ko_<OmtFT]%eF=C;lUEJp4NW;BI& A=&cXfbcXW(@XtAbGH8< ;Pfnc&o25si^!jHl+`XO(pUGK>,=q&nS7J7F.U[Phh+4f I$C'6FNtAn[@-&p"Lp(!q(r1FJYc$Zg!9FE.e0JnY"blEYo;r:'Asf(P63)8]7@!o &"?dVC\$n8@^N4GVL K6!:'E(GH E8#okq*n7>,kD%arjH:jgt@p_&6+pTO7;*=01q] Zd@D\\DN@/G3-<[Zp]#">q40K#E@LH/I(3ZQb!NA4cO0B'@+D#@@9naEr1EGW>/4bBj.`lablik#;?\Rc+":=$?MoF5Acg(TDi^ s#)R'lTA>89e.bmM;jKbJk5A(RRJN*i(`d&)5]$VcQos1/#2phBoXB_&VID` _+$V.@8AA`k@Ank9)V36/O.3Q1ikgAl$TZQl5X 83(?dTPs?F!Mo;9L?AX`Jbm[s*2bb`G45<-NM$Uc/ 'rV0!gHW7Bdb`2."U?5`[Zp%Y29@F[Qn_s&nnqKI3o@UH9:hVoH?/+eF(jk')AGn%F1dp/PA 1pstn8fRi:^AL^9sF-!")holtVOrB3f2p6^LEcG?!gmdh9tP<tt5ARprEeQmgB481HkU7i 
sJ?6+n^DtH1dJ!m8g1rkKCAIqEj$Nt8HnD<7k;]\^M(`0;55dqE[ALIqk"C\<XobsWsNBn (?LX-/6D(=sT+K_3fAi1/kn1&TX<d27G<:htfRBI9`.2<fmRVPF#h:Q11&/T n7fIPA6.gTlXONt'[KfT=MnG2(fV8Q`@KVMabk1t-nX`HJ(V+)#%%4Wo&^l\HD3dN8E^``; G0DpaRp0; fO@SqPHeUDdL34o"f_.&(\9YX*`)o3XtAYi5E&T?<]Y&S(`2=+ %f9Q4O%r*f\L%V.s8fc%W!iY1Q\8[PT.`nF#QO<D1,<]Ac&Q[P](:Qs..\E')<>\`YU0qc>_!$""8^)Ebf!dH#D74(H6K"e%+hNJIfqCh/3BYAPQ\Wj`-AI28K:3Y2S[]Y4B#3_N)X8C I.h!P)f84RgSPCq5B3^J@Y^E.WnAC\sFJ;!E!/bEhsFFSb^JsGAA7E;&%P]c+P:1N3`X5?:A,]%L1IPV]H@Lclptin_V&<fbHPR4O6JV>%3YCo_#q`k,*ZRgL PJ[UWm;C8gkl?c-N(qRHO/pSq>ZOjA8&'bm5e?Eh]9j-slrO.(WA0>"?pMKO`$3MdSGlj_kr4#0<#Mjd_bff'0]O?'gC-dAFK2YKrA%MUDn /=g>K%$mIpkWb-oTWY33@42n(*PNQ:rcKA./AOFI)o '-12j,gQj_&fAH42)VeIR#"]>^B::]DQgZDLTG_9s#`\4\s[BH6gM(k: +2n2QFZ1!clKpAjG7hLn.F$XE^L_SVO*bU<XM*\W&;-DXMjXaaU+*Og'-)=m5nH45B,@q'\..8E!E#;r4'Z@Sln-[[Am<.:CHB00Fa(0G1K(@dH:2tUNqGHpK$W(P- TgRXh*49m.=hAFsq/gS"4[bio !A]C:7(+$[bo,gY@8jQ-&=]3ohbJl#Q1e4#4<$Sd5Q$E_<sXD3glOm,WV]+j,%p4$G8m:Ofk%UmNdUk\6se0lqt?e19UAHb:4Chc;S10-UPET]bR'RpIja)8GZS]#95_COp!ec<1jkMS6,&5Q`gJFK@s[[#-*WqVm:Sssi_[7G#Wlb>+t ?[QX:-cgYH]Fk_?:/sKq22<6L!0hnU-m`la2"Se[s3TOG\U1i`>8]*,j+A^l 33gKkB,l)a );aoOlC<#nf?hf)6g[58_'A(N`> t2f2/X]ZAS6JhqllB!h2"Qe483 !ne;)2$:jbp0G0gC\t3/&b2nRoa;`lW@p']OIIBK&Y)'l@$I2kDEMD:*@:M* )tCf ^:JUa+9,r+FG<;;R*klWW=7O?J2"(4>UUJ:=ismVp!o!.Z6_4aM7\op$0YAK\34V-LR*2ISSKr*+Z/j^L4c!'>%mW%QU"PVbHEcE^=A@B:#L3g)N'e9*m]B8T%m#p[hCQO"k`8,B^j4&Z&<nX*RA#^HOR'M=,53`*RR,Z[,.MThq8BFpCA(sBh5*JOC*(9`_2-$QLUdc$WZP%&`!dg]),$s>qHa-EVlGbn,^J&hG9'9AI=Y3P$tE_4AW4Y^T.p@<9d;9*658d$aS'5f3lXJ'3!_U3M`8JBC-G(FeeG[Od%he_"%Zo(,qo(c33 .m;:psTHUiPU;^lGUshP%5%,l?(]Mff;n=[1Wgb^Jf6Q,<i.M"GO#@k'Op@GlX#>jIGqAGSTVHpWL5bLj'.sf.-UBjlJKfqd_d4rKEL,i2\/Xciq_f>44PRpROh<#^&H[$(RYW*^,/Ol I;l^9&fj<Llr`)PF4MGeQX0Q,rP]Sqo!k@5`+N]i'NE&kV@WE.KMn<m>'DtNl[(CdED]'QdET6dAei#G=#f,sqR@AQT-&1Pc)]Q_O&6#i7$iP@>lFZ4],&-MfTCg"*'`Rq\Hh5i 
,L#+6$:V6SW[Tsi^5iG-t3Bf^FahQCOd=q*[R45bc=?GfJMY[cR"R"j9,I=EK;bh:l/^S2b=[8+JK;tZp0p+V%:D<B,JVXfo6Wo5LQDls<%5^fJ5Vmqi^\,5X224ZK\%:;3H`8a^1&-Ao>/Ck,gN'E,_%DkBe<m>g)a$nf!!-2=! >d7j=:l\pIc!A'I$p/]c"n<L/NE-1Qo4Z:@ST1b6)dQG=NUfIq-,db`a^GSMm"._AY9GS?i0*mI0gOdfr`lX?Z^ZF[1-&l:5EV.!>P/),.nt@/,7;+KVsRO-acIj`g;m[^#BkIZq_?TA990PGc\.ACkK/orpm:'/jT9[jn=L8C^-f>DhN?YX:AF&q`t/)=g?2[Y%A-2+0:Z`Qo`fH81%/d[s9oqo\P)=Fh`Wr:ANZ$1\@RYHcJ<:'He lQsoo]rJONQJ"3(f\YqO@HF!(=<Um[kImM3-!U>U*]\1Y9a''A;,IIPeHPFJ%2=*1FsmnZ[ANmVXNPRgJ]B3=g)37Grhd_#mF15;O&8<#?(W96;ImlG6EaI pY8AASA02NtR^I!7^f9k#.=R=cV`-,_&QqIFaI%oTn7an-!pO;)N >JTTeO.)6'C"AoK9RnLJ@+hHRj=[Nb!(JY4*rrE'R0)qSWPb?C;Rl(F9l?7:XL[/DNS#+QI^B1Z7b);nIVsoOl9CAh-d-- \\PIN!S*>-JL/&-RGcC1/ aU([@nG*4AN<(.7Di6cLgX4qq\D?M6.@Fmod%de_DD)&'9-_m&?nrGVEFnk.M)UW$i%kPAKUZ%!^+,2@b'Fk?4skJGKNdUgZdokFE" -7']_m. YO>beJ?g[Y#A5ARiT(U.KVe4;<E#\EbO!iG`bakX:qjjD"p5Q0?4-\e!(2QZ>:QBgOM2^OZ(GK# li9G+rD>P*5o,H/O1I&+jtt\Hl:Ogn4p@@`-"Q/jcmS_at&C8lp-c`DMh!4JXYN$XKYP(Z#%gHI EV$qpVP_Q/Q:#5j\=bi]bbPUH6]sV::AK)HT^O)P P"+AR+S1OTDd$[Dl^!]if4?&HP ITD;-/KhdmBaANA#$Ll4<&rOIA#[H583h$0C##K"tCp'n^_>s4=lrM\5SED,/AF^Gn#Q,P38AD+([Ka);gF43.fV#2iYG]9m,Rn$t[;<4kC:7DAf,R*bZQE P;5MqT$M?_HfD9@];\?U5qsqL8-88l6d<C 0H7bW,,^c[#3;-=s(H-"],TQO?%<`L1Z pXPs$%Cs\-$'BX!PC)16#Qq_WDTaBN5F"U[;(SB,6`rH6o`9<]$K.>_8Y+ntTHYEc21Cdaa@Ak=]3$2?CN8Y)qY;H?XY p+*;IUdr]/5^P0r\_335Ek6K7Ftb$?Zbht\C9[<4)^5]HpA$T]DKTes\6aiW9S+^+LnGOqBN,Wg=L7pC:;h@o5WU`lZ^s]aNsHJ2$n-?LF5.a.]4,r45grfiGJM:>[c?PqFrm:C6*gC4[MB244"jJ=>KV*h<pDlGdMJ2Pc6rS-'H9I.0@VAB[eqgq^+VN2&=Y2&UOn>@IAR:sA!poW,4m r.VEq5lgo](gE3$>d\r<_.U[WQ4[OmeY2jZYaF!2TDolY'\K;S_!L><t312rDS>Y9@p:D2kGjCdA\IA_I##qQ:Vmf:dS?O?=][Lh?OQ50S*nTHOoq%co`Eg$&7<=0f^J\24O@[Wl%p8P,E&oQ+S9Z^m7k.`Hpr ]cCLnZk<c<-iSdIeBg7c;OkIf);j*i$nTV:i^DYhQULG5#UF8%TYHl=2ACj-mkW_$J?@#0B#-q>S+V:&NHM(drJmnra+7SHMDS FVt=aB$XD% HqbAg]"BgL*!lGngo-!DW qI)=A[(jM:YV+jsZ!/Pd>d,6&'^$cDn(U)lDLN\V(@Js+kDN0DcRR\%(@:o32!p#PP6dDP!],@[>N7A 
VY[eOWZ*@^lWXZm)*+P[B]1+%oY1h[hma#L@#_LJT5>e[Y[&)RH7D^()tHOMCQ-p-k>f-XppYP7a2E>9,b4$Y3N1CFWr'He.?;DDHNqAO83oNqlFR@A[?HJM>4r(QJKfM;Ma-++]'D<^G.E+A;Y5]eVl>'t?S9%-)`3LDd-g''5n<t.aR6SJlSt'aR6c?;NA`,bobV-tJt4hm7sA#'p`0[cMiHbNp ]+g'oa4lTED.MH5#6m=0(,[#Ah<>UQa^`#a'HA;d;:Nnl1p+SJ"(c !$27gl@pHI(gSSa0*9&ggk6BJj^o\!oV(h>oSbg%/BHeO-mQc#q/G?cGdN(T/1&>JML>@Fl0=I4)<e$@VPa2U/!1ec9S>'?9pA&\anPs=2+Eo9c7=i\psA?5!5%Zg5,-9n^jsb/cU,N^3LL<e\rMD$+*cZiPHfF<]l_?/pr>(hi #9lYqB,ma6+F]j+jtnHme!nd!'i-- >eBc/8nsP,PmfHT2ROo'dMQJp%6j]RdBfS:^IEY'b,*PqV!Dg"4($Y:^Pc"Ta!(k5SR[YS#Fr6JpGWALb:JGc__UGiAeXIoYAE`7( :YX?cH/Mt"c-WXD '@Wc:$s^Bod0RiiQqI4@ANe@aY]:+0XHq*c(p=sW*%tP'=K##Zo>CmpFq8H S3^L4`<0^mFrnM&[[`p:+38A*s1/CT%GU'qQnsTs7B_2@Ab`NNa:a@9A<XA0_NVQ*n564;f.PTA`DhU_*&,N4^^70s%ptqQ8=Bc9/j+=(pZlf31m2UTL8'M:#d\K7aA22tH&ICct:YpIW'`Cq>atSl(BMNC<,914O<14R(V*W[VCsJfjVJ;4<<_f@`9bIV&LBHY]RN2T nIVn-7_Re;1OkUg+XUoY8NsK26]>c%eaq*C@'>K4JS9Q@^`Ik!aso]nLZ05%(dtNH3t<6Z/ZLP@Vm911nMb't,d,.\\4Iq9[(tUT\/:[jO7PQ1(p>%/ Z2q8][fT0^:dq]J8Dndps8(WdVSpi;o+loBSpA<(KW[_B)[S(B3fK\&0eG8_^mOX:@_MiD%%YTeDg"M0+#e"nZ&o^VLJG'nrU+1K.^c(OZj8c^IH;%5?A';=sO%fYJ#mkknh`O\.`&nO]g"-^1%d7!;EBsje\tgXg@ 1LbF7(B)H<K?`$r=#j"F(0rM6ga`%#Q`P>ndn)UAU-SpX?.IVRBm)hC5^3XSJQMYo+L9f%QkYrg;:JBrkrB@op(;oXJj03j1[:'@VIN*5)*#&O#T\nA/-UGM-)_AVo/m4N,U0lg>cTBs"X*AKp1L7qfZ3(Q/7N2B5kb1!B_.*3^1cq#]Z'^r>JcR(Toc]XZ6l+6@&A_<?LFrPo^S/L1j#/IX@fbQt<HJ#_oO0)nr.-&Qg=.DS^JG<BAbnUhAdJ'K%)moA,B/>PFjDGs!Xs+V& $=kNhG!d<A.3!jLiphmV2926ifBat#+,&aUR&+*$@"#\d"YNsAVq(C#Aj[A55O9L2f=<-rWfMb<<A\)/tl#3#]IodF3sMjWXBA]I#N4:<^=D09gQ`Rp%h 9X2LlEkgI"Ds#tb8U/S`DJYs4V'$9>e10LY(<3B$L6Ng%,O+1KMe7j]qCD##blsi:D6^hT0L2E/XC1##jJ53>:[m*!<@8smWH`ohme=E<nVb/6aU?ZrM1:_Vo[# sGf8)Ye/5=26D%0o-:V]qfLJK'i:<I#O+jZ6)KEoa+h_`9S^(pX<$A$!p``q-O`UfEt.P#/EN>j QOc4*4[<YH))(9e^8FUbh\\?e/AShNT-r.Z3(g5KZ88CkJE&.?"X84e9\(7J`*9]@(]B/F#Jd_55o0m 0'> 
S'?Jq000Hj)nS&C-h<Il]`q7F>jpM%OdL"L93iNbhBl]69Oem:CEF4@QoJJaca\+sg'#F:=fgU\e7Uo$11jKI!)&1H,'8EWA+-*eRP2j,-oBCf98A@$6+pnajZo7oaPX:`NCbtfL-<ZDiG`$i@78m\E#8sRD0rLr2aN?)>/G6bo=(o&[V6e<7B=! 5@JO_J`Q.>DT'hVtM8V-OeUSi?MF&]--F*c,kS@&;4e:S_\VRka?EWj+;*aOEtCpgL$r`@fA:i#O\@p`htpaOk@h-4,DU<OjKSIq.Z>LE>%!-0k`DDB,,'^(GhA'^8"$8-kZN5/32%YDANP=eUi\$pB62fL",W%.lSaQTa3.?1"Ye54\9RtZB+l_e%>RT5bE;RcbmS+e%<r))S2t>0 &&eg/UnRGIHB"]6 ZiA(hHW18NSP40lcp.BkhpC\44KA3 G][Jk(YFR]t-c9\17nV;XI?4l_>IsZR3Ko9-WNm7*fV=5i#8FE9f; sq7tdJ9(W-WBbl+s0-F#h>H8K/Z_[B\Y>%5i&VUIT&T<62^%cH6D&r%U]9>0M^OC1\P gd.4?^H/mp[A<i/+72`!Q][M!).hMV9Lg0&Ue RWH:>^TmW<0L4W>1EfDOG:M6r#/+TTi*of>6k5BTlGm[)UIJ"=8]s@s=G9;^!=7?4'KI$"9'R%l`lpGhUBJ]8ncn>D[?'`"fCLit50"Yd^Xt^Q!Xs!>8=j]fn#=Ff9F^b`Q7p*$!)Yk\=hps2(4"sVC"o]k51ro)`1_6[,@<1DnsYa ncRro!mM!?$n5/,?'rnO@gUMmaW:Bj%M<G &6]8^1Vk7\EIS 7.hEOaf!`@JkdTni7VZb/HEI_lP%#`,sJ7bKioQmU-\2An&C)8o1A@#LI?6eXh@c7nXKCbp'l7-6UW0S=UbY[^?K!(A-ss5RcH]8$rnlA?255qQEYR^gSD/`3jh]8:&jre/<#%d1nX7-$mH6AM!2S=dUQ7')cDIshAQmChF#5^<[t$+QXMqGf*5ZS\kMsR"A4+&^\si$6l709m217`F9&_r--k[6\?>bI/2NVWgJ)-W^_#93Fc35fm"M*&H;"HY;A85+eKZQ&o#'%^QUID!nsAm2+IRWJ78+-Unb/<=bpZrO";8mC5$j5q=qcmg<JD/D M"l:IWY#9B-EE4\q0QS]e).[]ap9M7U'a0bPA)$Jdk9eDKA_]C#))`+oI)jl^16eA2-qqGD0Viq5L/40;?5V'kM8sUqbimrjr7$8 *W2A^]\,]4Z+jFG2iSQ*3'ibc8n"Bm@`#<"4MS2N9HZ6:/pd/D*_F2OD"Sp]Q1*84BbMNg/REY+7O^WJLA3r\jF?5?m9?hI](^ 7pfcSO5)nO`,o<C5SG=Qtj r7.^j?8$6a5LF`7-;,lED^KOI,Uq`,7)o9'+eLiZltend`rq&]4YM.QK2hCt/NKUXH<HAU,3LE$9.<Q6I#VV'AcJ4V+l<FQ+`jbSO.<KtM31^V #+LE;hYMe5;ZiL N5]l\.nTDUoP, p/0+KAf-Frf.`9/m4.;fWC0e8*Ak*8Kid=rcLQ/]0#,n&NOi41=(PVU0e,XUWLO2=[gX;[l;s\5DM-/(W'M]1J\79.JtP, 7mEQ@G.=q?40Fe "\.^4q9j67(k%*9He?Jr$Dr4(@LB3)!'(g A<]#/J0TU4io]AkCQC#MI>)WJH`$D?>l+0k<.+-YmXZ(,T6"6h1QA,8>D1p'!(N=!:3&gO:C$9o\>95oW'Q*FC-iK>`IW0:[. 
4?FYIM[@%EO;legl" 1F?,GoqD[B5*3NhfAj[a5oOk#dWAq4!qhpi=67X&-N+J9 ia@d.VYOe.M`K@,"""fE2sa]I=-1o$  U88DcaJX,X(FTXOb87#\W1@='4"3#.BsP Qm@s>$I5+cn@>DAn_RB.Q!>[q:p-c_Ebt-Y4leG0)8nOQJ7"5Fb7N(*+)9WtJ1a[=j4A@CJ!T_r/[-!gL[JrT,t@k^iPAX=roY%)G=W:mjl4-rZ7bK+(B[R>6LmlF]]nb6m;".rfrZAHWoZ_bO)b6#?lNc^ 9.[UV+M1Kgap<)9>],RN9(3MF[Z.Ro5a ^bfm,09"iFK 9S37-<=A-2n1Dmrq4rA<B2T-%/GZEMM?Xnr7oC>YbF;LJ++UXR_E[jk`tdmUEaEB\TR`^ZZ5+A0i_?gHjfS^c!t&pHFc73YsNVI<QXs=]J"RF"AZS1%X2k!=7DN`DYr@A*%L@$T>[)_Ut-6'e9U%2N&&r>\HX`'GPo716A8R3C#rN[<2,V_lZ[?QkL4>R>f%EB5_*Y\2Yn2q6K@"7pIlen0cG=dC$hen\dj.ho-0'Ik. c:8(+lT$Agg/`LIt/<2PH3>$oW!Y+DAo1T-"YJf"_`nWT_)-F0-0#TV"\*,/Ai5VT@iTnbAI0ornM+k;QZ7hkX*m=LeIHtLUPD!i=f4E0$h3^C/s+".a0/jgoGb0d/?:ID0jt)T%EL4PLgpqkWU&;d2"]Op#Or@7c#p1r3K&KKUs!3^q:W<rjDEAdjfKj9DBfaGkK]UftriLm@eTU9^YZgAq'&lNr=e>-B3eAfc[A_=nR3lN\hG5![te4+Ealfg517GpXQ*cZs]US@No>\OtkFUXqs`e-7#3TYYX;;p= G`kGU6JJQjAChM9^P+S>qb%ZVL*a5LrMqGjB.h lTo@_H8AlHXl1s_LHZTj6TDPX#KjrO1Ph,rBN* 3 m]dQ(#QU#5s3shF,*rZAW$6Sr9T;4WAQhtZ%3+ r.$%pDG8  <VB>M7AEZ*m'k-3N@(KnjQJ3"A*EF/h/qst?U:Jfjd!)MmP1!#PGL9:J'$sE M.Hf]]Frld-%*HUVWsUrD%?K"F/gXPe!LjmKNSM1tJC$NK*o5Le#ors`6iLUg1=2MaD02aaZH[geljDkU'qc(tg&nL'102,0V>W?pZ<aJ2X7@_D*K4\jmDU! 
h:iJkSl0bA7`R_l\_W]Z08([5@a"ZA-j3mk[]PMZ`$f3(;^Fr./Ib%d@thJMa3=tcLm"V'R:hkKk 7L`oiNn$U('&*q5ng\&(bA+(XWK=[^kEdd+b`-+/W$:t0-AA\Y#m]ZMdg&-n0l2"W#_hMjBC6fhYP@ATY0H>-CB!:$H\(:dsB@b=$R+"Z_*ZFTDkp"TAD9bs.88&8?BG0oA`/6_9:Xfeo3J+[BYcc&Dk!DAhQn/HO&mEGsZ1A1\%+HmZ8AO6`J T!3:Ggj/R4V"?gW2sXh.2G``LIHD`N5lC8-4X4-9nBoZfAH!;Fg(<LATBWW<iHG N;14)WF<AdA*dQ]%#JSJ-^U9I#h=@k\*X)Ij/>mq!EO<j07jU^?0JMt38[PB:3HH)i+,+V#1,UNOs91c-i4A8*8+ec0KBSee(Ye#;o%&ebPaoR4X*eP#lFiL&e=A',3V>!#i%Gc(1ZE05MaV>"b"OT%F[U?Y<ghXN^:[65iD\*&2VNb!d1(aHr9`c@V5DrKa8\rK^W=t*jhp4 )?g^ McaP"d<m3f0[$1Q<=)LDDH8;& ^X^^?RK#=e)rqV^F^lB2@EDpGi77t=tO)GaAXT$=]`@!!L$VL,A*cA\R'XI_18K3WoC6gfpRcM+RAtYQF T4)!FX^JmrM[AbSE#`gZgMM=Wi^\UR06PK&fegH7NfWAr^3>B.to1!4S!WG#g>I-YU"s9\s*H!RTr]jS0Z)4[9ig)'"brRKb[pb7 7M[k8.B]i.*)oHiFnK/Cb.qr*OkY@jc5>^K8g;gaf;Eb %"#?EZM"6[H,7a+s(XFiP)>dd%(lLHTUK#4pZ]&sJOa,&MRZ!$2oGO&=7Rj7R8\pA/p)0a?d6htHH19G)ej6sGfjWsS2@*1^c"$'0LEI5*!X8CdC%j-99XcPs8X+s7H#H06$*4h\^KfW.U$r#Tb+E<b`OTUS0 ^P;h)/?IGb*fCZn.]LA7A^`C#(h`QHQTVCZM-/)VG:j/^Q]40b!jN!o b;e1=06?jb6eL(kR+en:g..fC.1%9?Qs4>G@Vf6sphpDc:A5]9hkD7`ijZd5^00%_N0B$5,RMN7M>Aa;*Y5.F;rFnK;E'):<U&Ks+Z]5>kMST!r\G=<W.@95ZEM#((]-A;9@-fH9R 'W:7Sf?f/1qO! X:G.*C"kA@<ZF[iV_ZnhI!5-dkP3[E-P`lSQ>^DjsGF>!^\4L8>VY4$VJ\WUpegjJb5ArjLWkJ8@W'a#p(VW%7S*YSI;IH@A\O9)Y(`5jb]F )5S?,I,F$./qc@SFmHfnFf!$bW+erR:4Y99,OB;caB.E:G/B>AnN)jF^75Y)Mh'<<\1PSGUta0&WVn>&c]ShG#qb+)fNmE(tX/13]n!'<J-#*1c^feCND=^TI:"ssoO's8GF.C._rA9(202EWB0*r384ahQi,Y6-hA&g1S_1Gt'X0]'OFbi[cF>l:EM7@M9VX_AT$\[IsHA#NQ+0:gZq'@W"\XP,<*tJKl#E$*0O[M1$ii3;CA.>bT8(1pp4+Z&mlV5lspqsJ)?P0eXGmV%CqZ:P9`KJc9TQJDqa_AqVgGKJQW`\<%>&@"2W+Gg:akVgpl1><:,N[K(d3H[;d$L8)!9tn7 >P,YPND1Ij%bJ""!hm!>VK"AoY8[)gG\@ F1^G[m6=5Y]r$knf'G)e20=.Kd;\c*^*;"Ds*O$K/kVOp%PE33M8C8-3]Fs=HH L](7Xi;BBRDW<`p<&^(IXmlpmT#oQtY>:T[?W`Ze^_' %2.93Mt. 
$`@c`k=h/nPPf\Gl655MMb//13lm<foXJ 8 +AqqQW_!LBF_,,KLLg@bNOSkK(?YSKPWc:+`1L0l4D49UQKlkCB6MeYp@Y@Bdh P9P"WVA@pn/cd\N&LU^96PA?&FZDsM8$R'YAJi_b$c,_# i2g1+BTg: .%6HA]ZZ,U2J8W'Qn!Y7TF+t $m /5R#RCiRY=,07#S:Z\0Xl$4T\\@>fahEaDQ(+47_f6g)I?ok6C3DmAbbj@Erg&S7[eK\[C2-oTtY<`7AU+?tAllfsA9AL^+5!EU?bD ftpGZD(COq/dchiOg;]F7b2-Y1_mcq"-d^ 4d75YdVN9U%AZV:99,YIPrG'a2-Atnt<q.<&-gC0+1*%F70/$kqf8lH1i"9/dIOtnl8cUEAqkc_\lG6BQFc]p+m8](X)i0:9I=8\lJD\r;5et'3ed&6%$g<[M.kCG[ifSimQ%Qf @OD(jJT/aojR\2_jY\U?TI:IL<$V``i+[O%M7kh58B0p_5QUrB[ ]3C11$<phVV^6Ga>!<_^nER.*g\cp, QW=?8AnA(A+eLrOJ9Ea4UeLi6NAWJJD[Ai$+E?>R*o(WHjq1?"+n?`",]k3kX;'^SeI)]2."\9>EqQo2K?^iG)TmRKp?=Q\DZ"__Pf^MW<KFd6MTqG<VL2*p]I8O=,(T\^hf+-WI6El;9Gr"'ZA"p+?\+k]M8lV.Ddm50qO**ffH4*Q,bkBsTdUT=g[4*pF!\ir.jAN\<9>#&+Zpj=/B<Gj8PaFnMW?AfSmXqKWpU1&O`/A@=O`QX+30>11Wa'Pk[-.:pr_1.L9YfmM2NT/X(6-NEnK'Fl>t\__fN- ;baPoO<4E8tNq8UpS.(W8t[_B]oSl/A:& %(Fbi7t55lf3LigGUo$s.I-_=4,^I;^)$d&92Y$_5McBg<Kdq;UQIs` 8^ssWMa@[580862!#5C?S^T6;1cOOnm#(K<P,jn<H(j_/9$@pZF,hZt]7)RL(V=9;`9paOi7%VW63t0j9T].9=\.4\]ePD1HKGBoq 9/#Kk"QWWcoWk4cC@=on8CK`1sjEE[YIW EE6r9)$>`]bD0npl#ILcRXjMdP<&@H8HQ:%W'!p-b@PjasM$c!tKpg`J-d</W!tLM09@5KD1"*:T"+Ahka;eG>J-1sjS5+cAVplC@Cf^<AqsfQ^rEFgjqd_<o/47_mP"mPLk!>F(k%PSj4/Y6h+!:2pXa#55p?s0)oMCTgq+mZdX*%,g:L)MPG`qi(PmY8V=9:DOa]`H\MeX2BUd%GYRWha;j8kkdDkb!o6%.c!^o=L,U/Pnd[Bm35ED] @N&;,!'_laMA8k44iGQlM?S'F1\a[qAlX:$4@Q/^jMojWh1jTBTF@$SDRlP,%"W7X.-RSkeK*KV;:tDILZ5tQUB!%\P.HA5D>m!q,qVD@r1+lOFE<>n*7D0.F`HHhn0q d_0nFK,1*b^^40GoR@1lSKZV&N3FQ(BDcl^B>3!PHCY *E1bHs*Sr[j=BXR-k/6V0#6_R>VYs:VoaBVCNA5,&r#;c#F@A1$tH/V@4HTFTHA>%IJHFV_=g2_Ue;-b?)pM&0BA`(`h,f.XL8U2<F(IX?UbDDV["!#R'\4QmFA9EP\fkE:qE%^(P+ZnT'P4]22'Q$ @$H+C'4S08@G9A)?HTgXJpX&Yf*GnhAJB"7"f3/2j(NrA:C+i)g:]Ss$ TsfFbZ>CB9Pt#]HNdPKsQ"CXQ6MhQ )poA[s&c F"RZq;i!=DE[Xl2MH"DYV`D8<*UcPWP&i dbA[((MNLT>hFpROb'7W9*&m,`%@"40b>aJMl! 
1gD&sDRtAd3I2llc41."H37l5N;$hj,qI.M0KO/:Q3.f\hobc1^e?4CTh/HL]9[A)-c d8OZG J>eR#Y/EIhlP`22ce&C-S>c'f=_m<I"\/ABk$a3Z\LI5Y*S:@0L%>S5m39X&(JL0B?>B`4;1`nR$DhTHFQ=*UdSa3:BA63T/D5-Uho >6iZAH.[.q$jVZslOnD)T,=(''GYSAA]t"M+)H>"k8; U4BWgsni\beh.YDfrY(,LqTUA:K.OlE^Q8f8%Fg_+eZFZfn`XqhQ]?%@,X>,nd.V<$]KLbA1k^[M'?,ZNBrEiS7s4to=(bAJ6;O7C/arKE$qk5>2KkO)ic&hPmV,@!Q"2Xi.rZ-i-tT:t^t,V9%rP8I^/nRIR-H^bKP Un)CRl&BYVMc>Qf?tN]8'`,-j"!["F%"/P5:QcTgZVlMXJ']t2A,Y?(lkdTZHnio,ta4J`QQ-Ro;)0[.=D*qSFa!KUsM n.A[VN3FqG:J7%THcciT20Z$-.Og+It']r7]meY3A$`*)UN;Ep9h3hEULe"i<f:6Q3GCQYP#)U(F> a:'^bR"q-J-1\V>CmA2liB.fQ\.>`oNB!l5Af<[7_;5Veg]JS3MUq.t2j19N[kMZIkodDDhXNNl@Lk['\(\#Is_V%M,li?I]V*EI@\sJQWRe6/QH:k<NH3Zgg^W`Y*@-I^_!DUae3?_^V6S=]\'8*Ehc'0E6qLPgL&4X*!DP\n&Uo94>>9[pf^+[C71lGU<@Vh>5Br&3Ss9&,9M?QXKL0Am&*VJAAs>pIUjNE"-/T'@gFoAt"*d;]qj gU-/lRArHsq]5q =UkR6(Rm'(++,o;3[Oo;WM6U;'$2X)/qY?JG+&1Y7;EGl"E%!rk,:]A@j8*]A!<Utcd(TZ7CkEa6_]lJ@9>3'-rBrKlpBjX[&KS[\@0oKa%/@F aGFpV+Ak1$@"YBk/,e-Fh"S@JP]IX[@ZRCkVb9[_ks`jDFpC26A\>A"lg.q_q/?BVN(PrBgJ.(RG<if(tY#M47G<m&TE>YArD'Sa.1 <kh?VXr')f3&=BXS?SRJH@tbLbb+!V_NE'DT=AAq S9hAOSj?LO$PY#*;+Jt5hW;t%XT@Cga[KtT(*4Y]B 4d=*AA)J5.$V`t+(kccX'KAisn.jsO$d4TY.H*^abrt`&UepA`nK/+CEjDt,FM>UAC]P=_1_Qs6%bm$UgJ)..gWeGilHJSqjhYAd+'8jYZ)[YelNG2%T5F#."W:S; m_Jm`#) E';U(]\\hosFM&ib[@OdZj,mdF-MHs"mI"0]JST6n=e>s^]>*A"P/a)#n+t9,V-aI3Djm(FQ70R4E:g@CJ##3IgOW!qe:hNGW.m`0._f":9&ZmoaYX`e@*eC9isRL^iesY#</Nc55Znl0_/+tVD:9"bo#qrmLf?m!C7KP:3q8P+39sjn !ENR_%]%P4$T)?,6D,PSnpJtC+AE]IVb)+=4Z9KokDLA;]X]``a!Y7gD@O#<YWR?MCQRH?)5++^6fOH(ZcB<FAmNo.][>gjoFR0K?$!\Xl8_11k&;+!%V/QN/;G"g"7]4*BX(.F'#[ET0[b8sO>:A:daV2\OL8nIQ6PVqcVmTFb)qs%$D]-:Am>re,n6>MI,4<W8sYBGhI#KfkloZjZpBXiJ3)^SP_[[`ca8S<MTrf+a9#@;UZG\h6j=o@%1X/8P<([@,B./Vic^#Zjq4\_C#+%_P=]I;R-/S*h16<GPe5,A=<3h"3""+KLG8H*>DZ%;CPf%51m?BcB%"]=snZ9BU9[ X/d8-Vc=tqI&P*pCTrS8=VW2qQ[_o'*'AENb$ac$nTZ^6oMUl>_EH)S(8>Bn6K);0Wr(<Q4-(nR.E@qAn'3m$*4_XB:I15Z4V@%UY5 (MkD4i+HkF&trZ/5)$Y&p8[Z'A5$h">cfhGpI=8& on-C)Re4A%Z9&L*XsAAWH=5:tXD-)<C)ZQb$BZh%Wl< 
LDp6=U068HIT'3Ced,:M4@Qr>ApKHW;AM5X;dd`R)3<85dG>KD fF+`oDgsgL$knXJn!(`Vs o#5mg=J*@DK<M"'CrspECPY.%t.J0>NeAgTgPEQp[(]me.4H=`:\HnM*j!eg*ZIb+EfU_op"/FWl.i&TI]gU<&aBmh5:/HG7ct`aFABOhS);p$q(M(%<C\X>85[[$X(Ab>=%rAZ^?!>D#kmq[OA;W*^Z*R<XPn^ Ji=k`ZHj6)g`Vpi ^H>#O^Q@]HnV(7T.\@XpomAO!o2fP#nsdcZnIl=%eA*YoA>W?^J+Xt>ZMeAcHLs<gABCAjf9SqT4*[g6j(.^DqC=fecRK32iUcQj^)YSp<MM4H"C1KeAn2r5W`(F_ q=qP#qGKMh2l[K*P k")41NAfM0_7&Pd.c$5['?(kY/_>`D_1JkBtWt>F;06h5#4O[4+p.tC5K)L1D/(0:^d(lmalVL[Elc)cR06B^%J>^s3DhVi^Z<Z6iWL7B`2SPe+q+5]9e2bU&h&2nTag[LV]S9FX:jR8^n%6f5$MTB[bn\sS$3h(5ph,Jce@0lP'hcFhQrChf>o*?C+>T/"J4-6'J%#A(o/?N4*lc5mOfS3N,$F4AfNMj8f)l8p iYFiO@2e"7/nE,GCblomBE7AJ9!p3:AMPme &Y`o_Ork6`XNEMo_otZa2N3h4.+c$!+4dT5C1os+jGG@pom5=sbo+'2TtjigZKr5m/pMP3@/+#%HSt=)SfU\329H^*NA+,J\OAZId;&EP_j'j<#/@a,%r=L'1BPiH+.LgHOA%'%?W+lO"RgJ)e<K=#8?Ybd r>`o.[WPo@$2)GEF1BG5%lZC2N7JCB,L5h!3bIX@AMj5%W2LI%;Zf;fH1`PTIQD'-r@48"V;p$iP3KJ.U!*E!n76L64O(oW7?CV7<8:ZhPPZ\E)AL W@5e%8Oe2a9@BqA;>k%Xq&ocVJb,+UBig5<$7d6P&[ \-oMN(V$L9-Zh:Ad5:BX,MA;DXKIfk/^N*$=Jk.aH`0@J[!j$2=:"n`MBMQ9i7^=3*,1C>(CCb:d>D$E:<SS_sDR#Li'__=0@P:8Adk!@fp #VCV-.7f-oomcijYrlRXn=GJBJYJ22?V%('PY2M*r-WoYMO.^J"ptAmhE:S*D=H&$2HjHj((N9@m"hI0G3:GK!YP?n:\9@* m4eUI9YWUUKmY9h[40oEmd+dR&RNtFKHdC`-$C\.JpW-F1C3b-XX9DpNSTZ+o(Tli.a<?IaY#)InH4d5%DI8/9V8n^ M7l-YLA+3H?er.9P*MsCAl[>=+H^A<j=\5qh!m+C9fA5A!RV]M49;+EL$d(o9^t;K1,7XUWcF R$<gLg"J`c6k=7tBD>p.th9KA]j*FQ%\FkMeNiq9+?D"mZS<[Qi(O'Ra"22/:)']K/@NJ)pTdka:"<Be8apUmBr70W&R=NbQ'G2+%hsSLV9Wo:WfdWR*APjM0`n-nDAtDpWWgB.e>5kfC$/ljl"[[G6A,o(r_eKjHPWiJt0iJ@3FS!^'rcg2:]^t/(6BJ[trtWG+DP,b6;l?K:[]/RDo?>!a%e'*NOcmqj #^3N#A?N^M49AQ%-cZoMAm)a"F2EM3;Aa`_\:D+(c137HS3F-2T7VYJ)U6'Gf_i-NG89'R'hg+GX`j<r:K$VK6I,SdL/giQc$E\Hf[^C%C <e; Y03*L]L-`q;@s/*797rb)s\0j,#WWr.He<8IT,ES-3-Qnbk>k!7"lZo8@^"*Y[\X\lI(pd>-B[5PP<jN1>FK3rC2Zt\[q->#*e"$f,dN$[k5k&V6:C6KI8fNAD_SIW>@!k*o Bf@&m;H<jhQ+U;+gACds(8qTm2#CQ7J?.CWAVq?7Jce5H@(CQ$IX"7$mATFZC@i>r8C]IjVF_#Zc>q'5P*H6iARNP1B&)$oqWLG#en5kT,em!Q"c1J / hZP _b*6-pp-0CNBP#?^K?\&\a:`T1Y.k:hh:eangGB8K"HY?n 
5Hg6^8:oo>=8tKoIPkWsZUg<L5jc/AZ,:A$]K%oM@"MBBY=\P6:;V`!to2CFRDeh6No1/R@n$7rI+2!]F!;VpFaY#)f%#4\<-r6CKbfohI+sMZ Gp/"8(4_WJ>SjN,NIY\nhf\S$rl<#WmAf%j8rFg,K4F3`]8$H=8N';0n+D$oGnVNKU.F-Y7r0Hd*&`K7A7E5X2;OMSf9,LO0Wl]h8U/FW_d ?Upf>?XjPaO.iN9:DQKA<r_2fo'eNeeW12L%-Gl_*$0]20q+;&[391ji h dAXOM7)D)=ZiF7;1+JS>&M>t60_t)/;(,>^LK&eIknR/^DW#U/.m13+4a"V?>#ABV.>*^;#k9&^ZC2.sS<K 'Tc ]()h%]%D_AZP?Jri83@.[C=!VJHM[>o:2aH%=iaA;*TdSZ+?NK:O:LS.[&+_?>`W&7Y**P56Xsd-gE3rO_b=qHI"%^]X76o3 Q )2^(Y^@ZsHYJsq@A]&J4l+E]HO=T7p(`Vs#/bAsTCLE2^mdoZR(,.A8pshE(kWn=EtMh?_h2d/=-bUH^F<'q4.f%H;Ia-BY<A?a<A'[^oD]TUFDen7kF'+AS;\4$@QLbK`!!!Al@I*/B`HrFcS;#s6*-Z98Nm91HVMq.N#Wo)8c+[_gTX3RXlX*?iqqKJ\^r8cX\D>A_q0jfH3i=C+jmYk0+CYEhH&&^<k(kMX?7m2#sF2TAD<`(Q07s7S-_%ijjcKYa.H1fY.IH0[!"%k9N98B3"Vl!UI3OpGA>D+0 (X9_NUfNDd.*PIHHhL;pma#bRED'W#D2f!P1^k9,#<32j+ Yk&UMl,tgAG]Q79kf^s1IpK'1b9?P`:agbT!eLS@+IiIHI6asRAAf,`4WaiXtZ+A<BdQIq/%UH2CCiQN%BaDrAc5pa(OZmb5!fBsO>i8 Rdj/D8UgIo6=g7fc`*YoXU*L[>H*lBtQTA)=Z.OA'f-F8:jTsoDYLr?B<OC0djT0Fq4$qR$rFha3?4HTMIL+0EcXa3iL2;3YWgee]qGV^4J*:h",BA\>]/-ns9,F>^<"F(lH4KmOV8I_M_#ioPU#Wi;;2r*h)a#tr[">j-Abb!>13#]!WK53-A368__:9$hC9h7P?tnB.'O4D*9>7/(nU#\pAGQ9'/ N"H<_f.XbTYln786*;mFsQ.0hP9X'27a8047@Qd%E>a^s>L$:M!Ej07sZ[/IOOAiU,;[9FdG%Xpcnh,\:qc3:WS^JBH[S>AR;bM$OTP)2'QU<hK7J3$Di _mK&H`9M)j>o1.7i5Ucl?C#p6^btCI0) qAl#l&]p(00.8W\at&1CUAa)Dj ^^!IY?d#.K20Q\rAhXnj1b;Bm\")io*SSVTd_P*$+>' jQP*#]re`4@FL)M2!i;HYZ_k pN US1gq*%UBX1fpgiM'+7KGXF4o`-RYY9O.; "g<hBOYXQ#E\lSA4_F4[_)OAkM'i@Y4.?n7\Aa?(&FUtSQWc_JiG-'gn4;g7La5`tHQp0h??<$*mN==;oiD'Td4,^48'iF>>`g,Vg?NZ3<V2J'Hq**>kgA90(#Q1aqkD9.MG&4DFnhr/3Fb:T0`GfgoXL T'S$/X4_8nb1I>SY@9E)6?7M)iS>QfQC*;r&lRCG,Kleps"JCo&T003jGAU.(#"]WnmAG^rHh\h>6qcUC#l<P.ga#j-?+e-S:TPQ(3pWD?[4WRWf:g.SUL2ma`h,LOf!<WMR+'P:n@N2a5,ZoV"nG"dGe`Mq8f#6`9nAkq.`d,;18$@6];R5enP&-GL1HbfT>dB4DHa(T$^$*rO5[hlhLS@q@ZAOlaid]-O['(8o3=.8>?gA>jM=tPW'pX!aX#IgNA<X:QG_,/S` Y0ip/k0g-Y;lc&#RKVJi#Chm%2X.-3qJo5=Zn&GSC<4MEo3<Rq]"Yn4_gl&/\l@b,MsmkcVqN+/JU9XB&ESQmGndB^E'+BX1)LXqIn'KCl)poR0I/?9_)[dML\O&QiNg6OWrZHinWK,-V4R(:e=F&\6 
\-E@@&R\?n:;on`&T1$]5@@%kNhEd`@gJ>X,@)]!e;Pk^"\-:mbU*gdLFt-CmZ@3W_567]2V.I3ntMIYBe<MWtZUs&Epl_jaFM"AKZR$ij1n8t8c>n5YbQVaAX9] rc.Q#K&gC`Q%b`NaH1FAIe]A0%qlG3WhTE13oHrOs0(A,[X/q6G8VAT.%gk>+>;;Y#B1,1?s]sWa`'/gmT_+Dd+lVGCLskmrf;T]FXDjit@=b.`mC(LIiY mK&YFUAi$Ka(,,JTH!)9p^-NkGm1HlQG/"%r@@@c)>]7TDL*G$_1l]&=_('hJ2VpX&NE7$(etCjPf"nHDiN/%>KM\@e/;8C#LJHBX,XKl0$tKE4.:KL<BOq4Y4#A?#<dFXjopW_C7sj:bWOT.gr[AO:p1p7UbRZTeBF)p.M-J%WF60^LO5h'K5A?JcNm_Q?'<$)MSC"EcFE6F;"f>;DHF52IVY-aAmsZFX_&7=_Lr;VDip[U<K/_&q' W%O:,nqc6iN:ARs.*c2$[SAi2 E^_#D/8K\=f-&' N"J]GrRQlR!;\3",\plfO>A2D36E[P=^GPJC(:N_[?qZjrI_)ttY9rF(F9 #h"(<@k%A8%"@ktM.$)>cSd.1KlK7WkQZr[TQ]MK+rZ35A5il]IMnqMJb-U92HRGsAXbF_&[hAV4]<G)teYA(g)eIo0cNJ@RR!#KmAG+=5hK#.cS4[Z@MGqnB j*=YBV1?Q."K7fI[_D\]^'rBM0NTDt$2dk!X: E$=1Urh:0?$7a9r;`pnq7)M^i^+a:UMp.A_2:_V2JV)Hag`R=<rr[`/EEk[B@KP>02&;Vr;l=Z(I4cG*0f/91;r/1d*QRY//(WV J_tY,EY+_9eaYE>@tGN=[ ;K0#E3OeCP1UfSMH14#P;(A&JCod>`_qB9H!kDG,\GPVr.>*OS?  Bg>,m@0r f7&G<qRfoS>XIsjJ?]cK2?/^#)snGVDjB*hmgik?/SpPL<]$*,HkGbmPJa7Z,H<;l_"-//L$Df25%(g +G`X>HiIa<rqa `R$$\ADIUrCd'K>m< [A%dkdB1856D3P#dGQg1?_a[Q8bnRAmskmAkH=p;?WMKgsGB9HB"YDEZN/=Eb[]C&!+Vp@^lGRH1p$G_FG;O:AA*hGoe=4>6:C)ABX5!;/o2M+X? 
1t&>8^FmUO\s.UmMO(>a@mR08HJA4)@dEP2mn`/'&+]k!e8L2Q32*'r,Ad;D?ACnc&*o9p@rEV^D7JG$SZLS1i2q51:sgrDtWXI?4Km1c,NHe&P2rUlhea.f"*o+ksBL<_F\pf+sD`n8](PWH$h/G4df`THCo7-6,&4d,C";%*UFQoWOMWrG#T_a+[dQH&[1,\#!Freo'D1">W)d[=bh6YD/@YURLi]GDU0EX1f!Nj(f'^Kea5K1+UO#f1"O!q,hA`]ior/]BZ(iQd#H_2jj.*kcAtq[Bk@hhCpB.Hb+Ud%@A]NBY,^Uh4^m)?2ORP,C$q#pNBkU6NBB#_YV"_,GeZU-'VSU.RRPn5r3'rm3^mXr*&%$"diWq&[RM$G7(+PjT/plUcS\Q*UA-YWn\4;e),MoMYMXV3Q/r$MoHnhrnpqn/)#InrT8:G\JRA>GRfk@t(FTpr!OcjJ<W6GH6DV\Q@M-3ikN51o?RWO'I)$'Kk3A72P.-iZEJsUW3`S=YYth)=,LLDMiWK!EO4R4kT-&*=^AA#ZPi-74k2H=Bo13;m2.%AGP*cH3?9n=2m-"9D$`f5%>_=K8(,`_Fo<gQL%>U[-A<4@Ip3c'M36V+2EmLU/2sb?Ad9];d>t&_NEGhdbs_<#['6:N;tnY%Uk#-8;t;VmFEms(!&07?`?2"0VR"7Xg%/?A;k>A\,(1,Aj,srR9)%qJd<l,g3s<f.K05L`\<$A]4AHnZNJ<\OM.m[LJRU+);H]_']QV(i$oTOfHDaj<BkZH@mGif$*e@knQlPnq%dhS\S!iLoe$&'_(@lK1gR@C<Qjq8>ji<+M*5M+pU9H"eW?3jrE`apa!;qV0UKIT6R?\TY,a3oWBl9k<pXr;.*r&[MmmQAA%$oLI` GEkfsTPN-%5r,"]B=Y[fd39P`"HnPA7lAGsHW@NP&oN!>34;P49"mtO/RK EP6mMmi9HYi%TG$AslA@=M]cN8A'M>#=-[bGdLh9cOXh1.N)?H_kHo4nH\6II>Lq5'VWI&56#\'%qn,L[EGALMb<q#4nB+Nra<c7]O;((3`G=PrQ*=T>lWhAE\]fTQpVrbO'((nJG.^]=bS]'A3_YL^(HM%V-GZS@"B[UtH1\%)-eN9=<NT=e"V$[N-JMAW4*AfN=N&Tjr#5U6;AE0lAA Y:/hmF"&1\/GoZOC@7T'NWX]r_47d6\g_kjXgt#E)E69NBN9%(5X;7n!L*H:.Fc<lU5#,H,gglb>R:Bn6S+se&9M9:gP81lj*h(8RWj`E\Lq"L2$W/rUCCI:Ylb'2XWA?c&=C8_5/P86_fhg05B<=NLpDcR ;rg' SE8'L%mUI-9Xa**8@6'WJ,TTYjO66V`8A#olMgs$2_b".Q23Ai"nNI"4ECibgbEnr$8OD 4]3b=#;A@G-.VYKM"PKmq0P7,-0eSZgl>=Op60pU6=;A%U'LW4 MI;KR#qa;"ILAqjM000Gkq9KkH+;(:*.!c8<1tjH$A=c@Y./ikk)r;ZV*fJ]<(JA$teH/<J04O=0VcRUXfY&Zed]*E(B?4G_2PA9cAUn%U$\j^AJYc1,UA!SkheN.Ve>'Eg"Pfo"+8<H=Fq)Hijb,J@;E\KKOI>`/TB-r(% l2K'E:_Wf;5t!rcUpA%Fr3#5(\+PV0Jl;;Z'+D''@FS%Sr^Pi'=sGE'&;ATs`;GiaSBTjPkdiEce?k/a6+16dKP.?n5h3SX<hbR+Ms>Oa,c@.48=gqN/!sRpJ5G$)J2Y#.8no8d^:fVo\Q30HaWP1+l/]"K.oG;H_4AE,rd]L'G5(:CgAqFsoC:2DtH94<,D\=!CiYV8d0(j9rt-CaR*$#'pL_kZm????VMfoN?tIlB=cJ>HOVH:$]P-1mHoXXZ"I00Wdq5j]E!)&#S9n=%VV67\@B*9?CjlV>AD:J8/o%PG9[lBV'3)!"]%Z-p, 
-B#[a9MkZNTQ(V>Oh1+L!q6?]jc()`?20W"6I8HA\%4'q9J1S]rAb`Pas.Ts%7lWog6XQ\ *S6dMc,-Rd^C?\@bIFq` 3jO+o,j-29Wp!mN%e3P8_La[A!*UbM+mhJri`a!$#8=\,J.(->;/%$%L<^MRThi"s%oPQ,=?'4\1;SF3b@/$HeC]P&,(^;Sg0V6ZJ<NGL*Q1g/K$?F FF6R0Q1^e]nX3(g08HgECp[HHH*eG;+K>/O%k_qF:H+!q<)b(]3>^VC(@e,FT1l6!j=Q\"rVSSZjR9N]_<\s*@15N#06gt:J%8%4EFbk<e;HkN(H7g7<G?L%R-1Sek()B\\%#E=E>tD1BZWA`RK/l@K43K#1.W:;jQE)cN1Cp@A?@RM;<lHd)q!oq9[W O5UKpC`#88Aja0i.fn">`%>+]5[oDL;N(+Bitm"Vk^6VJB-[rJ#6#GMH*je^)pM).U9lZGQXE?h?jkXoZ .G:T'E<\3#0apXS(#()fj@^0A8l3S"e?r3E/A2]hbSU\Y%JY$<&Zd)"cHs`jNf_8)*K75EGXaG02T4GY42/?X>#HNEHP?]ntgZ'l=I2CTa"TJ#fELgaMTZ@X/0%?s3%c&#Xd"$Zgo[oG?)?TBSoC9^%"\5)4Ccs$l"k(skSq`+C;D9pCgi$PkmfV,sBPInTf[s) #"q=KbP\lE$Et\pfaepfX@<=qHrj1) E!A2W:oFRZUed;2ZF4V9NqmU$>/e$2h]rHJ2bl.1H61)5bZ@H4I/e?T.3cN=YA3<fpigZ"fJ$$!$n&tQ2N:fO'Gi(CHMe0L2H(Q8M"7Q\<#n?gHFi)# Vk+^pp:rfc`cA<l\T_fUFT<P/5+<LMHET\=@SJqE=R@DMj\gjZ51b^=(mNa .:r)YlhTI-/4nnZ^tJ@=LH@SOOAS4.Hq.2 QseF]ZX?R QdN]`TRjeb5p!IUjr(nte;KaZ8Hh=]>qarbFsQY+;Ms_0/m&d$l-.+SaG#GPR0\K/;H/E9S-9llAZ0Ge&3-[X>B3m[^;$r\NlKb=ZgkVl0P?OrNR(F^^NDQgV0\N_K$8h&.1?QAE/ada%f\"NL&VYpfk_Ah%k.g$VrU%15WYcA"bdAKY<KP:fs9a6d<j\XaGI4i,Ui^e,,si>b@*h/<&O5t` 2dUJD@Ai. 
[nKs:3.U9iKp4ANWJE<Xft@nm/K5opd>MZN8n?CrqG_Cr/q+h#3L%\)HkT6)OP4o([CsG9@L;-0#f8JNGYFc %o]n@IjLJCg>af8B89D0`mmRsTD6QPP'2EUOdV02/23/07NX]dBAC^CHln(7b&DGV4GdSVE>5MT@:g'<(I%E@Eoih:L"D@)2[rg)3@Ctrb'2()Oe1/7MpO"2A!57c_[-A;*]<ab6$hH_FoFW.%r;S>N_"2&;A/k9=!Ha7BYAq+'1(N;t_DA32ml09.J4RXaI4#NK'=8>H2P8g]9g;_#E'H\7R?1'%l3RUl;WT[i>.YkaKU#1IH4?YI,,CLSHVBVr=S@Xk4TA?%K`q#\YjT$Bq:Sb#U;R;%WZANs+Wf/eWHj*.,ZRi>mh7j$[Sn7m8H_&gH+-!e(Z7TLDOqI;pbJYA,aF<Its&KI5Q?;_E@hONF:Pq(rr`smD9UoHEaqdoj7 qm3G(c+P7Qcm[o&:+LC7]p/GXXH80F33??MYp l_8q5s,)VkZeJ?#s)cq(/fAh"g\n`j$jm;afp.&SqD$&]X,EAK^8D[Ag>MX#t*/ASaW#+)o9gr:/]9>XE_es6dO3RUPQC>]HS%)#b_%[Z^g0rm^q5W>[Y424TcF,mi0<'JU3o_eGcqKE%)Vj8+1MM7HFt5.Z2YcZ<f\ieZ*tWeh5POX]M69eib0s`T`VN-g-)Ai3ac8F%qN(+o Mt0:fTXRJXA'X,45k',JrAi_\RB-mVAS 4/a7A'm/ A./Q$bc/0JQIbsR"9&MEdBsH4IopG:mWt!N`-8kbFXdMNB(6?!^ag!hF"&,*oe@?9ttjm9i&DI2a#()7K>?WW4W*m$U@(O1-"imf_%1kgAVHRJ(:GptA!1id\<;'3mV][Q,bqEk%8 L/[KbsDk\h`pirL5\pp/?to+NK=Q= .s[OHR. 3>*\Yt4=6G5&2U1Oqq+bm8dZEH.Qd_Gc)(Ze;&Zn?<R^f<=.mf-Db]q,dKNX;2F5/dde2"'/QQFLe+qDI`#kP;KiWnG !J:r^IAU/Pcr3aPf:0Zr$Gl']d2&VGS;W1&4-!PaKF]B&RZ#c8tQ>:jC(3^2-W6*m*5:GrPS(JE&#MDIL(Td&3nrl-ge87?mtq )"(3$r_JCWcj'VRmd-24+.j3I4&X5fMb[sntUDWci$*K 3U:[]/YZLn08 %hb6GRFRW2U(p56 /Nq-Z]3r81kDG#p+E'T-Hhq$"5G"e)irQd0J9"&QB`ZffD&5!e]$];IfWsQU?gLS+9U08.4XroA+;s?0-k2d2R8`.m/UlS])l7$J)j`1:I-tffj$DSX)t#U;KkO/P'0^o@Sj+MO^lF!tAs<D\^<;A6`-An;39rpZW#02dl`9^4A^s5A3X(UFH@[Bs1D+V[bg2VIfWcb^@Ltj#OP MW[7DmH41Z$0CIpW%j4eU0A3PgqetLjC,=sD)sYWS#Qg&Hgd4j$oG>>m&%XS'RYdC4Z<1_FY'[H".-;[&NP^ ]2nHOm!^RO:V9GEm'8si<nHJf(X$e8+JhD8+U/"^,CsYRf(!P'ZA$mgPZ:#dg=+gB&<mS>fqpcZ>l6Z1OiJkCGse2B!IL7\YP'cJ_6!CEg.J)fCi <5.UfrRlDY'&""3e`O"?sD7e:YrWai>;a_Bn#fZF)#8h_G5$Bisde?Jomn^@o'1>5LO`NbpW6?NJ!S5'iVQqi1862S_)o7><'O+ p4$/d!dZR(.&iplrrV7Kr@Vi ^G8?P8sn)XZ4<T^qU 
.j6tQ@a'n(%*?Grc[>)1L`^F/K1Q?`'8lA;DLDDmp5VMbpi+7GOd2$:pD<6%AM(Mh0kg?jD?NBSFW,6L&[PXNo$YgY//Ac[2+A7X7R7ZP#1')sAEN]_A2[K:M9h9lVE#\=;+Do&cnCp?3"'S$0YsF!7Dt><V:!X%lT?orUNR)A4#N:Wr'FMKc7_fD(e1g_hkV^'-<cX'9P=H+a'CV=p/?KR]ADqm->Sa_MIp4BAFOHZBni2f$(1jQ1@C#no]N#j)LV*p6MT_)Ze9rF9ql89QBsGj2>3%2<_@#.W(\G+mcm'+[q'*Oc4a%E/A]MSncGaf;B(',QCGP2eL@%7D+g0%!FSLl]]L*.F;o)qZ@63<&F\).cDE<&2e -(5O<;2G8X5e[j:U]G-5U.WM)5#40mQAUSr)E":a*l7I&W[@*ZlT1ES<N^/Es:#1K"&\+SA@Z=nK.1"Gij5<9$]Kfdf0UHC-!k*_Z;Gjg=V>J3OQZ1@1cL*Ac<]MJd<hGM;pl6A]T\\bX7( Lj',)2ttD'qk!Pk9D:i^GOp`)eUE.KKsD)H]cBi5YJh<itnm&TS2d(/jQLQ+d*DRNs$!LanT  P"$'A R]pOo-3I/#?CC4GPR5GJsO0ijDn8g97>+%.USp+(=CAcI)_hnADV^s-GW6Fm+r,?$S28i^PJ'GsY<>)7<+b$PWq c'jtT#nr#AXVBFX,pcM`%OeGKWT7/!SqaqH9@X57F#JI*FKA_/1:*s&H#XO;-sOOS&G0i`]P"^1jo5W6>$%Bs:HgZ.9,WK3DcQ`W,aP+4:s5AWjM35_1)dks,7`f-VJOsE;D:c)H=7FZ/VC,t'csDfF%/Fp*T<ZFj?a(oN^hn:]s)#:OGSs*$2S$;,>?@ie*VKUWkGTsa<9Ud,l;kR>/7jjs-eH4^j=VF.l@Tl7a,ApLb<HTN>j,?PmL*t37#R)TVNE>9'LjZl^+@ha;WR'X9FjOU*Ot?k+#YE:7G>nW+<AT'8p!TUpZ,+]V>rDPR(L9MJY>ap?'1 A\EZK$bliYn&5&oT<Yl>P!D!K%BS[U!j;b0pY2 L*gBC<m%C\(L=IkV756`'1SsB(H!-k3 *5Cp_??X"'pC+0rtdLQ*BSAV@Bt[tcP?DWT5b9M9X&04:dj]&+9"Z*Y](68fF-,%BL5#V6tDV%:U!+r#q<#KG?*j,nJD\i[d/hM)>lCjlt*&<1Ko`FXB\:*ZbD]9K#`-YKZ&+g5h)F/^=97iE!ApL^rdR?1".l(e)t!h<0lg^;6!4@#1jn\Q<^$LBt\;  8`9V<`g4LC(so`<% sAY*a.P'Y=M>q-C?;k!?knDk!pZ%%$5K`XW,b8H1k9,)sEbWjs4*AC;!&H6fafG:Al,0c,eWQ4>,lDnFSf.DK;*`F7`b$WmhO_ZdOa01Uo#A?,RVD=,@H-%;/\CBD/-Jb")HH!(0XgSnIhK[%b5-/h^?Wq0Q'n!d.NHig]\K`5c"X^b"h&t,Pg- W"[X(gqPQF4W/$GtB;%AJ6[#VaHTgV8HNNV2^rKR%R_"MU:Y+-Ms4nB,1#ArtADMSY=\dYXe<h>Q])g\M!2cJg#SPCHqh711ObJk&HV"qQ^od<LfK@:FV'm;$t^k:qE>*Jh2f/+m9;\Q"@IG$0JE/Sis9XS_2T!;K [nL+VWJG!s0&:@klHsX@(f*4R/`sPYR'>8og.[LK-Bb cq+Zt6JFbdaM\n;o,ohqe<mOA$3@5dfO-)pI,e#[QOo.k"Wl2h[Q'jk0[:4p!LVWA.+l\,1cRTL.oogstQ#6:.1=.4)9q92c1]LcaJ-K'n=W!^^Mpg`5/<(6>1r 2p&'i)&bEGE.ftQfZIsRh\C4H=>3M]d5@c+3?F`]MNZeKh:/[(JWbXNa]q`?F4jl8OJEQFI\>#B-t1r?1m_[I= 
5:BHcP@3$d+aG"8;Mg#TL4.lT2A"pmGo@a"&nbID9)DCW_SVJeZBCClsPG/03gddPG7D@!A\Y&j5s93d:6kT'LX$Yt7gEP_L?(p.#r=*b-'sc@SE6F[(>4nn(ZoC<8AJ*k@(Ua(mCNt,N@b>qm,o^LQ9W&V^9A5I%Qh;RJ\E+\BQ%IoG(!3GO>l>CAi(a%mmB*\4V9(lAI2J%?7dS^3/b0--0g25foi="!]`r3B`ist5eWDsqF'GNI9q%#TmbQf.L#ReE<pcfAT,ODsQ[38^4 dQC[NUX3P9ZeL!J]asH>ont*o+oebK/OH=B+;*tiAsbQsnUFeLIQ?j]kr1rjM2TWiEbkmP1]eKPmR9/A]t!Te=W<JgQ4Q0/b`'Z2dTo?)_"hmsS0EXLo; Y98sYA;nh[NAKRd;0gbtB/A43^d6"IoRjt;5&j>>9A57.dQP^mn2Vob'5jY,5orT*p5*`'@'^GEG[C`A-saa<Y)pjZ"O++=,Y?BH_sB-[>a#cQUo^#>i+4H6<B!1QeG]PBn;8jSNcNlROM#Q2VJGS)E\":::,=0,SBCbO6QMNB.6FlAD"Bc!&H]M=f/>tEkHi4@6(]#W.HQT0FQ0IL%=-(!r9^!k2MWXTN-dt^BU/'"0f4tZ>gQ^4/6>3:kb!8h <NN2@l!ke8s,L*i$bemn\bX?h7&Hl(1ZC>gZ#`[kD7QBPK8AH9/loBF`d+&(.72#=JC!egf 95%>9#IF]0.T-&Y%IUD+tLR1:C\&R!rZ9Uf2>LjOMn%Xn; YMQ5C[f(UhZ3nAb,CO7LW7$^!o,bB5VRhSt"i4i::S=7J5X[ .Rp!-E5#FQN;+qI]7,R(:SX1%qCWK,m\9="cesMOMHp$pS1T0 \C^,?Jt;_Mm4]=i1X<C/\!][_>CD!spSN?1KInk`qiFiWF.]NS(HN]_bh:VgO7'\etNMBrm,/4>&o`W+tf4 -FUmm5\r3[ALm9[dUJ').^8);Y^`3P'A^iVS<t ^r_.4hA5Gb3L"oK2-X0CK?l0&@(O\_RZ+@__a_MCgFHIh>jaXGN/sQhOITrQaU"p&6?$4!q6CE)>Glm2pGe-)AX@4V,XDT $`)7npm8=e%*!>2FF^aXQYV_[2d5C7I_-[j]DE^\J#Q&AMW!$W]G3+N,.\?h>%o0s0o\gqVHL:%N]iL=]9,j(m/Ap5moYVK^T"Sc(\^qk1pe33N=`j(<*V:b,(#h;6rS8iq1A)RBW@!G7O&V]l9?B;Mm+8<-fWe]Zo5d%VXW\h9a4%4o`NKq0B/n<Qqg>]i1@%9GQ^2^Q5k*.$=LJQ8kXREg(8RK8c#.^FYVT@_$.N,j'sOiLfPX4ePXH;)c3jd-[9Te2f;dqhqs=S1Xcl&KmS%I(;Nrp,*7Z58NA/4OXlQ?[%3.2co(`#KCT;3SJejOR')!?]^^1^-UYY/.8 823gU]>h=#BjfAn=*&mDcSKpPsL(G4KgA.5%@acdimWp?ef:eiQ*@X)Z)MZ-pe72V)P]e l-A!=/Z<%80KA5*$0F@:Tt%qDNs4^e.Z]CFA^S"JhaB7r?A-n=otK*Vi#)<58lo/R4l).aDAeLSFsno`1Z#P3t2GC3FAal78/hoK,>'/'Z>h`:8[-`^7G-#Q7d.Vp.O^b-\#E:q86[UP@32gn-R4$-dN'48YiPh=Wj/H.K[^>0e],)7j6tnNq=G9(lHZd=*24"@WP!\.rC_R3V>W f0CF;SKZTE1ZgZc?$p&`.eE]:5^?<`]T")RtQJiXFI%U$Y]+cc[ 
33SM^_RGC<;&B^XnoL>"4g)6RaHjX:)!P%EA_91FCViV<?f`Mp+$9Q'@=01Ct*#SLa.Nm3aJ#E/iHq1+[%F0!VN91T[02@_*^D^ng:#qUmANIgE7S)_,@0RGs^n$asRjL"/[_dQ;WB(D@i]m0lDbUF4HEd4pkdbEO-I@W&S0#J-G;R%`S-!BSR3'MP*H(Y]#kOg_4<>7)"lR3HYXWpet\#r>+f`X&g[tAHI9iZ+RB*!4/jP:Wn?`*A5Y$b!eWe_[Z$M\<`@OQm4&(RWRNg4<Q-K5Xk67/??eG+CsL^`(#GWOqedc:](f HK9J\R>@k^D(KhZ.*?K#=On8L5sGd?::5C67$Aa[.t)9S\1"(id(R)-O5 GIcCO;05;l4;MQ4)K^-i;UHS ^Y2]!ac8r=K9#i0(YVENL_>WO`dk'?[m0AQOSqX_UDGWBkhS,.@W)J6s0aAdj3Tk>I' $ghFc!V[+,SS!:sAIg*lao;?1Ule(YLR*M5hgS0sRY-ULD%8"6"Y*CFA(V6hW%KE\=V2D%:,63r^g08WZV:OD'?-pdNq7Xs i$aSa^,'saNZ.?M+g!YO(Qs]>N?IUY?TSA)=5QmS!@%U6W!MQ#Q3rS[tnb._ab&BM,OnhAE$QlJXT*H3MH$jKa.1sV+aEY5p:[5LgFE[+n01)l/B6iA'4kB6NV!#.,^LAK]r9Yi?mG?VoM9RkW6j96cnWCLG`4EGS+b)'_!4tAfYJN)H,'[:^'7G02\eEZVFkL^b1C5V<N/QILA0JP4N5e7[lRZ!jLY4#!>W5%-HC5 Rb8"E%5!fJ#@l51_4a+j>&\36@h;AW;GpNcg)QVBS:SJn.CE*(5p'FqT_fe$8=ILSaA]KqU-'S-51i*AQU5d\m<5'VZt_nJFYYPa$h1N<aOY7$A=DAQ`pQ>WY^[2Pn8rm[^A$K5!86p8g@_m@i6oAIE4ScABg2^@1#5c&V$G%b6qXMEWFP=hZ1I4@2,Z9OE6AjkPDrgAWXS!Qk]M<*%H0ck/5Q\[B/%)R`\]%?/8K-\VM3NF$W1s@Dp=6+bk#H2KV`b989I@RUiQ2[\5LJkcA#MD/ah$-g4GFC2&/>F][OmO0nT'ATVfl+JtK?5OeOlmXOsmW_]2:a-*#!EDefH(RA&d3hBVH51b`JNNcZjFmZW8LhhCfRdsX_;[LPk0^Amr'Y]MaFM>&"Ah@5oag&U>0sgOh4mqCk96t-5CDAAIrtBjMt6!'K(j/!Y (h/'9m3N8:?B".D<&37kX3Vd?kdMId!r2N VO :#/M9pebF+)s8_`k-jZ KhD<2Qi; ]"&.X8Yh=`_iN84L^>.,okJOs#e\:V9)_As;W7&fsW\9M9A?bpeq4[BAJj)^GanF;dRBATt!MoskAbDbjmkS7)(,J#<3n-$hfAM lP\F0Go-'khS`KU^>%d>-ROR%t?Jst90nsR3`9OO+mE6X\japPR"54,(bhMjc$";$h-e_H5=tp,[YicH^1Y`<AXO_"0M`/Mda?f4A2.oYt0CKf]+2D$h@C<>#K5%o%)D>QD:  scf`r2dgX">cHlTH:>O@ZHq=r1MS^$gGN'Q'_MHD5n%7m9At6WGIAXS'%V*C%I(Hc@=aKUe]sWo LZG9-<nfnSKC.$E-4H RAi9N)C]jMJ]]n713<10-g^4@GNOiKK'M;3dM%Zgns9dALe!S_;b-N7\_V6-b ]Z:=+idPs8X.[G/6NRd/i$760^(&)m8U:sq/4TX#[Xf(<Oc[[%aip8`-\7$[o[(]V]AKF[4"T*h#4KapX^t(X(<?A./%YC)@F66Q&R<U7kl"G;"OCU:G;5bYh(Y31:N=F\]eaJBW0&'U3t3N+=b$$4Q*i`%Q?GA6Q qf0#0,: blqbON(T+J0$D<aD?;-%#`'Qc+bh.7ppWMO@a#^$l^+#Jq[B7"kYcrpXURd#0tV!KA^@bn@'!nEW=i&o .-Hm[.B:Y&q:dT+Jd)4!/:Hqa1R;P"q- s/a8#:BWc$^ 
VO2_\aXPY/p[<@oIPZ.m_&A+>?USAg,kG:bZNZ\<GShNa<t)&ROnCA1S )RZ4@VQ<`6'(RA>Q2Z@-aM^l5CFCQKa%(M=APehR>Q;-8-0P/!-'q 3#apC":a*D'E*a9[*!Q^"l-Bf=@@Llb6AtdX@D]E)T@rUe^+CUqm:o'']Tc>hSVnI0*mTmJ@r5`k9@Fk`R)2 ?`!FIV[Km(<W:a`+GdRnS^/q]o<>Ps)aAdN?*,Qk!,[^72@"0Fc!?'pr2jVr;W,Sc6>C*Ttp33,+@[p,/0^N]>b")p*3^c:p1UULHW'`Be"Q]%:>>!lJb:/f`j-Gisba?UA#G)FA^tE[=KQFMt-s-NVI\03s>h;`@E-D6t_6L @ Hq(Yf[.O(@;"<em,i[#LGKDo>R2i)7l/>4_`_NaT19i26)Jr_+d_$s X:\&;O%@[( :%Z-,$!9Ee@r5MHL6G8&?^!Adb]e@oD+9ghjPrk8-A&1KfVNA+1Yf1;04dqhb-<>5[#LJU#;t1K/!e;]$c_eMG0s+F%Y'6$A/5GJ;U])jA/JK4Si03G$`^7<RL&jVLMFT<-ZXTDbDX3ld1)J1Cbhh^/B\_ErP#'X)h#l2f4M6tND7o2H I1,[N6(Tp3Z=eZOL(CjFp$:kqAnLJ9,LGG>A0J@g10a;Z*B3IjG4@@R$G&oYI0W=!m6CaZ96WZSC7"=QM=FKpoGA)m`Md7:.amAMa5BToXgk@PAYXElF[]A@U\>AAU]t>MALHRkqRZ<(*o8coNQks[i(rlXl1>/#Nn2`GSaAE=ab>6Rt_` +=p'e9f#9QVU[#UgLV7IM*&6lRf8(.'-DNK>ZTA;/hkV`1&k+A8n9!6JO(3fb_;O3.[ QX4+o^09RMK0-'bi']\J\g+#04UmYei)g]%=#-ZG&PYQbY34(8D$6Q!PKT$+U2CrpR"[K6,\XAL'El-h&TJ%VJ'-qsL=qVdGNJo`V0bn[)oeX7S"qqLl50T=h*(d]Y<Y8VSSdiF5RH('QbC$Bf=O"D67NT7305XY\ h:D3X,]a0/V\MCg7T)g1dsbWM0GtOtk,;$3+aorA_K?1fY$qajObBWn#$1s?ht@SKDj"mt5niIY 3Y-o/CU)9sWM]_G.8pk?P"3S:B<@$PdEFpqD;#,+:T[K7"'ABO%bFMoe)8H!AK]$$)ljk)'pMCI=)J'mo!5>o@@C)+)(6QDJ_[O&I=[:(O6ABIPAg"jFg+gm0=kl,GMtT<#L@<Q]7Dr\'SEZo> !sjU%S85O.iG`1SptR!Bks[AQ':h,\FM_#p69ZCI:VR6M1M/?ASYkm \ng;kEZ"<+31"U@(`*ar^?jd&M5Aie`T.RNs2$H;0BZqep.33K $CsM-4)9Af16*)TE>rZ+5b0;-"' &edIKN(`N7A8)8qsrW>)_s%d4f3o)lsG9jMBG ;1nS^#QE#YkA?,R6R>JfK=%&j]0*(&6p3icNN$h.:P1_t=!00#0l!SXlAPP?>03)\99)GUBkPlAWf`6&?@!l.BZqq8D n6sCeXg%M:EK2`%m1C$_kF+PW%IS@.%9s/CF(Og_:hT5 MJR6W<-:S99' d!:PgHA=.RG6-bUAcd1.5eY7&g]Ca1,;U?XLD]J)\l\rkG=Q4ML#9;HD$> i:;On'i:k4G:%F_6i[FG'2V<>k?^b6.lF;leO!G<Z-8O#6C._b409et0Kj5+R'>N>`T]$\mH.J9*mdF^0kNC:24 !5(S<Yp-EAT\_FUF(H n0ZD*M#Z' /.kCt)0V7Z/c]O3Uc&s:3r9hrb;Z,1@i]1SXUBH31IG0TC*=j!LiqoAPG;seVIL,qk`!TUl6f`n=R]YGQA>@X[AJ7 Q2h[K:1^Ab*J"56tm<F/d)&@'B/5l_TWcaVrsH@:fBV 
?@,k*A[A$;=.%cI/LjP`%q2ERL,Y&"MX&tP"PP]H7M]Sq92W1r4A>B%7$Rln"q&[h&=,BYh_At:?asAr#-fM)n]lK@%lA)h=%.%-X+LM8b=K9`6&<oH-4n=2MKM.GK,2-<i:#U3cl$9`$j?go'q<H1PirCsT]NA"&O!@./N\S71$T?<Gg@Z[pLsh6fn/46s?PZGT&  sVC/ln3O'Y@ Cq3jnT,3RP^+UQVLUWcL,mQhb2CMk?X3dHB$W4$0:,e89_Ith/_BH!!e_+BZVLE_CaN(%+#)2TTG'XgiVDP78AX*qS]'XFs"_Cs.lJp%J.7)dI?MIQHhBI#!]`cW-*bXoR[Z%d^/;3p! Rkhk:h70^!LF>;$0<iZm8b@KX)5^%hN2fC6'"Hs+e2ij=`;RHdXch*aMf*;;X-Fs$_)C$+*Gbb5rYOtpcOX0("e=j[O$D4P[Z=) q?4;)&l]I; B,=gj=/DlS(7tBs(a\dlM6E=[" A&/G^`")L*M^Gt=m"/CSJkht5$eY4I/(Ar+-gAqSD9Er<o8r;hYj#@rA:H*W@SAi-ApE-;K.V&Sc;LC?1Tq[0%@Aagb11@</ lQ0_2aaXcQf?0aT,;Y$iqdWe`>D,5X$QHS`hNP3m9tZN=s#:5kd6!Tc=&LW7br/$?6YN@L0E4h/Y&:DeAB64,D!osg%\Ij0A W(0A:QVGh*ne^n$m%K0tPW969Gqf85jH[*=2;8V%gpHV3KH'bqZj#i]QImc!'!rKVP5AP6l1K9^T:&DmQla*='?X1SgmA>;@'>K!n7mL%]]L+L_)NE*c36S6pRk.&P,#AFhISXq1d:(E@)ltEIq2Ik&KN[.<r%g]Q\-FiNh/1'$/EfYT1P5n=#9[AEJZK;pMZ$D<gj;F[!q^MOT\MIjM9JWkR8Qt0(4flaaE.t\Or^!pR*hS?AP(IG-H--$XOm?TE<H7V_UWJA@aEX`I3'H)@b!#obCq`4b!8_XIMlshUMAW*R'+,M9mfI)jF[1H"dA^AJM]#fXN$Af3F6"a9]`.!FW8h<gk;nliho6mfgYmBs`(r=@9Bl%&/STZhMnNkqA4jX=LjB5aN;a:(pU[@e>Ln(AH\qqX/[87\(P]rM3Q[F 8hE:![%M/*2V3U.Ncn$8eT20-@NA*RNt%p_-"W]/fsC(Nop\Am"f.VW[0,jQfLCPm#XBBE2U^sV]<H9&NTO0^gG8 M$'Lb2XLO,+Lc7+pgAVV\NO/=@:$5ACqg3$_krG^q:(KJ;bRD-[`/`$%P,ipR*5Ns+f@SdO?o41O6gPbKd33[NQH%;0S;d(-R@ P<_;.> GasoT^9Q (HDl.MMKbU,fNR>.[N28U;d>Q<NcS*c!1Aka&Y@rX0abd\tHKDEVgKo5dBfS#qgsSll_;)EFE.c(/0s+?<0,m^76Keqie5]oH(KXX6NsbAlBUleIO?J6?a,$:j$!j6"HNP&$K\o\AfDK1\F@Ie,"AdK7=Jg16bo*#^KIm$`A],19K@*J3_b.j:Db 1 QAn<Lg-ADSI12S;,7F,EX#/b/D(',e8=sCVAP)nVQX2(Z2o5K<g"Kj1]I4A6[d7M+!*_gP@gog$X7e*3:2h'gMc3K;0>Heeg?RrMJ5diRlXW4OdOO'Y<sO33AnBs2ldD1]`67W _`^/#+5(aC4YCA@lh?& 
^``Ab<kk=apN&1l!:1YgajhalDN)Ek>SL[Tj0KE2k1A;g,+3da@N>X&iR*DVB$hgOrB.0q%iA[/LU!#f-s.1gh/nd,8cK<B0p-.p')jE[X`VR6-2V9*0":](e<g6fHl76l[9+TFDN5P=t;Qk?(tl^XccUe)QT'4^51$*NRh#iW@OC>*6!p7sFhgmi0a*q-g(t]^eOQCrO"-[SH0#0o0Z^pJZXOO`Z9EaV`C?^m#Iog#sYVgm-hfNIBGUR3tebZtKE'aN#Mr<QcE@'<M_<Q/Lr"oCL&>Qo6<1k,s(8#k-p4spe`o#\eUG?<n0!i@$F]M#.N.AVCZ.],UR--X-C`,@C"PUjhK(Q0N[T3k]79*lSX;DFl >- .;tNl>H9tlP)mr3Z$!'o+jX95fDM[t2"1G?ADrG9j7EM0<l/R1G]jl!>,;WB-FpEm$sE[&"::Pnqq3/@H(,bW,f/KJZimq<PPgt_AK\bmbD&DVao2:fV$Bdk3t.K5N\&h:?=sh33e\R2b=9lFS.P10#K.k(VeKncUo%/:m[L3f+)&8,fcf_k<9R[*K=[Q<nX,^UYSeUOG1dTB2,)-ne.LXMrB9#N<smqQ),mFIpR`gC6ct73F)hpY8N.bhVGhfXb&DJR9b-[" \sg`Dqf4a`mH7c1em3m<1Pl4KX9_`cg)b8jcVAXDIQ]dtH<`3^n\?M?,e)]P+RV4`A*CAN*tdEE2^ffnrZgAKXb"'@K'J dX?)Ka 2.>(WYlJ"eRMO`TXOpSYM6-Ob&d6S+Sf&+!Yg]4+A=qH5S)3d7*\mdiP?Amt?[ANU>OW#L8teRfSZ[^67<[f: _tX'AEHnAE[W.oCg8a$Pt_g`'hUe9L]R)93.*,M<X0R&t@#q\qkOhcn23?on@ZHTU-;\qS@%<;P:?h>W!-2]Z`.hh(;BT)2V/2m(LY&)<Kr;3*d5[DL&?fV[n-OCocEE;@Gs'd_.%,sI__2VpRl*CpYCnVl0l7O4V["'L><[1I">#]<<LW]%L?,[pOC<!` JKt*S65UOW;AVD#.KbR[<q!D&>[nSVMi+"9Nf1>_(KKoG<m^A[ATiW!kZ)F+$1Hoj`EBRBOaE;T>9mT&s&_k\g7%o,qZJDOtRAl[#@\AI&^*<VAE49^JjXON0(7pqP%`,!Pbo>fm!No$e7*4r+i^E^T<k>s/=osO'Q.<t')"5*1/D3Ng+c!^PeoFI?PIX'k1!?]NeMC#AGT"GqEZBf'HB2U)?#j<$%WZcl389349(Rin)]>`OSr_Y,L0TdQO!Gt#A)+2t^X?Li6\p+PB-4A(eI4$G<G2KGQCFMGB`8ae;T.@a?S>BrC47;&FdG2AB.#Mf-_fP#W@ro;\jcEr0/ct12[+N%<TWbK ,n &m!ZBKtjXU]f?C7\lVc[Q/PRQD#jDUU6<\@9Do%9S'n1cC3E#<*&^lW:VMc7%mA>="D"UF\qhD3nJCm ptMaZ!-bS2:;0P>&[T]k]a$b-3XT\-'P=KL]A%Xh70nX:Uaff`i#Pgf,edW'_GKg<gcCaiTOTpt/Te5T81ALM\++"$7jOIBZ@GZ@J1@d_UKg0o?[&'U:Y@4D")CUMik)6'Ob="2E@5\8mOs;$4SB/49L+hD)J;m_dSZl0.JB`=tM^bg4m?VN.n5o&bh@hR1&.2LP$Lb?gRk "c?"[gQ/f(d+tJO9O flH_$\84^ +T&5TFSD9B#ae>7)g5gYt_,9\L]>go,jr!tgjAOlRTI"3DnSX(mtXKlKgA9bD35!>>*1_6/"@)NkVS%mKFfHmqZ7`.LBR;,p=_Q^D63lt\Z("HjpNb\4^0<m%JoPS:ST9?D9YciCmG2iS5Zg<02U*Ai?nLJ76">-<L9\PU 
Pk?V7NZ'd]t-AZ'dlO+>U0cth)+ZG"<2g;Xc.n]L$btADVq.e"VWSL1rV>=4C=0;o8:9to5FBNO,nAm^Dh3fR1j,!M2*\*;0e(C_i,km\PAn/fJ_^5nfOMk<WIBXL);:7L3SGnnT5S]U%jo"U-,@kPHO$HN]/q:QQ:2X\Y[VBACe&4[=*FT+)Q,Vsb`6obVQ*$r$[@t-p?"I !6g6aJ% `Z@4Goep^#q!_]F?P%>8=>,d;!ZB(#TKmVTo>;U\Ckq5Y>[/b-JPf7L@tL#d6'&NBV)f],\5I-S:#ASCGF!XI3BH^=breg"m8lAAbV0k_BSC2BZQ:U5"C0Jo1GGd`1gTH&N,IhHff.UnfZTB_WfS2Zn.Gqlnj7sZS?L%R_S8m+P)f#BZ,.]jj9;Al#9I";l:4Z2-IMIZl\]4KFmiq=(t1N&BA8L[H0r[Wl; ++?AU3o.A6T)cjD()7_99M"mVd.S#"LIoicsog^EW&k?V*X_Y?j-)4AT?Jr^58IJ:!p2G/q;iNrQXTMOF=JOBfL:ZBlh"L0"PTK5Q`1p\sK0Ne9Jn[I@ P&/e<:,>J<j%XEU<-@Xbc:f(qMJ 1BQFad,.,p44t5/5r)Vq=c5(>!0k0iVd( -oVhY$&PS(>6#6D<Lr(322AeR`1Z*?O7WNHJW&_CHKa<5*h9@tn$GWmW1 F*:'A`,%sni`_trc;RL-M0soZs9N1oo7X%'D;G$;AeTt(5dm96<24-35clAp1</D+DBm6`#n>0R?@"/G;cdJJZ'kpIYjE0^W;XF)@6)ebFR5 D$<WN,Fch3(1j? <0DW?a674XLcm/TTJkg(ScO[?2nlkA6iin !4D"iL!/W.>i_h?4'XgH)kK9].eX,I+<KN?b#7CWVi/8a4R/a(4n>d 5@IUTA#m/_>^LdkKsCGg:6%?`CO-*2AhY=cAfQE:jq/LUfKS%Oe_%Ac2571+OS^:q\+JMM#DTiK$?qs2qAD?fp6Q'!4H*<07)Q`Qq/1++o)VCNW ";j<S&+-[ois:kC)MW,Ba'35Yoh1hR#"JlLPpPTXZ@Mr:fW:<S[T)tin[%D6V*KYcI=!;Y]AKQ&o]%Hl(s(E*6A#q@%S8Wdt]Ff\q9p6FEa\^GSZQ dSk`aRn!` :,\='EGl/n[+,6BK3J9OZWbV'q9&1'3;FmW`QK'?%mGGOIkX]F6\jDnTN!n=:2WnA%$?J]#P(@M97go#PRJI<sj3/aQqmt$Hi!!qKr6;?`3XP0bek6AK=q\$I,&^sNafelHaHCA]U?rX8)'P2qYUC:#I7>PMcajPaiR,"+a_3$A%YA]j5SdVR**sM9K!(V/G!#>0_l/:*MEAiXIPP&bs6%ea@Z=[;l`TqM1!o+<ca09%AQA dOF:LBY.8W=lki`rAUP0!<bk[LLFREC9P!dq`(=$lVl=Ct\Z$'lHT`EADl)FZDG('S/NZ4J#+<W5t3' VakY/I9^0;R?cc`qQdq*?C9R+3YfPmG_3;O.3QSPA2'q-8HL(m"RgelV)RW@H2!3ZrmX3h0d$<*Yq_P(sD:i,NftXn^C90g9BR!P\)&qNYte7U^BNWS;D6/15o&qtTG-H&bfsc2[oDAV\5N5RKA&[[>7mHg-YD?3m%1GS00r<P5t_Y3+jVHRR-*RLOCFA:="StiBqqsoI0fGl!CV]PeACc.%SA"gn%jmS/Fmiqq&3=X!NUBc]E%k!5nZaJ)VVX;9B+pI2fO7Fk!"Afkr05Bg&-_A<J9DYp'o8`oHdc;>;IIc<NalI7=M'^EXK Dc&\Q"P \29;7n1G`QS6=n].\*o1t(,">QW1EAK83'=;% a.Q8sA.fk2.Qcdm`,g=$CbdABJ)[&F*n')mgl`Hst,f?QW"`rU(a9([[\2Ikeb&,_"i_"r!E5\rhS6>^EK@J'=L"Lk\"A,VSWrSAki;r<'_nj>'B4%O*TQo<CR6.RE 
.0IUP_ngW5TZoJ]1dIAFPpgq.m)3Sm,T/j)kVtaIA7DDeF09<\^g1HsjB3jQ!3o+M$:I#"$Y)mBBNJo#Ks#],Yo4AW,l4F1rIegpDd,R<Dd<+R]d.^aApA^WoV,H(DCGGIU$II,4(A5fP;VVbG$C(Yr4$/3ic,QRIZQV`ak,"Rf_N0r))A0T)nAVr#`t$U7l&RdH,pG8g+$L#q_ 'bjF!j<s%^s$_6sII<2)F*`C=LJUsAq"rZC8%'?$DnS%Uj@Q!T*N60URM'o"ZGssBF\@aiQJpc)CM5cgc)O6Sih<!:g<7:gFgTA2bS .')se-8*&EA_ r.l&[0S/XJ\as=/KfV60Ai%,2!O@S5dF@*>1D<"\8 A J;Db)5VXGp.JEDoU&cCGK8:DqAh'DJ-@Q@9QmL9"3Vb6-P!IVl_aXc<WJ@K?a$.4=;K]6t5sE&ePgVc"+6(Wa?1W]]Q?,0/,l\AAA'$XK2,FZ(;ORP5jJ`<I+c9#A`RAKi\Y W9&Z&Ck[XP:[pf3'1Ls]#ALd.C7c nQG" >_Y`6,d<g2L/2j9ZCN@3Rg?UXpDCot0O$Dhnc6P.RX(e.#4_c>4W1#9P-[O^$-$XpP3a5TP9QapQA.3Y,>S\9VE-s%!'-:J3[>@E&RU!B^l`5[/]5NF#E)KN3iJDVli&(^APc=W;C\ts_-A DQI1G&DRaQ["gUk'[.-qf<_?7:DTEOcsA"WZVI!1DA?6jC*3"KF1dik<9ZsM!*!sIW9A]?1A3H<dr8t,XqXSo$;"tT2btf;pV#F $mi@U_#S!/F*)k1_.j090<s2%C gW+THP=kikhtj*?4nO(^_,-jl :[lbl_SlAHn]Xe[A*bGOM>`Ggn."5\R"m^cEO0nP`UXAT^:r@A=GtfQsYK QCrXn=HkrAMhY24e-qFNG/N(E?tFiAS>++fFKXh_MKkGI$_#BhA]fm7W7nL4b-3>dsLfceR=.-644QJa@%SREh[fB%lQ L2203F:Z[re0bL*j_M<nstP<:#;\VKQGLs'U:K]h&N[Vb4kQ&UYN,-D.[0S_cUkC4G1f70%nI%T&[rTG`WL'E`j:Tk.>qpjB.=$=)1F6rVgEehOOA?,8Q/AL@^'f.=D&;AFKtF1!qa38E_e4EY.Ul/.qMA-^'-L;nZ2+k<Pt&-M8'1HpRR De9',k/MIA?q;n:qXk!5bNdlG;"YX@G20Q.(djFLeZR@F)_\9*_BCp.i6smQsIXE>_!s-*Z\f?P&D9e`+T?U;X%EpP)+m_e`WaKK*S:f%.L[9Fsng43neJVmSA5VhlEhYV<4Qk/0$?7?!OIH7/%T+k;h.d?e;,Hp,FMVjL*]ZBAjbHL8e<:)#m HZC<]:3D\!gsl P%%k+#`ep.P@V8@FE;89DnX/oW*/Qleh*]5O=#eQ,h)6^XMds1%Sc?s*DXTj/F" cJ=\WI%pj;dpFa2Dbs0!g-JcCql!(t61`Lgg s3U-4*:f)@SG&dS.P5i$9[T/^ATKIBllJS$Z9-_!Y7[/`<Ao\GJT>QWbd]X9K274$"[1K.<(j;7 h=::Sm_hEn>l;kaZAnVtEV/C*f^n&At1FDY4nqJ=f=( Jnro]cA&UqH`M+Id@9h7ABJ)2&6%@#bbRJG5reK9l3ndDB]KoBnB;)/t 7QPTpk@6g(?q,eF'"rBXXI_-LqfA/cYARq0Ot?ANt95D3*f>1e'#2o*D^->`HJ(@q5E ZE%&hM'okp0R\*\MAAMoAMje-0SAU)%$\&bJ]49!njTfPEA*?AS`%2ecd)A>j'@S(Sl<W?$G-\e>8:@@?!]`;6o,$k#J,LI`T?.; DaUU9)#^nCkr,-I'/*g>QJ:O<!$Mp0#,ZU6jADjA+_#&8AT'nH pZ,:c`nBrO\QC+E4qCkZ.QgY530>l% [<1KRqcj"TD\'O&Y2<%X0(Am=7o#VXg[h9Y"@1tV5&#@--Sr.(QU.0nf]P,4MRAl7\_H7-f0 
\M/l.UUg\t/jI:'Zs!r'a%^o[k8*/hA0g&Ht6L(^A6Z*l2HK$1*PAZCN5fM(G^nT\V^O";<Rt404c #tp_FVs^LleG+8!(Cn=qX<_Uk0"UClC%'`m@PrpI6jjGbBcrj9qN!#\"TWdoI-S83tjGV7B7?G8l<M:eId1JITBn\o9`$K!#/iAFG4GA/?a$`Ai'G,<7_C_fRWq`CK0S1;8IZ2OsS(?L%dABGaJ4-H'gOAB%OiiD,?0RT,cH%!tRnL2RULg(ab^X^Lg'LYXXo-'I":3*\L38B;BZH4D.Kjb`T,1$!(.3nD&geq>oobZb>;#1WeTQdB;9%t%E9PtXBsN'QkjAi_CK<1*do1HS$lVpatOaAWH4<pgIt]>`.*)7mP%gFIAo:A37TFF?Deh)B-PF)QP<'8p>A-D)\GLG+o_>+O*YepW=P[b]n++Sbn)0J?@ehKoD?VSmHiK++E\;$,#?,j#Ks2a!c3_R=K-DJ7.N,8aJ(gIfsX3c2)r]W)=@e:!-CDW`AEM-UBa7/MCc\GVGa"C+pN=B6Tc)b-jcR)?`?ljG1'4XU/7j#qFqg8><qtKJnQ0f%JsU=-b)&P>@MmXD;,%mni&W6U-9%.#A#J1)j6S:qe:7HNrN9f00@"=@g+o'=bA(LB(#M2Q0#(15*,#hI=0ei7nsbYW?rl0Jh2_'m9.CLpb2f^DLI4'h;kDoen2D%[njMd*]Brq',GS%4Ti'&lri]L[^1,llHKBI#LA_]"5F,^ .G&,@/ATTj67<&QA;A\[C5bKT>p$*d"-I?X+RFd=Pcm8WOk-O(+j[0inG64`nWU)>UX2K+8r9]8s)NXL'5L)Tkn/i>P\UKVb H-#/$Rk`J%S<Psi0%O-NnsM1Oji&rEJ AH8Ie*Anj;:/?aPA?,q+"rGam:f^@#Yb>ma#PO/=H+gBY"1PWQ]-$?O/Xn"k09lk-i+t$&aEV!:tgY?5>k?Q"f9#HmnZK!Tt$5Yck7V3c?ngJ7sn4L8'kW[U7CVAjhCb`F/Q<RU*3?kSQO83c!G]=76=C6OENoL"J>q/6rP6-eIQ[RNfOihrr%[ta3RXVhh2Nj4A2Wd&<;sYQZXpo+4Qgaocf]b 34B;59\?%Zp_8TBbO/`4g2lAm'H/2%9/;eP<peJ]:)M'fXA(+Eg^cAU/Js6CN_!+1a'\8nW=XQ6T%"^)&U >-0.U`[,SOYJ:SVE][itH,RW"PbZ!MgHV4%[&-9LQS^O\C?B_YL%W r1/\2l`r5r-eTKc7:jRFI[3k(Bdbk5$3E'NGeg#!Y),<%'`Q+$Zh>N! 
?K0:2:H`P`H1mq9kG+',b]^`-TJ6!VDE kXq7ODPC'db<HTc8Y \?'3<nWDGj46c%C[=8_PPcC:H@aUJQ@jc,/e5i)MRWBH) 0] jf3\+UfB'Ks#WP"QB0i6g0Upn(Q-a:n)^^o\sh<eGA^24 "`@[8;.F6FtXr\N=5bKpt!>?CLLWB<58:Aa3G$!YP9>FaE6>YRi08]PTC*GQgeXpnS9djJ\/8B6\i^40"ZC=e[O:-^5'F#3B^MlD(j,SCA\OLt0@bK:_6]i^3/QrapY0OcsfO4F-h(n\FqE5JT[?s?qF._HG JiHBZ`;*b#m]=(2%96l#.!rYe4((6K>DN;40gp@=iV_4=@33s=!+%_Q"N.7W)Hb[_/Kfh_gAKBL*NtJ-_!U(ms5KnL`.37@`C"sO-R#rNf25)WKiB`CM'7FJ,!b9#&+:J-6pb52+F[XBQN4WZG1fJS!Z*s,JmA6[KV`g+q6pq4EAc*5!WA80f_Q[>0]2*[tA<G$Aci\tYO%Z?37[Xl-8)9(<'F7r'lO"^*"rf#h1Vi#6=TtDmi0@#5D<ebV8SZE/S9Tc*,b4e"ODGFVLO8G=bO((lg##k$-8A<9-VYbi6Ai"OJ/Ec9qc!(_P2So@aOB=/jOML8m!:;+\/;LDeA&:JRrA'h/0:$)`grlU5^7#[4)Cl1H3'k/Cs-+dDQ<i'(/9(320V0:NpUH;)Uh0>3?=^Am`GiK"R:N7*]<"c4# ;gZsp:3<69dbiHc7CG=Gr1Kgr(rt$,e7;.(oA3C1AfXY`q6pO0%rB!M+a_AD4Pj46.O2mI@7n/Q<[@,n@t5C':,T?AN:^[P57m(nl+Ud4el*[,M2jn5!(?2'M\S*7n(EK$TqA?THdj*g"?.YM3F'\atVAkckL:E;CKEA0_!dMc3n)h\Or^A'YiLPW&I3=NL.Y+B@tATD6MJV]=j[F8N'W*nA'hWpm(^(,*p.[BYnF84Bk??ZfX7(/i3_Aj=fKl/N&#kIi8dIAM8Q5Z%.Q2A"KfX/or\San76C0)s&Dlc!MY_R+-f4A.H;'$H>os4rf+#>f;'9)t7KAqc4nj+cB9)%]UrV6P\pS abT$k>HB YaIYN]j3r[RM;I&#`T-ADV52bqOo->D'X,3R3:=f/$Le%Q#g7qb)2[]^mJ<7+%N%%!`#A'L!tQ-s?Ri??bX:etWR_)R:."<%g2VUQEe(&'H%/4k0eD,Q%g9+ %O9\0Jam:+d5.aE+<Z-D<9F9`_HI<hK$B9r<]&CT,odGn,&_;2sAL\^CniFnAEn*&8FG9'87D" I0OPOhWIHT<GB%N"An(M6OUjJ>):XrnrDCZ)J1MO&=)5G@<iNYD%8@<GlIoM>`bQS[Hn#E)N_Y$j'^N[&!^N.KL!^YXKH6=JRr?)Qk$_c+N(38A=_BrTSJLh;s].,D^-)U]H!*80l@_XA]<'iCPF$#ZGM'2,JWo=MP'&Y?HlMKd;Q7L<1#U(3f%8"iSNFfh.iFXTSAJQFgYp6\B)[TI)]N\ZNti]8+$OKoF=eI"AhS.VcC?E+7I1IoFV+R`]dJqfrF#@?01]/QVYk!n2aiq3,RWOJ.d!VR&h.RRm5"C1-#hSAI1NdXhq'b.MAP#YF"Zs`R+Uq8tV[KQs!bOlAG35odc0aHh=3qirWA<ON^=YW:_T6+CG;qT(<HF>d"?Z6I^.4lb^XHM"'X4E=[YDbS%I9pR[.J"kY6H,)2^Gk-K9SiUN"o/S-;0_sgoNSsh+kp.GAX=J5,E>\l'$. 
LGU$=Q$Q..^_Y%gf<X<cAI`=IC+,aGb*3U4Mc3k@gh58D<DZdm(+Doc1<UYo.2a7XCP^0=7Y+=F2s>5ft@6)hX;6SSVD(<tbBW8`MVq:;(/-A91'9P4<Q6ss`87+P$@7]V8ff-kaY!*ib'9>2ES,cA,i9;&9p;U`sirQZ[_n_QP/,lBIKpaEs9=#l+*n=cJ&3bto9DC9UN%ZWiW?l>TE26L`D%lcqQ:U_Cf*5d+3%Il0k9>Y7D[r`#pPm[8,.]EiKIIVY:jI*@tNioeS-plYcaZ3YKc^3rUr8kS$a1:Ulll&Kbcs8a)cCXO.n_/?DKneYMB_AlA"giW'AkkjhA$5#Fqo%4khT_2ABAA^QI 0JT>b/E`r!bNVVXXNG!.*]i!PVfh]MT q@rt@:%d3mG8*qq b<17hcTcU+Rk ]d#.Aj0:Sgh)R^:MN6o[R&2i,+GdVdj-0-QIngWG;N`$DBX]l)K0%n;3C)A6$N:Uqp^=2(XBb!]&AS;Y1-;)IAY(o*3,"JEAmOX O/R`(q>=0P(3`rSP?:`mR3N-#En].K6e/h>Qf6[>WT"*pK0=6cMAUgL/hR=e(%Gg"nED KptIn1S_8P(ab9M?RY#^]G\3s(^qcq-6E_K.]hRDiM"sEU\ UWV\4Q7h;[2V9Z+dj!(E+W/Fl//UFb\`QLN%>YdO90@4O96K;_:M/4)W*Kj2e,QZXo!Xq(l@6<'+L=c_?,B3VA2AS?AfU<CkU%`V8&r'fnT  o4;XCg3cRmNp&XF`B/3F&(kD!Q)b.bi qO+=B8G,d1j@Gq&f9NOA60V4'm1_A!?Q4AXINT%di=,ih8D0Ts/i(!'g4/[/c/hN+d.+aUOOKGDD_\lA%-C8e%OO`91bt=gb<`2U'^K,Ujco)rreIN&b`HHJJ*],A$ 9d'VUI5gSTJmoMaidJlfq$3?+/1N> R`X js9,kLt3$GCS%21rQ9seO!e&@;3AVdl)&3s@HCfAj:H8l,UUE^*b!0Q-AL8l\:^#2+ $KLAh&@C[(Sit(tV27A*6r<X=`>lR=>7>6dM5hHA8AXh](F(==jB"mT&mA],Z#nU(50e\=8lQj[Y3d\N/XQFsHs<bNM/_:Yd+-iY2O_$QG=EBqmg,[LGP[d0cAhd_aXZ]0+TY<K]oVr1+@<?FQ8cjK<b3 ZaTEn8A./T>T4&`tY'*H)m!ZR[$T<V99:HA[%h_>?^Vdp,RI;oJVfKp::cpH:\POOh>7Ye^I1aW_BoEl'B@cj'pk]]3EcShAoPXg7jqHI6!KlVl(kb1"]6W?7c>Gg!,;<l2Z=&@QQ2CACig"q!/F`=8_)*e&Lo(4f%7la3F=trN7i95L2$q.@5t !dO@1adiG7"h:7VA7j5m3R+6_k-6hBOgIi0st[<9b4O1q=HOfJk7"TPdB:0NnUf5hg?HUhOO*[9j\=QH6J!fe/f1F/G=O;A*qHa4\%A6B*oFLtVLhNZ,=k'opTa`ao0,]onfJC"`K,CSp6K50ID$C2'A#,)rQi" `bdXVd^7L@5GZf\['!kXp'Zt$9nfEi6$=&C?/7g2$gr58BRQ=*#j8W+J!*,GO`\`Bh<3gP>m4<]'.XQ[DcUA.9/UnWfWBf`-%DTS&g*O#NsfW9P#Ags_HW'$%0RpUZG&kK45VaCW@;7TSA4,[Zt,,-@b18*@BY< ,1.Z_nlhV;:SXo&TpqJGWb99j^lX^,qY>%dSGtf)M'FgMW,*c27@0]ACL4fH4HH4rejDXr_T@0IA+LW K2nA^0ZTG:[B.s5J`0 ih'!10=[:OtA*W>8Qin#'1K,OrS-,1GT!,5,+2EM("!k9RAD=M`+p";q!S76o79;Ii7g"WD^DOT$k4X7E&-mZj&GfkZa sl:G;(05#jSq1Fm"Ym9q 5K!U.G%_@Eh]=,M(c5G[+:l-/)U,)Sl*%A\$3rW$X;AOXhAX'sf*7DBTi>9skJ%GQiYSk');4L"tMK<\(HVF%E&,coajVoUP^V_T5?-!MIkCnGC-m(2`<f  
:ZE/XX1XY0A4U^c,f9qs.m`cpdkX!fH*:og;q@KEW(*enVX&$/Cl^' /\EU+NHe!t+I1#"(-h1$!* Nrg/^XG%AWrVIM]]5AW0i$n.r=#OS:E;gk@>mBGWj7(5T?(9AQ )0tYgAo?&:U=Bm"7ia[?PC>0)_qWWUU>/jP(q)+OWn6LTW \gfa>^kXsO^qQlD33@qAUseAl> "FdoEpAY7jtf1mJNj0=!/sSpBU[Ghs\q4j0abhL,A"f,4U:]21a*5!N$p$&9<Q1Z-8$jrGkd6dK+aY`T8V> ]0A;EOf67e\04b.-+3d].\mAkfdhC=@.ZUgng!9dqF$7R;l7)&lE&mjN$/"E,<GiEZd?i61=hFVp<qPGX(j%-5*,3&g=&A@/m=9Ai,RaZB/R>MAsGAQY<4&AF'[s:lG JT&lO=[ck6$$TQl*B@ViB#gPRaNF:oKs?GHXm$91<\D+J#AScG\fPC_!D:5iHm0QoU3aUL? 3WB>`C]7QB]pQE,B#!]CGCK66p%EG6[8AW@"*Dm<8H2#(:Fgtae4#<t,!W'TBsO,"ns&9&'(a0%.jNX@92"p[_l=r6*A>!&CAV*?!A,>/5j<k@nW9CL 2dD!!.#VUC&X&+Q@&+KMm.#oSInWX/I@`g)4Mh[f P+X4sL,#=rG BAaPKtP)`Tao!aE^_Neqd^_@P1[%haI*`)<.sid4>()QJid19Wap'025k,sB(-1NVt\KAlm;/6lnQ'=$EoDl(A!I^6?0JVOO@t5Xj6a]glpg`Kb$pAVC`$Z`%*t4Sr^+3%CT?H'Q8:_C9q$qeDB2G@&NAN?3"2ct2!R`J`_Rdk<[2?#'4I-&eWNbEcH5O^A<,45kbAJTng5cjY3a$@#"7D>DgbL_oY[>2N<37$)A1o52B\fR mdT>'igh2Ij)VtGBU\6;<?g$APIHf EQd)f%VgQ0 &cdkjm\W@4KrXW09na.=?s?6\fF8f#<A5dDVV:peYQf+2i`FH2l+5VNKe+JM2i>d6C"t`L5eqC[KAaP4XPLQKYB?Dhmo?,Ul3(f]AdVF6E,#4?N;@+d[fAH`GcpAl j %m>5q\/=_HrDnj(S=l)gqNXXt5i3tO<G++Yp&["Ds2GdOLaD%Y2sBA*CF599bLj4b))R(*Z8_Q'4A.AQH0RCA3*(;3i"Rb_.QK&$mp56-qQftE;!`5[P"Kgen1K9$;MKLiqCn]Gqf2r5]OUOsM3krBF2=)d5-*1AWrcB.=cQ1W;g+C@RNYI6k6e3Y&rW6XnlOp\OeD:-Kf1:;2HiN`_97?S(,@I6saUZOCbQ,K!5MY!>l$/3P!5&dINtg7XbOR)ngTV^`f1!#/<A86kXZ,?pIg#5>[-;Jp<RTNAm<5KdL)UHC'0)QMpcX631'k($pk^g?nX3\]GMF\g!:C7Hq\="A61_\obIP)fn@S6LR"SiA//eIY%2on`7*ER_3&nS6\[+?a8i7RC 'nACe9sCGZ0\r+A="S<W,!PAd-AABAk'KW@qrm%FtB.R1hibc*@\W=DqaRY?Hp:(PX[*`N8NBGr! 
7T\(:Yc(A5\FN+S$(\7'=Z^_^m;L04$/r1D.*%C`&4.Djr<AU<.0FoGF*+<`Jfqlc%4Y?;=$3T*.Lt%F+J;BR%LD1g[\%"F:Ao7A='QA41,ArGXHZ#9 Yr]DnmUGYqH;Z*A5(n0M&o[@"(hC*Y;H AD_J4"lNU*DbhD^*(M11@4bY\)$G]Nt\FYG;>P?kH;W-QAb*iqI'Zcp@Q\P?$%*qX,Lnj4/YT@1Am[Q':WYY[s-Wm;Y`*n ]cmojAboKS"20Coaf1"(Q08XsBeVUMLN`B]so<]%1n_H]9l\j%4)-A;0%4o7H/:W,^`T.YXmMHjQ64\]M#_NG9q<=Kf;(,=f`l%Fi4YL)/D[Q1lCJ1Vq6kpFhBq$4TAVY%<Y^-53WF9hM9bpa.b p*O4odJkj<!lj_E\^gAq"95Rpb>%Jf$@9VtfOQbhA%%AbJ-$@/Tgf#p9ndpjVhjT:L0e`lJABr5<-&Mo/+m9:.)>SZ*%LXfiJLOT2g)HM,+D[U=7hAO:oLej8=eg7#`TK/@FFm8/l M"oXQAgoEK(]IfTt16t]=Cc9t1RKc+1B9ZHU9B+Xi:j^Mdh;3QkL42'MUp$ke?IdFVP@'>-W[PqLNLQ>P<Tm0gJL0NN2dE3]WANDlOI]IK]R?Z0MA7E_`n!UR<d*>[P8Y-,ZtJ.2rlLo:pb1AXoKt^Q<k/o p"m\I2'A[20^#8Zc%]6hj@_AEY^S2:qA`Y`[g1NCYfh.Qd\(b.6.j(/k2;` _3"c_:?H%CE :Z#< .>l]OTo/5DMrp_DDiAi#Aj0];_#:9LB]2%Fd4ni(*BrD=7D*MfeX"aF*0o]Tb<4mN;!J"N#g.Z<CR$@HhB=/HP'T>'8p>8AAqrY0< d*PTA_9JA6<^^cW(E2g_,.O]B8XR<cQ]fAXEb6=-"W^IKSRoL!I5A?ZYh))#FZoa(ir.?JaCrCl;SQ`5alDo:jO<0Xj[o0^=YhC))"GeL-I.c.0>Hc9 g_1A)f(*o^=.TQqIGp1>6[Hhbk"I"rJ'D3Gs2*IA*['9aiT%<#+UB@_Y!IL/`k: GSk7YW[">kK=YP[aj8^X<`&?;*189@+<op[&I@3[)%dH> 5A!';)=k!n@&>K 99BErIqF^-HYH(tsdRoAD,-B>2K]@SkUBhFWd7/7L>ABL-6[Ai_!!X9khq_IRcMM<_$/Y]rBa@t+ab/`maX2JBXqApH<N!\6%Krd=DW]&Tr#r@ItH_3P5d+f[rBa:!S0Y)<]<X)A3WT#Km`-9]%sePdTAeY'[H)@#MkfcG4/j#aV^t;G_$e=Kps8\pSA%s2^1@3\V.qp:U\N<CYZW>G[0>sq@o]A-"EY7f&\VPn)A(%H65WEo.?d>CJBMnB/sEDOU&6_OtY,.)QE?j-`;qF[MQN?[%l<W;,YP0BjF/V4Oj$#VYP.e6kpTB[:hNYeNa8D<VG6@J=o?d2A8rt4^O)LR3mnUWGH:OYCP9)e7;gjLKJKN`2a!M5jS5_!b3/JKcHZm8TJkeLA(OQg+M_&SeI:h5+rM3&cHs[#AZm'C(Hk.73t3!@bhUjUj*b%g6k:d\2K*b5EC^-6FkF $3,<h;-lZMs;d:\2<DD<pX&@+<s1ai L]]LM8N[E>(8M6)6(50.^^!8!GPGer__c-dA:TU]Jd)K\ oC+,0tIjPR9:Y&jMEk+E'p<W%IQJ=2g?/tK'0f.fIsHECsBOttGn1VIjb*LUh^h4J4^KiK0';thTCtSt:L8ke(?cO7AH3^\G,4*Kr)aOR;SE1ECNp2W"E$m(/BQr1(^kmZ<X%hLKID.XQ05DT&&?RN./Q"!HHs^N/ `F=09o'*i]o<J,Xbr3$`n&XC[tt0C3Z*9$SPo:NnSBqb4W[/E6:lKc[lK<pMr> Z?-S?>Mb6eb3(j[+8^-Il=2<#/OH2hT0k>Z0[[ssS% jc&jJDqIj05q 2H)_4:tIeX,9O#k<@3j_kI(\mG`MJRJ&lq6Rlbn(Z(iXCLs3W=D`7O=%qE/a3L@rF,2 
LHMlOd/+O85Kf%FH:Z5th-l%DNoKcPk6qHA0 SR`ng%p*^tf^hl;pQaHdRsaNiPIa%H$Fj['!_2IARV6nks2EqA$2Fg)]/dGf?,7HMD)#;=[es(W=gOOrQc%Afp+'Mh<1#3mV71a0a`R(b0BEm%<ITiA8Q:$rl"^ QM_0(\Lf_X6AnUU$*:JH)p-cAf`,-<+k@YnA`6HR8DZi$1o=h<gVr?=26Om?E=KWiN1R-9+clIIYmac58&3)(78U?U4#hKm!l-/OWFWVH>H1H-j-YNWd338:"\d khVYX$npa2m\'p,$/C/,iPi<dfr[b0o%M<2`22AbjVB!SC6dJ.m]"159!!nroqoeTOcQ).snY%++HmPN#:9,Wb`@ds+R5#V<VFS0qrRh>jo\,'$+)$VA>fX(BI=5L$Aq9U3j6gQ#&A1,V]="AX["74tC$$h:@12L8BIYH#2`h>#p)8,PeYJR%Ji?f,8aH!N<j)5NT/k)!^[2mPGL_P9t%+RYM)K6[clBeL:mbt6f]]e\,J+_@s7+@-]3YF JY2hM2UW`!Mh\Hs# 1DU#VgAa'iFIh.&5.7;]3AG^fmD k;A9*?aS20KsbA+GL29'5A ZB=iAkm%i.dN90(ifXAdf,WF*Kechl>6oQ)I%_d[+0aE$a--fXo"o?P-J/5T4&& Q]@ZH<1hindE]&N?!fBjUs Nt6>.(*Il"`3bH2M;,X[_-tGAY-or<3<e[>jPBii^BRB6e\_]%/='Qm?YO=BmQ;\o>6CX<n8r_B7AC@:]k6$ GrWW6iVAMRTPLh`%S-PcHpe:lYHME/F<g-J^!DOFNeZj%b5$To^!)6(5LbImkHQQiK&PIc^biOj,l1(KT`dl!:A*D*V@&g.e9sJGkL%MZb-:+;=TqSllktIft,^X]W7)#4eirn Er?DQ^kBH:*l@LG6O,sI-:s!XZE\B%HfM]r`65f"#U^" &q/VYkjtAL,,=`;>b _jV-p9Gi32@">$<__8(mV-2onU;SI4D1 @+7H%d<Xn&4(>3Na;"0MRd( Ip'GR;Y9-U+%tN;VZVe!C ;DKo"FH69j$OM:8![&VrKWlmb`m )W_hT7m1Ki"lERR )P 7Roe/E5T+Yr6 aF/*4\VFg?),8H'0<19moRd.P:>;:37Eq)PLcE(m+c2_i`L9Et+a[%QA#dhT20^;fq4Pi.b+dYh.fnaQ`K&_s$;7+2DK=G2sBY=Q]D05AOSgkW;gMpJk]/^e_8Y'HTa@hQ_4 F4k[Q.X.gH9,Z]MBh:@O,%m1<&&%q87meWipq8<R?WeZ$;XV!A$\(`Bs?a'Eb-XM_t7h.Xc`;;*r(#4S$n&DX^9::W&m6ZU"&$=cq=?t:U5`VA"noeH2]01.mh2"@;Ahd7f8<rJ;jA.mhBYQ&ARR)0Y/W#(+X CDpTAmf)-\+qTo*X+i8:YP[tC#4%0S5jnpGl20<,1)o3Cg0\honDGkW#th^_a?cJ`1]$A2'dis/rfTk?LeHHnqh!^AZY>,BTSRh1jn2Tg]tJPjU4(J]$PGCb!jCYjt(=e51DQ:%4ce_e#61K!brrMtO0K^)Uc+qA:"bj:P0d-9HL2oj%72HJGJ^@Q)m7A(eMC"AM3CHg-@6plUUiJ-$M]0V!>^E?\M`Apcf&@YKq33(Xm]O`:ARIe@QAfsF!.QnrZ1"11A;ioWci%j3rETcn.?8[-b!^Am39b''D _kt,;D&^\moFpEbaFQk.iL0gsh!7`g=!=<aHm`0!SC=p\k;sX6Qhr)?]4U5j>Z2mIS/Xi%A,sJQnE^o"642#0$%?"&5of9d<..7 $![*BQ#DhFerH@#oL*K2[oPr=8.pr>,Y%l=Aqar\;<V.n(['6@5QItd[Xrn"k=E4jr_)A+N=[#Z`T[FALCVZXqm0C;DqSm),[F1/EV?kQ7U;M?8&]/ALe.3_ABnA22V%g?\p24; 
TQpW/e$Q;r<[0_6]d)8m,ZWo1sk36>_UVZ]YBDO[IjF*M^YB;>ec<Fl5J6r]dY?38)75?9eA?(^ZVM186JKCpO&gg,oX3C9OrB'?lR40s\97q+kpUMd&(=q3,L&b0)\FQ*A`r85d4AJ@.Ih#*VSXa%2D)/]Q4S-t.gN_!D&CdF)M]U'O!5eVR@(7Qt1jA:5e(QQ*Cp56"jIXl*U"8eA5&47p6[ZE58*c9VN$9+'n_2P<m[n@M"qFII*C !b6UM$G49sAs6&R,8%iR,9(PZl;L_+hXd<[?6T,:'r,P'Gp Z9 0/[D.((-Zbd7^fAAK"'5A=A<=V!dUic$<IGaXIjNJhCC`]O'A10Xpml68Z2L`fUk"Y;A[iTM$W\n$*[Jj((A12]cZ-0W$%gO,5Sbie4L-pmn4b:fn.bI)KhkO]&Lf3$UiPm;'^(LeLW(EAh\m/"LH0*]9#bk rpeJ(,L<q4/H&'LiBY&4B HL]@/dr#+!2@J"$2q?3sG4`,L+0\K.NC-JpU9VIs'.'';r<3skO&Vn;>Z@OO`!t#!jV7C%;bAtf%:gnfgDhkb"B=_pYmF6%WE@_2gV9-+<$cCNL]oAaM&5)m.GrSno/+>Q:^f&POh#KblmSf`3WtZo8.\TS[.6GLB+a6"%m3m38>n@KU\K90Aqn5j+,`"\F9Ya%\@qpK=Sa<<G"cLADD(a@]3Lr6g:EXR5AL4@QVB0a=3*oH6U+haOdOd/rANcqBd,AecrR8s$Hpq(^K(RlcjT,(T`>T!33fYi=[2jmA,/$ r0a.C".=aZK@q$$ Ecg_kl.5^Q'Se;=/UT.age >g3km P9- 2,^RpC[q?9r[:m GcQJ/.M$OO&b.r! /X b.[_\moe/C-kSpon'7eK.g\Apt]612!=B7BS+)h*MY.9Y5Nq'5l\?%6[Np&!o\7!B)6]O#nDP,MJ-/F4PKCL46[EXg->I`nje*+1EJQ4_Z!W?98AD0t">@J1S4" 6DnV-?T*r"MH`!$Ec6VU)2gH##7P !/.Z';K8R1Ns/$0A10KQ9RTd/nel7AndJRfEAf24+#[C/c,WeEn4/Ws ALN+QJ!Q5;ib-Ucl5>O\%0//.fFi\S,c)%,K!,Y_6n0^nD5k\;nmbSLf_l <Yf.cs"o#gAZhK'SBW1-'&q:@-p65<UJneQmSnJ E`Kpe>!GQ_&Uo!mN:kRl\=8_8HQLX1TaOANa/?(8NtU#i<^T!&ABOS9:^:PULc"ghLa6<9ms-QL'E0UWUbYIQ<.hIbP]JA8qgb17+l)shX(Lt8D.8?d+tq$Ym3bGk9Ng(Jq nOlf=C#O9+Q:k^cE=,]+c^T2UY$GmPqRK:1qdH<aT]JNrO'"d/ )b+:<1q0pfm(5OVCT^nZ@IRl!VErQGt\WPB\):iCc/%5so!/-VtIs3\#ab(6lr'h8nQ&.qX],[iCNCa. 
jU*F,^Z5O&tB.ISo8eAPDW121PX!5k0/:qc-d44p&W2B(0-N1=HPA[AF,8 @.]>&eH\b%N#0d#%P=%5Y@pUa(m3J=^.OU97naP_H^Q6=X5VgnEB\Rc\R]_rPPF +CjPJNLVfJ(^M9:L>C:RaAblU+[7UAfQHWP+q.`rT,PLWD)<8PB LH@<*lanqM=l/.\OkXJEFFV0tA#2'o,Bt7#PZ:bb3q0To=)S%N&rL30f.Id4o,3Ie1Vt= '^'3'^i<t"L6@j(E[['tK""llm2MR?0j[#c>9psXE\M"MS\,?S-Z nG8[C&V5TMabH4XVi9Gb.GMCP"D*ma7(#;nIn(WfG*hp;_2HsUF#GUd#<XMq97(.YXN*1rE*][VJWqlFb-0KSd!09$U09:/D4!Ft%O$Z5SPU;kAXL8qLhn$_3XJW6%%r8_rg"U+jf2!/mriPrh\a:jHC[2+NT&[<m:]Wj.ESA2Eal!GRDOS5N_K*<"lrt :5mk4!]@^thj3&Uq)qKf#tQN$=cY[p`=.cs+3ti+iEAl qgi; Xn7_>=&7XL`[sVMi%j2)/-:->AUAHiQE"GXQfp5Z^@TKQ)YJ3E,)!AFb\IW#ETD$NZ(<;)L4D#Lofa=)(G7QR MNZ'[)?*"K f3teIh<pD;<"p#IVEMj>HA&A+Z;gN(2Db^A,M9Ak7 92o5EOtEhe%H4AC">Bob^kO_ o2)`2'1Y81je?]k_Pn]cY&sg\pb?c]"_ERiNa(5Ih$?aj`>or.b 9ef3`lX+^WpEDP"sAK7E`R?t*R]""hf\a.5ch9@FNE/k`-d$lO4,S"PLKkFo6pmpNX?-!.fN,?rMeIA5;^:m5AKA3h=<m6>XqIRgrkS!XW/W]RtKDUVtY?/Hg% ^A_#tU`%HI7`O;:=fi;CO]t&+=cWkkO= qW<&>Gn4DbX%WG^@LMiOA:DM?F_noP<t,j[ci`IS"?)HYV1_7b=BKZ\Escm41 LQdjItR?ssdT.2)$5i0RD2TVbC:WXR+hi*ACQk_'L:nb.k^<E^#""<C(SJED(p'[lkI@SXIs<=LPV#7' 1SgT=k#:QKgVs-aZ?=Qe+1AUFt\Lj9tdW8qWfBU6WHQsicOFOPVf.JDpnQER\4CA&D"]mt?3E"*'#M0sZN?s$TXXmsA]W0(`Qdnm,r5`73\UY&_qkm7>0[ti4p=eb=jUlQr-%?=tI_*h;j,FGg6n'[1+/YL2e%XQ%5Za?H1 FijKT=bQnh)Y,L5jJ'i(CiWe@k V3I(3\..UG^D,1c$8'^E"d1&OPb3+b;9'"4XIGCYW"3rc@7dn!lHr2NVI#-@GlePX,B^-/%GM_?AWi>=UO42JZtbN[=hB:__5lCt+\W_,4ABdQ,*aU V]ZJ8>7g\=L>KWe>'!C?6+Z)-0jRE&CA-Rps5K(70Sl1roE&LBrla.XhSmI\UtL1mcQ8be+cl4S+-aTsWbdr#fJIVl37Ap$.$"jJ;B'nQGQWIXlTh:(\9L1\kq'h3B9nm5h?D0F6"mHe:8WfR,:FZ`?B:Ls;/A *\FIk_&W!4'B[q=eqCG O74/*A)T;>1s$\fnot$3M/YYiY]U*S2808n3Qfk!F=@lZ=D(\N6ZeFj+6"g!$.Q,J*0,IO@Xc0m+/P )6J-K_8@A,(]XdT]DQ5NdUq)2]a ARkg"jCl.Chf?;X+#Pf4UtXtT_%I$L7&;t.4dr3A^eWFCM(l0]BHnG"r<_ ]3+&RcIbb4/sFEg=Jlo  ZO+@;_]qG09Pf/iN+o*njqQ*]V6NkkH=o).iL/N(eaS[;Fm37b]/hhD9e)Akf;iWg:Y@#=RI`ke$N(M8/Vh[ih9YeB/kE/]6ISo:h>t4PVNP#d/I0jN]q0-]U0)5RK/B8E\ D6.O$J"^tm">HrScj`3pl/G<h+mG%\Ki<IEE/JV4TQ'`mhlkVfaC<QR4N^RSm4gmll`k(!a<TLI^asG2@/jAlO^/YkP 
k#mSZi7%9]#lrDF""F=qMYIA3jq+mIH-jcd0Pos2U^.kTlHEd#DQ:I0G3=:Ae"C$9-N4UJ&7<+X?QbRl[Fa1@50hoPFmXdG>2JHQ[&VaN/R2qUiXES^'\*L5AIV9;I/Kj>@m[_tl7LZ"K_V*9_:toSt`Jb&HKZ59Jd)M.U=2L*MK#RK]D+k_1O \rN$SjGgbPH6c4q&9<i!ih]RItn$p"i$>40efHh`_6pfr<`;mZ!+'%QF #Z"NL;b2tObIKb,oX_<]5?L:4H@.:^?Yr1N0XKrflHFe08O8iAoC&%_]5 rGLQ:;+];W-d5Xd\m@cMfo]n$$nbFR$D=!*sFI9g=t:UA9pGhX;Z_"G-j[XieTA@N8=8)$VV R#seSQ/-OO1g@ho:05*ZgY?=F$L$8FZ7isK[`M[P"#r9.8ij22-VnZmCmT;Zkg]fT_D= kCt`_MU#4HfD)7#QWf8 nfI<M>KPKFK$lS=$n!\5YB)*/2<)1iAU2"8;OGU*M*j%A `i?USbJHUGmrenT3\=<k-J23`n `r\71d3Z3qp<;*oA2Vh,rd2A5/0%BM18]8j_!QkDN#6oTX?E9E?bcPpA$A*VO!nd3?Apg60\[o"++YAJEYigSs'(p9adY[0>#]A3NM$AnIAo J9E];p^10fG?g./pNe&$.o>&o>1eI/e &n^0m'Jc8eYdG*AX4lLWg9eAJ/d5;Kh-T'O%I\0)\("66bIG"5+2D@,C*tf#)_]ElQom_j%<o`DK^.B)o;YAKRjCPAkAko>%ndqP2i]esMMcetM<`?5+P>K\?9?A=$*OQ#UA]\b[o+diY#>"0g8S]475sOa,-^)"nRcF+)ALo_FpEA,+/j/5!/4pR l<^g,NCj)eD]OW,-T93p+T<*e++H.#Y=/<GSn%r:Aqgh`.0#8 BOHC8U+\%(:AK%-L1aK?bT-dr7I2]j^Af*tf;=-Yr`qNJ,I'Or\&]G4$A5cs(MR^mL.dtc:69/(grAXFK5QC#_I+#pZQJ[W]VjE5tI9P?&tbRD*90"4UQ_9"Lb@-dTB&0VH"F\8i hB00'plbGFAeEn43<iE'+b-9C,,%#87tB*4.8@aA=Pt!66AEXo`/9n/M_\q4$\25!@_]0b?;VhNCVI$d,fZB jG'N:]+P7&*l3d16IQRhlDaimVYgCER^8"UpmE!9#X:;NJB^Y %16FGrJC8Hd[)EA2?[InP)Y/B/d>HAmhBtKs!7jc`pf6kV1aEA( 5B]9KBeVop.NV(:%GT.b.Fe`RcrX$-]aYZn$m@4t`7n[GVNm=i`0sP_$[;,2q=_4QW @bTWV`dCb8<^hcrBaqNB59ERMDEP[ps"4lRM0R<50k>osK$U0s's3A_n`1]6igsA1W1PiqsF3X'ccZ/9AO,YBP%m]q7N&+Vbcbr8_b7X8&n<3mbD/."]38%>p=Y!s1PhSV5ld+qO2oK/t"D<d"fSaGINFrNBLZ+(hpEA7[:J>2mgS)r="TmFF0^5 49)qG-GBn/k X`[J,nI/.>Wf,Cm'mI7B9\PX=.J<scG*mj/ 'jAY$$E rJ_ DTqPA80t"Tl_b6e[M:%-b)=AT7k`!+;t\+2?k4fge$6R+NXa-ElZr4L1K%%Co-\L)-ZIM2hr+<]qaE(68L[)KRlcf.Zd&elS6l `rPBO= $2Jm]>]00CBkj)I<X<dEaVT66!,Y0P+3qG"9<0>oSG%isn6.K`cS4.rX-c#`.IPfP:I2Zo3'F5Ybo7Q4.24%+:1Z#CiiK(Mf9,>VBh'qphp%]56afof\X;UXnqL5&qT*f?%g2RT5AL R $j!.baUTI57,H7"[m:A0g;KjN,g9X%4bZl,f=nQ6.r;$'2=('\pb5&1hAfB"A1!F2!Ig>M5`Si;dk' 
Larm2<:[GRk%D6WCL_%K*to=+]RhCCqb(\EZE#.XU\geYpA]Q-S\Z24Pf(.FI9NopAptQ_94Fm#:kFBj;DW'K$a3tAnF3#oY<%EEiskpRq-"q_sPeHPIZ]O7E5E40TMV q3kU MNVRlRYRWI?tLlsb!A&-ZQ2Q0:j'AnA^NC*_N#Knf%+RO6EWS5b&s+hp9n7Jjt`NBHoTa5>*$C=q5=Zt[*jhRQRYX^#NaPfE;WB>NYk'PELnsn5.a N(8QWAa@VpQYUMO."U_U'j7XFVr]T ?Pk[HDbi6$Y8)8Jm8ahPI^3]$lLe<D&NgI!1G.pc,)kDg$2j 7GT:`R?IH$iW!Aaf$m=.@[CCZRhk$aMoISYKIjm]T"(VFbr"pre5>Rte_fA7VA.._H-GcAtMsC%$k&Er5QY>] $eLqENb)rRA&]"L^!0p dUK!l1NUh9N\@1rS1MiGSN4r&B\Y59U38)E+C:[(=bi)ieLR?jTRY;6,f%A#^(qC%j&2:[bDN<mEW&l5Qg'W YR2AX cRi/iA;oMJ!%^Bm%\hBaqE>:o*thZI,HHk7`?o'WbA=$TfE<g?EX"(.Il&HEg+E+3G+/L*>nGj3&^@<MrOK8LZsF/ie %+87>agOQJcpQ-[)W?IgsX<csriB-(A>O4Yl0h;fnKaDU5oI&FdiOeC$S2=p\SPR2C.pdn M[.65Y:b",dsJQ;acF=pP P)Q=OZ3N3@kMp\GdI4a7DUF-\r=^^jJcK.V26Ul#2>=A260Y8P&,g&aYR^fIh(rV5dRt;IcInk8E\Va:++%D<+1ETI>_:#"mjF/k(SYr0 +-@.+mWLM\=p)%XBFN/#Ao5MX7>$BFYkq`<7O9[;nXPLO)Fm$h!)[O6V(/hraN_=1m4o1q6benTM+U-FWJSEtI$&UKQ69cA;Cf5qOaE\A$B$0W'ns/F1<imRFbT"E \NN@_Tq_M?%BAHa%[RQG`X^UkVKsDa$@o<9pb&e$=T.-].[r"Y2K9J&cCpb_`a9THa\YZY0**\CESsX3CK(8Dq l!rnO [rANI#bd\0N#f-S+(R:]&p$7thil[tC!cDJ :*ZJ"a(!A7fAe-rFJo/Ap!G+BN@*&m$KplAA'7qF1bsreDKeahX#nQLs.mANJAMif4OQ>lJ3]sl"q`T#b)o6@CGfs3N+i#R@*dsA;bicGN3I`ABl0pN1[UA0HKOa(RKdS,>o`R>?r-D'hWG]iTIAA&GJ_T)!`0I$Qe9MH[]3Ar33D4nd5(g><^5 H8-U_0 5c')PJdA73oOW?I],hh3C1.m3Z?:AcWU=p&7P!<^>Es_NeEiN-!$S1=r6/4WA@`LamN!o7>Am%Z@"\fn*O8IqFGp3fr6SGN*6BC/`R?> bt\R3+3K`2"C&AbU?>pk4]+MjSTrHs(F-Y.S4[9[ZL\@$IIB[W, 5.AWBXqJ9`,9 /k2)G PXeir$/jEdp<n2G<iitj^R^[W'Pon/.,qnj0&MqBL&N!V0dGO0.(dR3/_iW]BK'4^C:lhUG]7(F;:.,=_JtD;s.`4Q=VBrBCNe-ID#03cmmK"E-0P3Mf 7A-t>^rdN&A JfUa(;<6\4/`_^OkN91CJ*dc<ZRSc\T%,!*ftAMQmb=AZckKA4P?k+&&ZQjM/K8PNsEI'#K5<PrIS$a1sSr*qBl)e'@3UsAf(7M)5YSXVBQG.n-U#8hM#(gO+ciSfo@,^ITW*&[H0D(UAkED2$cg03V&,s8Wnte:tQjR*S]Q2KO&YA-^qXS) WH[D\KUA?)] :T ^+cp'<kS1RDo;cILl 
K8XeKSCSCA.2e4cH-*j^(B;9$7K3%\s:(#AHR+Y&M@R.qU6a$K@8totIK,SOVgK0Sf<">#bjc^31p*Idd@hkSj3)Ft^P"oI*Rf%OlZI8n):UC`R9M%Yd$>"]25-_,o9"[bNAm7!@4HeP/9@O>rR]HGHhJmqr$Wnh@:Uc6\9.bB7/-QN,3le@Zk.T"#O#;g8[lU&3J>U^G;emN)b7gHi*^i,h(^I6riL0q>V6%ON$[5 VU#k#O&KGXUBAoi":>TKBbOld8Cp7Z*QedV!)qEp;GeRmL#0L1t)Bg*SEp9ZFWDZ!pof[[tfifAtE'[_M2ODcYe8G-QP\%`JimAQM'2=bQhXtt.=]a\17:QZ]>^[KgnAgU=HPk0\Jl2\<fr@H83=6]+;d)J#PN%*`<d#+?E]&Dh;`'Nj<7Qp#b.;8(#19a>L7=c`E.=*?kij%`@&C6fFG73qHg$%KAP6r4CFOi;7G^Y`3i[2Y>Ub&rLr7mp1>Y,F"]_0G!b*/(rC$Lk5F%`'BHD%AU-s%oSq[Ti-#[&^0Hr^j4b^+d@1)J'on<Wr:M:>Fnf"KL5;Ar=E<fd0-<Ii_<%OFfnPYGS$r-`5C\A8r&RA/J[[MtlI ?ir4sO?Gp,U7q\=pcY1h:3=Ajb=kWP19A!P$WX=(+d=h\Z8 ZjWmqq(4R9[=]Q0 #cJ'?Z5&ch4BDlL[=TdUV<JpX<_);ak/+ras;/ 5m4UVp,_l=H8YpZ"F+rAt6q9(h%1J)hXF@"Q[)EDHfZ0YDP,1H2%8?K&ej+.VG33./rEHXA^2PAd;M!/aI.]e;)k6qT?A'>o>,"#>TGm_(Z\/<lW"5Z`CErlS*G0RFP<&^8*7XAqBM'@?4+8;W-B)'rjD*_@tm)'tfoXspP7 X!"6n.]WGW5W(\>M>JXZn!-i,;&[Tg\`TqnFRTTAR[7%@_C^kA_nfNXs's!f*G;-ZW=,/%mfR?p70-Q-iA`"Z4:qM0SX?tB?ao1,O%ZS4aTe4[_(P6*g&7,DPj%DKp]oDtlZn9sgcK&jaFE`TjQ[B]05&\E^n4@e*hA]ZnnhoEA.sX\MJ%aNJ]NQB3WH]`[6d!$[m X ?XZmc41Hj3A`gd\,XKf#X)oCp7pgFt*OtSUEe>SdPD`.X;b)JaNA\^n$Hqq^Mh3_r?+eHn,8$5)cDH)M-!<T3CpqYk@e_i^hR/>eWgOZ(]mlh`AJ^,tDp/$$J&;pH\%^\Y[API<t5D-L[%%W48q<ksCt/.-+l!0iA1s0[ZD%el;G=#iO^tNiH=)s^7mW;FS$pN81DN4QA)hA_/;AV3PFbYYg<Qc6e;T@Q\g%p;;)$.\dEbNA!]B9dmXlpNro%>^i+AHK/i,3?26(PW55Hd0RC0VVPF?@Af?AViM.k brkN5LA3M?g_dD+(b8.tq,2S"F* Xtj.VbXlpKtTmP,mHh`)HLeXP#tpcBeZg-K)tlqbaXl'!E9%Ejo9P?0<=T;,ANC"5Bn#^H]?-d9!Se'+mp&o6YR(m25k2Rf@`CFUD[kdAQtj'I%NmQ/`W,BFdMG;;P%XaZN@D_.TTS5KfG3W]br<.]YjY'k.V+O#[0l1O &#g6C5AM(#_I.*\F.No3^C#s6<qr(Se/;aV,r-FA$40"NL&dhKg./L_#Zb %ipAa:Pr<Nd1j<mme*7J5;35_E+!N.*5A1f=i&!@;2iin<c-@ce%I@$U^?U 
hG60[Q>][S8\MYs[iG2I2dPPG]W5JAVkl<U2[7?6E<%6YFlr45q"3^:eFbi.;:t($pc6&Kd-:U2\1)WK;dlp4AARd7!]>Qg_EG25j7`I'(jKo'IQ?0D]L#X,pf6=<j^d^F=p1en#"L'rCspF`d1oRQ'4jj7ZSi1Wek9E1X+?j/+c;gCB].IQ*sbj],2RMF`pdJ2e1Xfqs*^FYfZ[BR65^sIci'][RNaEMQXW0pL^)(1g;b:YBW,^$.SUke>-RSB9j),A@6.+@#EM(_esBY)5>PX^L]&F"fgaFS]2TBfVJ*BEc9@>K%D#:>Hk9R#n8Hj;n8F]:;Et8UD)ArC:15Q0+1snt3Dj&\P.<q6r-@S%abjA)K6qe*+\#tIlsb=DQjS=.[[OD;&k;T70#&X$'A,>W/bPTOr&dUS..Z1GQK*KA(X%%$k/md0ZmN+E21iLMiJK'GFdS'@5L5!$j!JD#hJ>9s#UsqU':$(#)!NoWq*k$=hsXj=n`,AA6AAP"]Ak*5_P>0.VRZcP.A?0]$^P.94Xps^Me753/ONH_i*`*7oG)l!g8CGG%=1-fH^BIU_t\#?Fs);Gf('Xbae<<=:l)j#h\e;rY.ZU#F!N-:o.1-<$Qk. /M!rbGAc&r%=Vj&_,i-+M-"G]fX@1Aj#j7J+$/5hfbHUOA>bmb(7]@+jYCNCg>B>9M;0#rCq5oZ]r9@hb$e$R:P"+2D,'J '"ps:gb'TGEc@'t;"4DJ^k6aM-)(E*pScW4dAq%?Yr<T?:2te8*@1Edk1Ef!A'b;^.*O9q82Ps_>^,[!`.FW3q?^6aF40AYUWn,GM$Q3ZN.a2Oi).1P$>s008oRAV$XmF(!X:HeF:EJ`E$tnj,RSAR(`(Z_2"=A,)WE(j65.=r-3t&6b4[>/EQ5Iii1OBA:o;''k6q3W$N"+DnJgN<TAod3_g_IB&sd0-E<-%f/0jL<VPh8]`5Fkln!clZ97BarE(*hA&7DL(DK+1Hj/f\pS.>KN^2%:[Zk="Xpn[f@7#GLKdFi<[>ZO"Y@ 7R#Vn5DNP]kl.6?QX<]JJ>"U(G>6;TV,'>X`N/>^<6e]Y=rEG/maO*SWSH5cMYhpfT:%TC3TAgm[aO&f2R"WSX39mtB)ED:`;-;W*7?*Oa6_I0YY[HPeFAsQ'A"5s_f'H^%BUN]1f\] ?p9LX+lWWreE"PZ].Ef7Qe2P?s6-^$t&'T&[0,EaBKthk\9*AlDW$s)CS[Qlib6-r[q@UshcoJBUc7ndrtbQPM2K#CBm6SI,i&A5:]>C +4_ZC#R%:mKOq0$FmMCUeP!Hg^Jr8P:% ?e%Jq19AG.\OAbFGV*\B9Y.?j98=S=bJQVrOjV&hHGTf+kj6@%%l$El[B-U85[bg""pjC6;r4Ya[dN`#[N/$1\YfI*l:DiL%?#N`9AHh6A2rLr(Vhc$[Z*C^U?SM1I86hto\\Tb6@UsglAIo%L\"lbh>h]G8/s+4)a *4olRYgfXB,!]:BDb=DA)ICp#)6c!BZ\])'K>PAX$,'0]5^E-Z<Oj*(F_SpY4'9e 7$!Y4,)cLF4QN4dX:SBtPG ELLYK^5AtJ$B:Of8"pFg:r?D*5);**#/r="Eg-GlT!+oAINJVC;gXPm(dhblN;EAYE]>I4MEh(!YME9"dkX &Ar]K?`R[o!*knNfEs7,(MsWD-?Wibj_kj`#$KY]X/,AohF1U`tdg#-3F"J4QOrsep<3#g+*'3.T\2,3l$>I>MGac F)J<rZ&n%^'C#n/^;EoWS^'j7?cGhVS4-mY@jZN'UR14NsN\_#"i m]g*5_s!9k_a/HF[0G0*_h#.af3s?q3]U<h31YDM(0Nr>p9mi=rhr"%n[W*BMPIHkU"_:]Y?P*\N]AfdGkR?HEoOn&Ge9)BkUjH 
ENcm,MV:EP?QB>0B!SCDoBWb%I[A"8A7Ii5A2A)>e;ViJ#bC1XQ.g9jqLAO$nQe'g@ISjnFkg\t\Q^EVg:e(XaU-AT2,Xe.%D-<Ta&s0NQ`?A(s=JK8P)-P3A.Yhf?Zh:kd$88sC0N.C*a`d&j9512Z)@+3NhHg4jMD\?!d&J9!,]Fhl'tm#e4_FA*IJphtmgdZ00*cBAkkAW"t0"S],DoGBf@?l]O$%SF!m57pb1\EOOh%L5-W$LT`4lsb7MP^2"d=&;tW5Af\ ShQ<D"3RLP3b- YWWP5K_HCI\ZL\]1%CM^>g`<fi]qA2qPtJ2:"-Kj;Q,`A;LUqAm<cqA-=X4edXjr^/.D)\`[!U0Gra!^=PbU5<hD3nKi"i#%l& o@XN9k2sg/hXsL2j2G!!DnK_Ik -O_E9d?mAM>W$PFDP7c_C1ln8Ui]pp@U*/2)'lc3Q>J$_5Nl%.a.=1<<46LR:WBdA2ETem5-^'p[6G_n;\gN=)N@E0DP>kfn!_46eMq`2Di4(K;+1G,X*!KZ'&(Gs6IdK_c<4n)p'gP.K!hp%g2+'pAHPdX.kq4.H10aO($ns)&JtK:2?^X=L$T!nd`:oHb$,O/$PM1 hGkFY5mi"^ZHKFN<0*\0rkWB[()Fg.1'^8pd#G`sA!X'r,o0[eiF^CM%G0pmYKKHg%gIWJ I1']6cLlFKg5nW:&:ofb+Ejk&naPs3iQBTG7H`.UDg<P^Oji>;n'<38XtqH6-NX!5<c,K@W#GU$3FB3Q+:&taRkl2'?W/Vp,I?*+ SLp(a6f(*?90(eT0<drjk_':;\*i3WI.2T]%SW&Nn!t%?a? ;^@0af/pB^(c6S+8.N[4:U4e^nq/38NgK)76QK:cor?:lh G0YGJ.gZ9FS/()) -4 2Dhaln(^p,H!T7%&iAa]Gk`:i'UO_e/Z=JR6l[WV"I];WTHU[^l74)cOk1@(Ho!2UM3SW-[-]J@>rG*h`n>r@'`.c9&F"eG+A?3G![`;O%*LiB;koOJt3a,\aG9L??iGc?RF,Y9)FH 19=WQT0W_bU -:J/!3Q(R5f! gD9&5HK,$B=<S<JQ0!qV^-%Q.Jk9CYPsml;>d)1i-#,bT9Me9+PEY4*#Lo#bK6(ADp+_8MLO0!R/esc`#W/,ksGpTGJd1k0Zl8dk"LrEPI;`VfF"&]`bl<$9A`NPBAqEV91:IH4-mH-b$?W7oHKX.07\hlj]G`&QBi 4R@+l16$ T&P7KhnF_LKNG;DtSR&ch&;j$&828OT==(BMt,/WGBil%iW*j,HZ=HRO.TSV7m`>kbWTo9P$;l#2;spkK\j>E3?h-ZpNH!:&50TC]\&0-%hiHeZf4j<39n1e_Bl^+JhkBJA? DAatMI%1rZ(YF C_c!2D+_`t+kAFF=9@S)fjBPD8L]Oj6%J<Ul<`mbNC_bk%?4SE89r#t2J-q"6! 
mK2c='Gt?%*!VcRp90]RW"q`;B2nXCMf:.^(_=aM&VAIZLo@mm(`oC034mbr6/r; /.T#k%+7!H"/+N92`nfL9sUH(q_bYJlpS,6*1A>s-X!L9 =(m'o;L.:DWGC4TsQrCiL,t_RpDC:#]"lAeIjm:fFc?@=, PdaqWbpis'*NoC_KRFlgiUe=&K+4Zf7V-)MqASmc)6hcfja5WlS4oN=*Zbr?d>"5A S`4(OidbFqk",r-O N^g9j$C7Io9^!]KZk"s:m#90bM]D5E)js"Qa9+lk'Xb+ONY=,(-1 4aA6OgC(qdOV^"N] $AhU-9&2Kd;^UM9K`7$6.W3F1Fc\sp]/%]rG-Fk-(jj1qNr,5 5QqnA]@A[Nt/&0U1?eA?A[r6&Bk3,DjS17B@g4o[6Z3lAT>PXK"MG1fl`U27L<C92i`6A@^\%Ri=F>-7IRk-//*\ k+6OJC1M;X<nX(IgkYR^(%3rJA@E7]8K%7.DbQ'P]P*ef/f&L4M!]9F/TA#]D#+h3J<D\2AIA*X?.!Y7A7Xk#B:%4.'[(AS,a FpW>:XY?s9-VjH*AOQ%QGK#o]bm$,`hQN&$0QF_.-(2`$"cbE0jQrH7pWZ99Z#%]*s&i/ctQ?r\9K%gjYMg!ZB48"q/q+n.fW6'*`0D3]gGFQj-B2X`&e&*Md%lgf-2:>lk1AH7h5>6pL3p"PSO)^)!=/?7(S:5%fpb<.]"!frm;o5/.qtpr4G,=Et+<?\Kr/3h@_m>A"-_VS:Q2Ph&A#?p*FVUi9OPj)sYJK/d#Pj6ZU =gXMCK?h$WTY:#JjN=AA]K.dZY,B;AGO56C(c'g]$P-(\/DF*X"/jl96JA+gJn>l;]RdWp6jH#pNdt'1U7^4qgNl7K#O;PA&\;1a/s*,PmG$BZj6L,5+dO^Hq($;\je4;`5`D<"Y0-[o1_6\r.AF.emOM71,=T])=4RlPMa5g6.)mheX*t6)M7cVKQ8LV!BEd$R<. h2GqHFC0Ns+s?JYeXAt"C=H(apc;]J0pAnML& $Ma55qhEp&k=:9n7mPTOA7P8rS_E%an7;M^(:aY,D`U?3YH*^Zh)^na=bkW@JOo&f?E\qNC+Z(ck7%Z/<asX>r8F]c]M/si't?:2I@;o2?FT8O1,iGhM9Lc/^EB5lUKa-Ilj_f'b?KH6U)A&A:YnG.k8;.gc#iYhU/5PKO*2CkC)A=Wm9hAF[I?7rA=J;Mjb'9n-nQ7c?_Q/$L[VOe^_q;%%t)@\A;G&r3E1-Ce6hG(sYt9<qAtqC/q(#h82JgF[K"s)R\>)qtJhs!gjpPa5H=Y^sj`7M:3l')69]7h/>Oo !E]7oAFhAf]O h<=)V+74ipr4'OWir3bB(rp](X@B9(j>O)&UV29,=r`\E\pgD6aLVO@4]p=QrA^d"48KtpS=JAXLU'U)FVN4mQ2helEl]%kf L/aPr4R57HoT+(JOA?Of>:BQ_fEV==N#Y=D-!jeEcm"U,a7HE!AI#Ap=c=p!?jq8rl("H/m*:3iN*+YI"cm?&!;ESGIao< +`8Uj!FAiL+r+lD(oh(\G\U<-mb>Zso$QRskn#&=QLl+)M\&D,mmcP(G.V@Q&\l6]%WSNUaJ1\EV+-Mo,H)f0c<Z$>@?=/-?hU\8n T8 9o4M.!^]X%i5VV7kL[l$UT4?D!aJH!:Lp`MgXJE_2LYVSA0s%P-a0[p596d%s %V!@0t5O#e(+ `E@" X[B  >tQ>ai%n/D=:Oi&@%,Va'npZ so4N$!)k,_h4mpg ClW+6jrf^rt@\7:L0AUVG6j30g+Dmda`I*AcG[r-J^]S5ER:L ([\ 'Oj^$!PT^/K%qWl&^VLV@'Zq`WcbF+tL`M 
E#RJfj27.I$rC_hh.W==$s>Wo^8U<`3p'\)>6#:Kmt[IS;/FA1c3MOO;15qR\k%b74fT`^j8NQ]kbs%N<I[E)@jiM\6`4]ni6eppR6KQ`c;cl(jM2U-0a3rMH`/qemm[UW%</)WM>.5k2))<Un)f<Je5:;NZc4M^GA;L6YF(a&Y)p[:+*lA/Hh.H1V3R:0.2JiY[`0_LaW^qNYR:.^lJ07ImZKDCZ%AhjEJ9-gH^Rpl YHX8nCPVO.(P^5.'h GS@\W%\I3"JE]FtA,ZFGo-t)`5$m$80ZY]M\#>oGJ]GVe\gY">Cj+cc^:RgbqaGOG7*aPjU=4I1h8%Ym&)O/^UdMO]s_i4aCp&:Z8C2`R2BDAj['ZE(bY 423me^/KaMH04tC(Q>3 V fiIpUX>EKdp_.bCc2AWW\Tk>?KA-Z&jX$iQ/*iZO#HFd1:SrVX#E[ZkG.3hbt@*r44i4&XC/K]9%?Hs3caUV+&fIqI21*h4nnW4tVD&tVK44p];U,Y;LsLM8&>4<jt1FslI$k)OY@ASS-q;l-sckF)4oR<)5Gi64:"F@j4&?L&JE9:JX RL`>l3r.:'YiW>)q2=K^!3h9O29F_m40jW7Bi##fM1^DnJ5oWrN_O:d;&SX=e=Erqo;JHn\o9CD00qVpT4IjMA`<EhoX&(F4Vs0=cZ^: c:jadc6q%'Yb]](p[7*kk&-;3mjtXf?"H@AN6R*N,f+WpTe9g9 n"%[Ki`"k,I/%;cp9 CIq#fsO/A g9#p(AE6_cTlZ8Kb0M9fesQE1#FpN[L=%ef0UHakB3V^Ifr6dta%pGZ0hqK%WpD)B!1IPsAgc&;.8%Hd1JS[Ra(&;A^ls=d7s?:#3ttAqThEs^k=hMSj-qEAR+a,\hUD./-=XH1Yr`!Nam)rih#.=)5j^_Plm%.Er3F#:;n(,f()%62NEf0,IGbmK9hfc2X1M8Y^`"QW4t'&JQGpp?k&,.\&%S_3XaetpOT'>r1')8JY,(p8d5FAb1ERtAsArdH+)1!>c/As.1Af4q,Ads//k1^V%4q1htP*"KAAU$:8Tl#Xp' SY7!a0h)GrV2.g'JmbYRLO)LRA<4b>"`LY([l!QIeWG; gd,hdog "Y`#g+M:b_O0"\.f9E"1'O*o'eML4nD@:ARo5/G>7[rpTP\i:7 tU!rb#sBA4EEN)o)Ni[ ;2/-_A>F%U)GMj c[_s18UoP6k-A-t@"aN/KX9cA'Bp'hAc3Zp'i.O(iH=o4b5JRt6&`C\RE8D<.*KA_2fRf-IR?<aDo#7oeI+ @cbq@6QnaPeG+q@RXQSS,A2]J:ce.[VMq-8H$dJ2?Y<p<t.g]Pd]06^W.C94"7m?6F]<r&b&4$,FTt;FLt?skeb(&`WCM+Bf@04l.ECA_^je2A/Rr)%X.SVaSj0W;AaJK2^?m`Wo3N;[ eW[TSiFn*e%=h:6KULm*>PGfr,$a6gXYkkZKCoEYMCe*HFbQCgYn$GfFr6ELPGH=QF<Wi%7-T8f[._L>7g%p;aj)dPeUmISchF\"]6P'FAH0f7iDjao-n'[qXI"PT4la?DUfc9k?'m.-N\i_:dC6/'UfH8BSZ!rAAd/+58iD,\I;#"kZ-NoUr$*li&@'0d W'RnmA%&+R^lS]WQABo2P3P^#>;>N;G2K-fTeXB%;%8fn;A,Q%%ns?><8o *QJF]rt7:1#[loX+;0\@"RL&[*9+.d^2Oa+2cc<d?\&kY;#g&CC`ik)@6F&mP/(#7>$(O>kMPCp3&$1aAV&(-aL^I$QM6?5UnJ"@]_Vs dBbJl_681=,g7pNAcVD@6MFFR+p=Na^QKhb5" m)1P&9d](6!0QB;@ZNN"[^RIGq?dteA/H<n"\?Y?Rnm7@kqdK/GqnUlq63"3rEJB>&Wg?!NmJj)n9j?;Xrn<t,8N<<IRL5c*c=_AV-Ddd/"L9Wt[:k]q+D6A+tD=Tcb%Yo>)A.G000"[<X2lh.ADb8ZAJp*JG:XqD0q 
s^T3)jbPK3aX[)r+Dd`Kg;3YfeVEotW^Q3f:]o1 b[h3#t\T0%V8Z'q#lo ^'Btf6b:(A'j_*J][YQkL]tE<U6^<B(tIX":-[>?bIi982-n6rAf`gnb]Li9_pD_00p\O!VJD#RJ:SM7,.OBk$a+/%ehlgR/`-a,3Dl[6U437I(g-dFm?3a,"0WAS`MSdckj;^91A,DS_jlfSnr'NBPn9An^:<>2,tT%e2IF?MH:%T:om3@b-H's/A8hT+",Ho@<0.:MHts;XK](7P9ro#NaFHsR6n#Sgd#56*RM+t95gWAo6;/RK5Z\\kSam[(9@)/;_!H]eXco]q3+hUF Op8eZt4 `OKe+LDOF]qTMDnY9!M@Fo_i2LFLsTlnM@cqcl(t*a/Sd$Vl@8Jj;a*@diN/...8;bboWLH ";eF<?%>tsQ3!S/U;]UA_n@+n2://?H"OZg!t/8BY18p7HCLq,]h/O$M^XNV[]B$!2mKZ&Dp#0dPk1lc\9`"OC!j^ KqIoc#/s\GAfsW18)-XA8fkOT7W;_BJGar%p`pQGl->3%W;3kb`NBh;jlPKtiT?opK"Z)m#(?2jYIt;c_Gi=m)noCh3nM,MURUo'"-A@Q$E!/YanjTG)F'>lV I!6D:6iX]8;[">h:$aQZt[I8J5b5ha'':)oK';Ah",Zk)mj)$IJ]53kP\9%bPcf2q1>J q;4c/b7U 3]5<Ji*PH&"e!2&i%s,PV&:+mM&gT^osaiZ BA6 jIsjqOJs[*'2pJWZ2?!MPBB[No_QWpD*8=41&0ikriorRAi3[<g(o5WNi-'-J>QaID5ki5@SIKL-P7`a h9FWc./jQndQ3gC&mb&>EHe(m;rHk*JW</1(VW&be!-A$] D@rmnq2ViiH:.C:#7WS@?a (r\C?a:A39Ms<sRc'+[1DNX3,!)q>SC `[`6o'U#MGFW+=#%]>gYQda..\Uai?714@2"n`26b5N]AKfOa"@himgc:Cf-?$K3*Na%'2P$R)OmJ48%&[i6c?+#f4@t?<W!IF0Rq^[V&JqSmAMFB9+7\r=F2MSP^l7]/!RYS.INp?4,sPX!Mt<M[B8.A_)Fo8!kW\f8QEh#&3B3iBXH?X'J P%2i(tAr$AVa5@f:4BT!OGqV=..\jL) 3dEfc%lYY%0_ ,NNhQd=l8SUT.clsJnDsMcT5@i`CZAT?atToZ?([ST+/Yj^t)4Pi.o>A!jBGktEl?gF^"=rbPBle;s0WfFpiWKpOO;#<Wfg&UN%]\Wo*aG$D->/5`\Qnghr'\0&QMbrZYB2nrrB"E?n9@F;/:;!H:/a5`YB4"L%+ Vs)6n^F'R\fA(C^n3f:AM"`+!#Pr*!k='5ZS'<lt8a0qpkHDcUiL')5+0S6>Ka-B!S]7\]W5DnaQZLXfeT0+l:PR6l[ZhB<6p2m#$XUl4K=-00dVAat_b371"\eYVPe+kK'?gFA9T;PAG5SFqN%;Jl.&FP&G?KG?`X,hdZQr"3)D#.ZrP&"eFHP4\>,>A9e_M@,51:Oc6,Z*`m?k]Ej@9B/l1e[/NSA#/PDK`+R^Z0:[$_9=1R,$!PhSOY_#c0\<<n:TWg!J;JJ>;"J$tt:>s.0Vn\L.V07Han]b4!Ti"d,M%5)AC(8JohVLR5qJkkhB 1 4h:t()$A0d4>=k`nS$<^*T`,?-#4Lb/A/HC7<l)I)%)T,!lCZ6JF.sX$>Jfnt$Ys;@p6%%ofb1eg1d*[Ob%,S?DP#"[2Vt3,D[A'')R>XQief!P!GT>PaSE4cD&5d2D B=1b-*9`kAinr6pYq7QA']!%JA\b;QE&-B%76m*3T 1d[lW@nf(o(264V s];P]5A=;Ad#a2BsM1>oF^A+<U%UtGDV%G20PqPATl36MkMK'50G%:d\> 
0fTQg]\ofTp`<&_e[A>YYFAo7b%#?\QUVS]l^AQ7@a2>WDBm&T)[CkA)H>h7q(m4)-Ilf6Ah%*rZ&Od!"no"AFLQFg\]ODV+j+_/Y#%c7fLZHD*USnfmKn=W+J%Y><F(Td3NK_(/F=b%;?)Q.mW<c0a_FgK;Qj'Ihd&]RX*?7o(o]5aWM//0r2*ciA`lRBLh2N8!W$#Ha0[T[[U Tpo4oe>\$H]]a&eIRgLgm36@KthJp)6^6r:AcJjicCR+dJZ)8AmNFH^MkSoG9G72Wq"oRn8bM+4Sj0Drg&)b<9s>+SJ:H!+B:.R$rCl%@r*8VGd7lmX[/Bp(\nhtU9No7OG%Z.n%!t40&=RA);(h&2re#QVjaI">Ie+&DpW"%44A@l$_?P*S.q#Kp;-?\1<n0S,C+^h;RfAG;@KdcrCfFcYOl3+DW!+89kD$>`((G9c\?G(O%^A6I1(%j"6_;(h-oS05i*]PVsl!B1bGr+,#F9B"Trdq2\#rXiV>&q8BTf8)ZPl`P[0dd&chG=?GiW-LC80L`6GP8/sf:S\/7CJ_,`sU!VeToliSQ4)oN,E2CjEt96D'A'$Jf_GHlPW05caah,O.?AjL3EcW0 U7,qU-E!pKce(2/0`8"77bgBQ*"q2Gh.+[@.E"tn=X0[h%CU1g?Bk&,>2d7nS5qtU!%?b:3<7O[!Ym;Ws&F.bq$$8V/-@D38X*<\46*T VX7rg$cCG?#W9 =CEi(:AYk38OAHBbo@6gPSM\)FF29a!l6U9A9iFq..l=45o23O-Y"ql1KW?!;O=7TFBdokoT5s(XkbBoRGs7pZ[s]k;;'`i*2olK%K6=]eapN&*G/A30`..,U+_n+;Qai_1\t[4%iXHT''ABsqHAWO4c7(!sY7@DAt>rS^`LJKmC)62:KCh!L>1>M0Zplodd..Kd9 U^c=h)_c0->jP=W+9Ip=c3q]Vo93^ZMQOG:!#ikFbN[dV0*+sK]A"q^V?e2?<C.b+"dUB^3kmf]Fj-3Gk175U^\&A+_aD0G.98eOVRW_e(* k5StMMQiqkA`,tI+mH,3s0>YbA;()aF5od-c;Uj:(.d/$C=\o-lHB)4+0KiAr`>Y%A$A47US3H9Ml!/i1I=&!Gsn4T6qY/N1Qio+m$p9'F:VhR q&B>XDo6'$>e6pdh*WYB7[5`k^"HtY)0?V]2_>BWto!K599ds]mUt,1KOt=3B!lcQ'E,>I"A?O`@9qMec$5$U-5\3pE:TQ7F9+]!"p`K=ek@>,dC#O/$8BpkYbVL\*6Wi8f2o,;VLS<qhbHe%,<,?sMq\%rp`UG, 86V2T)Pkl/(`)=@D&ctDt;TE"eeb+,MQ9nHV\0(KX2_gL1ADp!=#n]`fq)+s'ACJ8d%t57UXV>@RHr(]NkRQnk?8<fj'p(8F`5\d`tN>IL0oKLj'*N\Vrq:AN)aZ!"i;e(D(`*F:_.$f',ZEg]NdGT*S.:A6PkGf0?/@dmLP^A) Jh3rjQ0-kZ@OA^T41[DLG)O0Jtg'A#G+c2^X>l8g]1A\lB*)c&bnG8imS[ocBS.AXL%K^ .*L2`,CUAE.'DkbK4LZ_@)4MZS:Wlk('RHFUO&WsM;tK5fL5dm<-G_tmh><M48CSG#aDd =eP*OC;Sr6-64e74'>aA/"Rn>e-@cKaUr#`D"A%Bp2)>#0 Y-=s-jr:ita j_jD`jN+i(GL@)Cm%o@Afc`c$66HAt[k5gOGJt-<Xl`Q@-J0e@ZMq082A]Vcb+#AhUAG#tSG!8YNLYDPh-ge(BJALNLL]jrT)LcHXf[r^=ZXL:dp'AUr%l"11^rhds/@+M82o&oa@MdCgB$eIX)2`CdR9V^k(L]]C&p:<.^;]XqeVQ&@ F[DAGd3!)q^g1*:2[\A8M@h)][7L"(X;r([rA Hf8rN&*A5=$oTo5s#;GCZX
 
--- a/j2se/test/sun/security/provider/PolicyFile/getinstance/getinstance.sh	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/security/provider/PolicyFile/getinstance/getinstance.sh	Fri May 25 00:49:14 2007 +0000
@@ -23,7 +23,7 @@
 # have any questions.
 #
 
-# @test 1.9 07/05/06
+# @test 1.9 07/05/24
 # @author  Ram Marti
 # @bug 4350951 
 # @summary 4350951 assumes permission constructor with 2 string params 
--- a/j2se/test/sun/security/x509/AVA/AVAEqualsHashCode.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/security/x509/AVA/AVAEqualsHashCode.java	Fri May 25 00:49:14 2007 +0000
@@ -22,7 +22,7 @@
  */
 
 /*
- * @test 1.5 07/05/06
+ * @test 1.5 07/05/24
  * @author Gary Ellison
  * @bug 4170635
  * @summary Verify equals()/hashCode() contract honored
--- a/j2se/test/sun/security/x509/AVA/EmptyValue.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/security/x509/AVA/EmptyValue.java	Fri May 25 00:49:14 2007 +0000
@@ -22,7 +22,7 @@
  */
 
 /*
- * @test 1.4 07/05/06
+ * @test 1.4 07/05/24
  * @bug 4721433
  * @summary AVA throws StringIndexOutOfBoundsException for empty values
  */
--- a/j2se/test/sun/security/x509/X500Name/AllAttribs.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/security/x509/X500Name/AllAttribs.java	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
  * have any questions.
  */
 
-/* @test 1.5 07/05/06
+/* @test 1.5 07/05/24
  * @bug 4244051
  * @summary Make sure all PKIX-required X.520 name attribs are supported
  */
--- a/j2se/test/sun/security/x509/X500Name/DerValueConstructor.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/security/x509/X500Name/DerValueConstructor.java	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
  * have any questions.
  */
 
-/* @test 1.4 07/05/06
+/* @test 1.4 07/05/24
  * @bug 4228833
  * @summary Make sure constructor that takes DerValue argument works
  */
--- a/j2se/test/sun/security/x509/X500Name/NullX500Name.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/sun/security/x509/X500Name/NullX500Name.java	Fri May 25 00:49:14 2007 +0000
@@ -21,7 +21,7 @@
  * have any questions.
  */
 
-/* @test 1.4 07/05/06
+/* @test 1.4 07/05/24
  * @bug 4118818
  * @summary allow null X.500 Names
  */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/tools/javac/6547131/T.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,23 @@
+/**
+ * @test    @(#)T.java	1.2 07/05/02
+ * @bug     6547131
+ * @summary java.lang.ClassFormatError when using old collection API
+ * @compile T.java
+ * @run main T
+ */
+
+import p.*;
+
+class SubI implements Outer.I {
+    SubI() { }
+    Outer.I getI() { return this; }
+}
+
+public class T {
+    public static void main(String argv[]){     
+        SubI sub = new SubI(); 
+        Outer.I inter = (Outer.I)sub.getI();
+    }
+}
+
+   
Binary file j2se/test/tools/javac/6547131/p/Outer$I.class has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/tools/javac/6547131/p/Outer$I.jasm	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,10 @@
+package  p;
+
+public interface  Outer$I
+	version 50:0
+{
+
+
+public static interface InnerClass I=class Outer$I of class Outer;
+
+} // end Class Outer$I
Binary file j2se/test/tools/javac/6547131/p/Outer.class has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/tools/javac/6547131/p/Outer.jasm	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,18 @@
+package  p;
+
+super public class Outer
+	version 50:0
+{
+
+
+public Method "<init>":"()V"
+	stack 1 locals 1
+{
+		aload_0;
+		invokespecial	Method java/lang/Object."<init>":"()V";
+		return;
+}
+
+public static interface InnerClass I=class Outer$I of class Outer;
+
+} // end Class Outer
--- a/j2se/test/vm/verifier/VerifyProtectedConstructor.java	Tue May 08 19:38:19 2007 +0000
+++ b/j2se/test/vm/verifier/VerifyProtectedConstructor.java	Fri May 25 00:49:14 2007 +0000
@@ -23,13 +23,13 @@
 
 
 /**
- * @test @(#)VerifyProtectedConstructor.java	1.5 07/05/05 17:39:07
- * @bug 5060487,6490436
+ * @test @(#)VerifyProtectedConstructor.java	1.6 07/05/10 21:21:49
+ * @bug 6490436
  * @summary Verify that protected constructor calls are not allowed for classfile version >= 50 (but that they are allowed for lesser versions).
  * @author Keith McGuigan
  */
 
-class VerifyProtectedConstructor extends ClassLoader {
+public class VerifyProtectedConstructor extends ClassLoader {
   public static void main(String argv[]) throws Exception {
     VerifyProtectedConstructor t = new VerifyProtectedConstructor();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/j2se/test/vm/verifier/VerifyStackForExceptionHandlers.java	Fri May 25 00:49:14 2007 +0000
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * 
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ * 
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * 
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+
+/**
+ * @test @(#)VerifyStackForExceptionHandlers.java	1.3 07/05/10 21:13:13
+ * @bug 6547378
+ * @summary Verify that methods with max_stack==0 don't have exception handlers
+ * @author Keith McGuigan
+ */
+
+public class VerifyStackForExceptionHandlers extends ClassLoader {
+    public static void main(String argv[]) throws Exception {
+        VerifyStackForExceptionHandlers t = 
+            new VerifyStackForExceptionHandlers();
+
+        try { 
+            t.loadGoodClass();
+        } catch(VerifyError e) {
+            throw new Exception("FAIL: should be no VerifyError for class A");
+        }
+
+        try {
+            t.loadBadClass();
+            throw new Exception("FAIL: should be a VerifyError for class B");
+        } catch(VerifyError e) {
+            System.out.println("PASS");
+        }
+    }
+
+    private void loadGoodClass() {
+        /* -- code for class A -- 
+           public class A {
+               public static void f() {}
+           }
+        */
+        long[] cls_data = {
+            0xcafebabe00000031L, 0x000e0a0003000b07L,
+            0x000c07000d010006L, 0x3c696e69743e0100L,
+            0x0328295601000443L, 0x6f646501000f4c69L,
+            0x6e654e756d626572L, 0x5461626c65010001L,
+            0x6601000a536f7572L, 0x636546696c650100L,
+            0x06412e6a6176610cL, 0x0004000501000141L,
+            0x0100106a6176612fL, 0x6c616e672f4f626aL,
+            0x6563740021000200L, 0x0300000000000200L,
+            0x0100040005000100L, 0x060000001d000100L,
+            0x01000000052ab700L, 0x01b1000000010007L,
+            0x0000000600010000L, 0x0001000900080005L,
+            0x0001000600000019L, 0x0000000000000001L,
+            0xb100000001000700L, 0x0000060001000000L,
+            0x0200010009000000L, 0x02000a0000000000L
+        };
+        final int EXTRA = 5;
+
+        byte cf_bytes[] = toByteArray(cls_data);
+        Class c = defineClass("A", cf_bytes, 0, cf_bytes.length - EXTRA);
+
+        try { c.newInstance(); } // to force linking, thus verification
+        catch(InstantiationException e) {}
+        catch(IllegalAccessException e) {}
+    }
+
+    private void loadBadClass() throws VerifyError {
+        /* -- code for class B --
+           public class B {
+               public static void g() {}
+               public static void f() { 
+                  // bytecode modified to have a max_stack value of 0
+                  try { g(); }
+                  catch (NullPointerException e) {}
+               }
+        }
+        */
+        long[] cls_data = {
+            0xcafebabe00000031L, 0x00120a000400060aL,
+            0x000d00030c000f00L, 0x0a0700050100106aL,
+            0x6176612f6c616e67L, 0x2f4f626a6563740cL,
+            0x0011000a01000a53L, 0x6f7572636546696cL,
+            0x6507000901001e6aL, 0x6176612f6c616e67L,
+            0x2f4e756c6c506f69L, 0x6e74657245786365L,
+            0x7074696f6e010003L, 0x282956010006422eL,
+            0x6a61736d01000443L, 0x6f646507000e0100L,
+            0x0142010001670100L, 0x01660100063c696eL,
+            0x69743e0021000d00L, 0x0400000000000300L,
+            0x010011000a000100L, 0x0c00000011000100L,
+            0x01000000052ab700L, 0x01b1000000000009L,
+            0x000f000a0001000cL, 0x0000000d00000000L,
+            0x00000001b1000000L, 0x0000090010000a00L,
+            0x01000c0000001c00L, 0x00000100000008b8L,
+            0x0002a700044bb100L, 0x0100000003000600L,
+            0x0800000001000700L, 0x000002000b000000L // 3 bytes extra
+
+        };
+        final int EXTRA = 3;
+
+        byte cf_bytes[] = toByteArray(cls_data);
+        Class c = defineClass("B", cf_bytes, 0, cf_bytes.length - EXTRA);
+
+        try { c.newInstance(); } // to force linking, thus verification
+        catch(InstantiationException e) {}
+        catch(IllegalAccessException e) {}
+    }
+
+    static private byte[] toByteArray(long arr[]) {
+        // convert long array to byte array
+        java.nio.ByteBuffer bbuf = java.nio.ByteBuffer.allocate(arr.length * 8);
+        bbuf.asLongBuffer().put(java.nio.LongBuffer.wrap(arr));
+        return bbuf.array();
+    }
+    }